Diffstat (limited to 'include/trace')
-rw-r--r--  include/trace/bpf_probe.h | 72
-rw-r--r--  include/trace/define_trace.h | 39
-rw-r--r--  include/trace/events/9p.h | 11
-rw-r--r--  include/trace/events/afs.h | 1148
-rw-r--r--  include/trace/events/alarmtimer.h | 2
-rw-r--r--  include/trace/events/amdxdna.h | 101
-rw-r--r--  include/trace/events/asoc.h | 68
-rw-r--r--  include/trace/events/avc.h | 6
-rw-r--r--  include/trace/events/block.h | 175
-rw-r--r--  include/trace/events/bpf_test_run.h | 17
-rw-r--r--  include/trace/events/bridge.h | 72
-rw-r--r--  include/trace/events/btrfs.h | 627
-rw-r--r--  include/trace/events/cachefiles.h | 193
-rw-r--r--  include/trace/events/capability.h | 57
-rw-r--r--  include/trace/events/cgroup.h | 63
-rw-r--r--  include/trace/events/clk.h | 18
-rw-r--r--  include/trace/events/cma.h | 95
-rw-r--r--  include/trace/events/compaction.h | 17
-rw-r--r--  include/trace/events/csd.h | 72
-rw-r--r--  include/trace/events/cxl.h | 112
-rw-r--r--  include/trace/events/damon.h | 86
-rw-r--r--  include/trace/events/devfreq.h | 4
-rw-r--r--  include/trace/events/devlink.h | 52
-rw-r--r--  include/trace/events/dlm.h | 109
-rw-r--r--  include/trace/events/dma.h | 475
-rw-r--r--  include/trace/events/dma_fence.h | 42
-rw-r--r--  include/trace/events/erofs.h | 89
-rw-r--r--  include/trace/events/exceptions.h | 43
-rw-r--r--  include/trace/events/ext4.h | 237
-rw-r--r--  include/trace/events/f2fs.h | 399
-rw-r--r--  include/trace/events/fib.h | 11
-rw-r--r--  include/trace/events/fib6.h | 15
-rw-r--r--  include/trace/events/filelock.h | 107
-rw-r--r--  include/trace/events/filemap.h | 84
-rw-r--r--  include/trace/events/firewire.h | 912
-rw-r--r--  include/trace/events/firewire_ohci.h | 101
-rw-r--r--  include/trace/events/fs_dax.h | 100
-rw-r--r--  include/trace/events/fscache.h | 4
-rw-r--r--  include/trace/events/fsi.h | 31
-rw-r--r--  include/trace/events/fsi_master_i2cr.h | 107
-rw-r--r--  include/trace/events/habanalabs.h | 55
-rw-r--r--  include/trace/events/handshake.h | 319
-rw-r--r--  include/trace/events/huge_memory.h | 53
-rw-r--r--  include/trace/events/hugetlbfs.h | 156
-rw-r--r--  include/trace/events/hw_pressure.h (renamed from include/trace/events/thermal_pressure.h) | 14
-rw-r--r--  include/trace/events/hwmon.h | 16
-rw-r--r--  include/trace/events/icmp.h | 67
-rw-r--r--  include/trace/events/initcall.h | 2
-rw-r--r--  include/trace/events/intel_ifs.h | 49
-rw-r--r--  include/trace/events/intel_ish.h | 2
-rw-r--r--  include/trace/events/io_uring.h | 93
-rw-r--r--  include/trace/events/iocost.h | 14
-rw-r--r--  include/trace/events/iommu.h | 15
-rw-r--r--  include/trace/events/ipi.h | 46
-rw-r--r--  include/trace/events/irq.h | 49
-rw-r--r--  include/trace/events/irq_matrix.h | 8
-rw-r--r--  include/trace/events/iscsi.h | 2
-rw-r--r--  include/trace/events/jbd2.h | 12
-rw-r--r--  include/trace/events/kmem.h | 89
-rw-r--r--  include/trace/events/ksm.h | 284
-rw-r--r--  include/trace/events/kvm.h | 126
-rw-r--r--  include/trace/events/kyber.h | 8
-rw-r--r--  include/trace/events/lock.h | 4
-rw-r--r--  include/trace/events/mce.h | 66
-rw-r--r--  include/trace/events/mdio.h | 2
-rw-r--r--  include/trace/events/memcg.h | 106
-rw-r--r--  include/trace/events/memory-failure.h | 98
-rw-r--r--  include/trace/events/migrate.h | 27
-rw-r--r--  include/trace/events/mmap.h | 56
-rw-r--r--  include/trace/events/mmap_lock.h | 44
-rw-r--r--  include/trace/events/mmc.h | 4
-rw-r--r--  include/trace/events/mmflags.h | 211
-rw-r--r--  include/trace/events/module.h | 8
-rw-r--r--  include/trace/events/mptcp.h | 4
-rw-r--r--  include/trace/events/napi.h | 35
-rw-r--r--  include/trace/events/nbd.h | 2
-rw-r--r--  include/trace/events/neigh.h | 10
-rw-r--r--  include/trace/events/net.h | 52
-rw-r--r--  include/trace/events/net_probe_common.h | 71
-rw-r--r--  include/trace/events/netfs.h | 560
-rw-r--r--  include/trace/events/netlink.h | 2
-rw-r--r--  include/trace/events/nilfs2.h | 6
-rw-r--r--  include/trace/events/notifier.h | 69
-rw-r--r--  include/trace/events/oom.h | 40
-rw-r--r--  include/trace/events/osnoise.h | 98
-rw-r--r--  include/trace/events/page_pool.h | 32
-rw-r--r--  include/trace/events/page_ref.h | 8
-rw-r--r--  include/trace/events/power.h | 112
-rw-r--r--  include/trace/events/preemptirq.h | 8
-rw-r--r--  include/trace/events/pwc.h | 4
-rw-r--r--  include/trace/events/pwm.h | 132
-rw-r--r--  include/trace/events/qdisc.h | 28
-rw-r--r--  include/trace/events/qla.h | 2
-rw-r--r--  include/trace/events/qrtr.h | 35
-rw-r--r--  include/trace/events/rcu.h | 89
-rw-r--r--  include/trace/events/readahead.h | 132
-rw-r--r--  include/trace/events/regulator.h | 6
-rw-r--r--  include/trace/events/rpcgss.h | 48
-rw-r--r--  include/trace/events/rpcrdma.h | 428
-rw-r--r--  include/trace/events/rpm.h | 46
-rw-r--r--  include/trace/events/rseq.h | 9
-rw-r--r--  include/trace/events/rust_sample.h | 31
-rw-r--r--  include/trace/events/rv.h | 142
-rw-r--r--  include/trace/events/rxrpc.h | 1673
-rw-r--r--  include/trace/events/sched.h | 317
-rw-r--r--  include/trace/events/sched_ext.h | 90
-rw-r--r--  include/trace/events/scmi.h | 48
-rw-r--r--  include/trace/events/scsi.h | 39
-rw-r--r--  include/trace/events/skb.h | 21
-rw-r--r--  include/trace/events/sock.h | 111
-rw-r--r--  include/trace/events/sof.h | 14
-rw-r--r--  include/trace/events/sof_intel.h | 18
-rw-r--r--  include/trace/events/spi-mem.h | 106
-rw-r--r--  include/trace/events/spi.h | 12
-rw-r--r--  include/trace/events/sunrpc.h | 424
-rw-r--r--  include/trace/events/swiotlb.h | 2
-rw-r--r--  include/trace/events/syscalls.h | 4
-rw-r--r--  include/trace/events/target.h | 8
-rw-r--r--  include/trace/events/task.h | 52
-rw-r--r--  include/trace/events/tcp.h | 611
-rw-r--r--  include/trace/events/tegra_apb_dma.h | 6
-rw-r--r--  include/trace/events/thermal.h | 199
-rw-r--r--  include/trace/events/thermal_power_allocator.h | 88
-rw-r--r--  include/trace/events/thp.h | 35
-rw-r--r--  include/trace/events/timer.h | 57
-rw-r--r--  include/trace/events/timer_migration.h | 298
-rw-r--r--  include/trace/events/timestamp.h | 124
-rw-r--r--  include/trace/events/tsm_mr.h | 80
-rw-r--r--  include/trace/events/udp.h | 29
-rw-r--r--  include/trace/events/ufs.h | 396
-rw-r--r--  include/trace/events/vmscan.h | 53
-rw-r--r--  include/trace/events/vsock_virtio_transport_common.h | 12
-rw-r--r--  include/trace/events/wbt.h | 8
-rw-r--r--  include/trace/events/workqueue.h | 6
-rw-r--r--  include/trace/events/writeback.h | 90
-rw-r--r--  include/trace/events/xdp.h | 43
-rw-r--r--  include/trace/events/xen.h | 12
-rw-r--r--  include/trace/misc/fs.h | 43
-rw-r--r--  include/trace/misc/nfs.h | 38
-rw-r--r--  include/trace/perf.h | 90
-rw-r--r--  include/trace/stages/stage2_data_offsets.h | 4
-rw-r--r--  include/trace/stages/stage3_trace_output.h | 11
-rw-r--r--  include/trace/stages/stage4_event_fields.h | 3
-rw-r--r--  include/trace/stages/stage5_get_offsets.h | 46
-rw-r--r--  include/trace/stages/stage6_event_callback.h | 31
-rw-r--r--  include/trace/stages/stage7_class_define.h | 2
-rw-r--r--  include/trace/syscall.h | 8
-rw-r--r--  include/trace/trace_events.h | 62
148 files changed, 11551 insertions, 4192 deletions
diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h
index 155c495b89ea..9391d54d3f12 100644
--- a/include/trace/bpf_probe.h
+++ b/include/trace/bpf_probe.h
@@ -4,50 +4,7 @@
#ifdef CONFIG_BPF_EVENTS
-#undef __entry
-#define __entry entry
-
-#undef __get_dynamic_array
-#define __get_dynamic_array(field) \
- ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
-
-#undef __get_dynamic_array_len
-#define __get_dynamic_array_len(field) \
- ((__entry->__data_loc_##field >> 16) & 0xffff)
-
-#undef __get_str
-#define __get_str(field) ((char *)__get_dynamic_array(field))
-
-#undef __get_bitmask
-#define __get_bitmask(field) (char *)__get_dynamic_array(field)
-
-#undef __get_cpumask
-#define __get_cpumask(field) (char *)__get_dynamic_array(field)
-
-#undef __get_sockaddr
-#define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field))
-
-#undef __get_rel_dynamic_array
-#define __get_rel_dynamic_array(field) \
- ((void *)(&__entry->__rel_loc_##field) + \
- sizeof(__entry->__rel_loc_##field) + \
- (__entry->__rel_loc_##field & 0xffff))
-
-#undef __get_rel_dynamic_array_len
-#define __get_rel_dynamic_array_len(field) \
- ((__entry->__rel_loc_##field >> 16) & 0xffff)
-
-#undef __get_rel_str
-#define __get_rel_str(field) ((char *)__get_rel_dynamic_array(field))
-
-#undef __get_rel_bitmask
-#define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field)
-
-#undef __get_rel_cpumask
-#define __get_rel_cpumask(field) (char *)__get_rel_dynamic_array(field)
-
-#undef __get_rel_sockaddr
-#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field))
+#include "stages/stage6_event_callback.h"
#undef __perf_count
#define __perf_count(c) (c)
@@ -55,6 +12,8 @@
#undef __perf_task
#define __perf_task(t) (t)
+#include <linux/args.h>
+
/* cast any integer, pointer, or small struct to u64 */
#define UINTTYPE(size) \
__typeof__(__builtin_choose_expr(size == 1, (u8)1, \
@@ -87,14 +46,27 @@
static notrace void \
__bpf_trace_##call(void *__data, proto) \
{ \
- struct bpf_prog *prog = __data; \
- CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(prog, CAST_TO_U64(args)); \
+ CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(__data, CAST_TO_U64(args)); \
}
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
__BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args))
+#define __BPF_DECLARE_TRACE_SYSCALL(call, proto, args) \
+static notrace void \
+__bpf_trace_##call(void *__data, proto) \
+{ \
+ might_fault(); \
+ preempt_disable_notrace(); \
+ CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(__data, CAST_TO_U64(args)); \
+ preempt_enable_notrace(); \
+}
+
+#undef DECLARE_EVENT_SYSCALL_CLASS
+#define DECLARE_EVENT_SYSCALL_CLASS(call, proto, args, tstruct, assign, print) \
+ __BPF_DECLARE_TRACE_SYSCALL(call, PARAMS(proto), PARAMS(args))
+
/*
* This part is compiled out, it is only here as a build time check
* to make sure that if the tracepoint handling changes, the
@@ -147,14 +119,14 @@ static inline void bpf_test_buffer_##call(void) \
#undef DECLARE_TRACE
#define DECLARE_TRACE(call, proto, args) \
- __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) \
- __DEFINE_EVENT(call, call, PARAMS(proto), PARAMS(args), 0)
+ __BPF_DECLARE_TRACE(call##_tp, PARAMS(proto), PARAMS(args)) \
+ __DEFINE_EVENT(call##_tp, call##_tp, PARAMS(proto), PARAMS(args), 0)
#undef DECLARE_TRACE_WRITABLE
#define DECLARE_TRACE_WRITABLE(call, proto, args, size) \
__CHECK_WRITABLE_BUF_SIZE(call, PARAMS(proto), PARAMS(args), size) \
- __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) \
- __DEFINE_EVENT(call, call, PARAMS(proto), PARAMS(args), size)
+ __BPF_DECLARE_TRACE(call##_tp, PARAMS(proto), PARAMS(args)) \
+ __DEFINE_EVENT(call##_tp, call##_tp, PARAMS(proto), PARAMS(args), size)
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index 00723935dcc7..b2ba5a80583f 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -46,6 +46,10 @@
assign, print, reg, unreg) \
DEFINE_TRACE_FN(name, reg, unreg, PARAMS(proto), PARAMS(args))
+#undef TRACE_EVENT_SYSCALL
+#define TRACE_EVENT_SYSCALL(name, proto, args, struct, assign, print, reg, unreg) \
+ DEFINE_TRACE_SYSCALL(name, reg, unreg, PARAMS(proto), PARAMS(args))
+
#undef TRACE_EVENT_NOP
#define TRACE_EVENT_NOP(name, proto, args, struct, assign, print)
@@ -70,8 +74,27 @@
#undef DECLARE_TRACE
#define DECLARE_TRACE(name, proto, args) \
+ DEFINE_TRACE(name##_tp, PARAMS(proto), PARAMS(args))
+
+#undef DECLARE_TRACE_CONDITION
+#define DECLARE_TRACE_CONDITION(name, proto, args, cond) \
+ DEFINE_TRACE(name##_tp, PARAMS(proto), PARAMS(args))
+
+#undef DECLARE_TRACE_EVENT
+#define DECLARE_TRACE_EVENT(name, proto, args) \
+ DEFINE_TRACE(name, PARAMS(proto), PARAMS(args))
+
+#undef DECLARE_TRACE_EVENT_CONDITION
+#define DECLARE_TRACE_EVENT_CONDITION(name, proto, args, cond) \
DEFINE_TRACE(name, PARAMS(proto), PARAMS(args))
+/* If requested, create helpers for calling these tracepoints from Rust. */
+#ifdef CREATE_RUST_TRACE_POINTS
+#undef DEFINE_RUST_DO_TRACE
+#define DEFINE_RUST_DO_TRACE(name, proto, args) \
+ __DEFINE_RUST_DO_TRACE(name, PARAMS(proto), PARAMS(args))
+#endif
+
#undef TRACE_INCLUDE
#undef __TRACE_INCLUDE
@@ -97,6 +120,13 @@
/* Make all open coded DECLARE_TRACE nops */
#undef DECLARE_TRACE
#define DECLARE_TRACE(name, proto, args)
+#undef DECLARE_TRACE_CONDITION
+#define DECLARE_TRACE_CONDITION(name, proto, args, cond)
+
+#undef DECLARE_TRACE_EVENT
+#define DECLARE_TRACE_EVENT(name, proto, args)
+#undef DECLARE_TRACE_EVENT_CONDITION
+#define DECLARE_TRACE_EVENT_CONDITION(name, proto, args, cond)
#ifdef TRACEPOINTS_ENABLED
#include <trace/trace_events.h>
@@ -107,6 +137,7 @@
#undef TRACE_EVENT
#undef TRACE_EVENT_FN
#undef TRACE_EVENT_FN_COND
+#undef TRACE_EVENT_SYSCALL
#undef TRACE_EVENT_CONDITION
#undef TRACE_EVENT_NOP
#undef DEFINE_EVENT_NOP
@@ -117,6 +148,9 @@
#undef DEFINE_EVENT_CONDITION
#undef TRACE_HEADER_MULTI_READ
#undef DECLARE_TRACE
+#undef DECLARE_TRACE_CONDITION
+#undef DECLARE_TRACE_EVENT
+#undef DECLARE_TRACE_EVENT_CONDITION
/* Only undef what we defined in this file */
#ifdef UNDEF_TRACE_INCLUDE_FILE
@@ -129,6 +163,11 @@
# undef UNDEF_TRACE_INCLUDE_PATH
#endif
+#ifdef CREATE_RUST_TRACE_POINTS
+# undef DEFINE_RUST_DO_TRACE
+# define DEFINE_RUST_DO_TRACE(name, proto, args)
+#endif
+
/* We may be processing more files */
#define CREATE_TRACE_POINTS
diff --git a/include/trace/events/9p.h b/include/trace/events/9p.h
index 4dfa6d7f83ba..cd104a1343e2 100644
--- a/include/trace/events/9p.h
+++ b/include/trace/events/9p.h
@@ -178,18 +178,21 @@ TRACE_EVENT(9p_protocol_dump,
__field( void *, clnt )
__field( __u8, type )
__field( __u16, tag )
- __array( unsigned char, line, P9_PROTO_DUMP_SZ )
+ __dynamic_array(unsigned char, line,
+ min_t(size_t, pdu->capacity, P9_PROTO_DUMP_SZ))
),
TP_fast_assign(
__entry->clnt = clnt;
__entry->type = pdu->id;
__entry->tag = pdu->tag;
- memcpy(__entry->line, pdu->sdata, P9_PROTO_DUMP_SZ);
+ memcpy(__get_dynamic_array(line), pdu->sdata,
+ __get_dynamic_array_len(line));
),
- TP_printk("clnt %lu %s(tag = %d)\n%.3x: %16ph\n%.3x: %16ph\n",
+ TP_printk("clnt %lu %s(tag = %d)\n%*ph\n",
(unsigned long)__entry->clnt, show_9p_op(__entry->type),
- __entry->tag, 0, __entry->line, 16, __entry->line + 16)
+ __entry->tag, __get_dynamic_array_len(line),
+ __get_dynamic_array(line))
);
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index e9d412d19dbb..1b3c48b5591d 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -18,97 +18,6 @@
#ifndef __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
#define __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
-enum afs_call_trace {
- afs_call_trace_alloc,
- afs_call_trace_free,
- afs_call_trace_get,
- afs_call_trace_put,
- afs_call_trace_wake,
- afs_call_trace_work,
-};
-
-enum afs_server_trace {
- afs_server_trace_alloc,
- afs_server_trace_callback,
- afs_server_trace_destroy,
- afs_server_trace_free,
- afs_server_trace_gc,
- afs_server_trace_get_by_addr,
- afs_server_trace_get_by_uuid,
- afs_server_trace_get_caps,
- afs_server_trace_get_install,
- afs_server_trace_get_new_cbi,
- afs_server_trace_get_probe,
- afs_server_trace_give_up_cb,
- afs_server_trace_purging,
- afs_server_trace_put_call,
- afs_server_trace_put_cbi,
- afs_server_trace_put_find_rsq,
- afs_server_trace_put_probe,
- afs_server_trace_put_slist,
- afs_server_trace_put_slist_isort,
- afs_server_trace_put_uuid_rsq,
- afs_server_trace_update,
-};
-
-
-enum afs_volume_trace {
- afs_volume_trace_alloc,
- afs_volume_trace_free,
- afs_volume_trace_get_alloc_sbi,
- afs_volume_trace_get_cell_insert,
- afs_volume_trace_get_new_op,
- afs_volume_trace_get_query_alias,
- afs_volume_trace_put_cell_dup,
- afs_volume_trace_put_cell_root,
- afs_volume_trace_put_destroy_sbi,
- afs_volume_trace_put_free_fc,
- afs_volume_trace_put_put_op,
- afs_volume_trace_put_query_alias,
- afs_volume_trace_put_validate_fc,
- afs_volume_trace_remove,
-};
-
-enum afs_cell_trace {
- afs_cell_trace_alloc,
- afs_cell_trace_free,
- afs_cell_trace_get_queue_dns,
- afs_cell_trace_get_queue_manage,
- afs_cell_trace_get_queue_new,
- afs_cell_trace_get_vol,
- afs_cell_trace_insert,
- afs_cell_trace_manage,
- afs_cell_trace_put_candidate,
- afs_cell_trace_put_destroy,
- afs_cell_trace_put_queue_fail,
- afs_cell_trace_put_queue_work,
- afs_cell_trace_put_vol,
- afs_cell_trace_see_source,
- afs_cell_trace_see_ws,
- afs_cell_trace_unuse_alias,
- afs_cell_trace_unuse_check_alias,
- afs_cell_trace_unuse_delete,
- afs_cell_trace_unuse_fc,
- afs_cell_trace_unuse_lookup,
- afs_cell_trace_unuse_mntpt,
- afs_cell_trace_unuse_no_pin,
- afs_cell_trace_unuse_parse,
- afs_cell_trace_unuse_pin,
- afs_cell_trace_unuse_probe,
- afs_cell_trace_unuse_sbi,
- afs_cell_trace_unuse_ws,
- afs_cell_trace_use_alias,
- afs_cell_trace_use_check_alias,
- afs_cell_trace_use_fc,
- afs_cell_trace_use_fc_alias,
- afs_cell_trace_use_lookup,
- afs_cell_trace_use_mntpt,
- afs_cell_trace_use_pin,
- afs_cell_trace_use_probe,
- afs_cell_trace_use_sbi,
- afs_cell_trace_wait,
-};
-
enum afs_fs_operation {
afs_FS_FetchData = 130, /* AFS Fetch file data */
afs_FS_FetchACL = 131, /* AFS Fetch file ACL */
@@ -160,6 +69,9 @@ enum afs_fs_operation {
yfs_FS_RemoveACL = 64171,
yfs_FS_RemoveFile2 = 64173,
yfs_FS_StoreOpaqueACL2 = 64174,
+ yfs_FS_Rename_Replace = 64176,
+ yfs_FS_Rename_NoReplace = 64177,
+ yfs_FS_Rename_Exchange = 64187,
yfs_FS_InlineBulkStatus = 64536, /* YFS Fetch multiple file statuses with errors */
yfs_FS_FetchData64 = 64537, /* YFS Fetch file data */
yfs_FS_StoreData64 = 64538, /* YFS Store file data */
@@ -202,121 +114,6 @@ enum yfs_cm_operation {
yfs_CB_CallBack = 64204,
};
-enum afs_edit_dir_op {
- afs_edit_dir_create,
- afs_edit_dir_create_error,
- afs_edit_dir_create_inval,
- afs_edit_dir_create_nospc,
- afs_edit_dir_delete,
- afs_edit_dir_delete_error,
- afs_edit_dir_delete_inval,
- afs_edit_dir_delete_noent,
-};
-
-enum afs_edit_dir_reason {
- afs_edit_dir_for_create,
- afs_edit_dir_for_link,
- afs_edit_dir_for_mkdir,
- afs_edit_dir_for_rename_0,
- afs_edit_dir_for_rename_1,
- afs_edit_dir_for_rename_2,
- afs_edit_dir_for_rmdir,
- afs_edit_dir_for_silly_0,
- afs_edit_dir_for_silly_1,
- afs_edit_dir_for_symlink,
- afs_edit_dir_for_unlink,
-};
-
-enum afs_eproto_cause {
- afs_eproto_bad_status,
- afs_eproto_cb_count,
- afs_eproto_cb_fid_count,
- afs_eproto_cellname_len,
- afs_eproto_file_type,
- afs_eproto_ibulkst_cb_count,
- afs_eproto_ibulkst_count,
- afs_eproto_motd_len,
- afs_eproto_offline_msg_len,
- afs_eproto_volname_len,
- afs_eproto_yvl_fsendpt4_len,
- afs_eproto_yvl_fsendpt6_len,
- afs_eproto_yvl_fsendpt_num,
- afs_eproto_yvl_fsendpt_type,
- afs_eproto_yvl_vlendpt4_len,
- afs_eproto_yvl_vlendpt6_len,
- afs_eproto_yvl_vlendpt_type,
-};
-
-enum afs_io_error {
- afs_io_error_cm_reply,
- afs_io_error_extract,
- afs_io_error_fs_probe_fail,
- afs_io_error_vl_lookup_fail,
- afs_io_error_vl_probe_fail,
-};
-
-enum afs_file_error {
- afs_file_error_dir_bad_magic,
- afs_file_error_dir_big,
- afs_file_error_dir_missing_page,
- afs_file_error_dir_name_too_long,
- afs_file_error_dir_over_end,
- afs_file_error_dir_small,
- afs_file_error_dir_unmarked_ext,
- afs_file_error_mntpt,
- afs_file_error_writeback_fail,
-};
-
-enum afs_flock_event {
- afs_flock_acquired,
- afs_flock_callback_break,
- afs_flock_defer_unlock,
- afs_flock_extend_fail,
- afs_flock_fail_other,
- afs_flock_fail_perm,
- afs_flock_no_lockers,
- afs_flock_release_fail,
- afs_flock_silly_delete,
- afs_flock_timestamp,
- afs_flock_try_to_lock,
- afs_flock_vfs_lock,
- afs_flock_vfs_locking,
- afs_flock_waited,
- afs_flock_waiting,
- afs_flock_work_extending,
- afs_flock_work_retry,
- afs_flock_work_unlocking,
- afs_flock_would_block,
-};
-
-enum afs_flock_operation {
- afs_flock_op_copy_lock,
- afs_flock_op_flock,
- afs_flock_op_grant,
- afs_flock_op_lock,
- afs_flock_op_release_lock,
- afs_flock_op_return_ok,
- afs_flock_op_return_eagain,
- afs_flock_op_return_edeadlk,
- afs_flock_op_return_error,
- afs_flock_op_set_lock,
- afs_flock_op_unlock,
- afs_flock_op_wake,
-};
-
-enum afs_cb_break_reason {
- afs_cb_break_no_break,
- afs_cb_break_no_promise,
- afs_cb_break_for_callback,
- afs_cb_break_for_deleted,
- afs_cb_break_for_lapsed,
- afs_cb_break_for_s_reinit,
- afs_cb_break_for_unlink,
- afs_cb_break_for_v_break,
- afs_cb_break_for_volume_callback,
- afs_cb_break_for_zap,
-};
-
#endif /* end __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY */
/*
@@ -324,6 +121,8 @@ enum afs_cb_break_reason {
*/
#define afs_call_traces \
EM(afs_call_trace_alloc, "ALLOC") \
+ EM(afs_call_trace_async_abort, "ASYAB") \
+ EM(afs_call_trace_async_kill, "ASYKL") \
EM(afs_call_trace_free, "FREE ") \
EM(afs_call_trace_get, "GET ") \
EM(afs_call_trace_put, "PUT ") \
@@ -331,35 +130,39 @@ enum afs_cb_break_reason {
E_(afs_call_trace_work, "QUEUE")
#define afs_server_traces \
- EM(afs_server_trace_alloc, "ALLOC ") \
EM(afs_server_trace_callback, "CALLBACK ") \
EM(afs_server_trace_destroy, "DESTROY ") \
EM(afs_server_trace_free, "FREE ") \
EM(afs_server_trace_gc, "GC ") \
- EM(afs_server_trace_get_by_addr, "GET addr ") \
- EM(afs_server_trace_get_by_uuid, "GET uuid ") \
- EM(afs_server_trace_get_caps, "GET caps ") \
- EM(afs_server_trace_get_install, "GET inst ") \
- EM(afs_server_trace_get_new_cbi, "GET cbi ") \
EM(afs_server_trace_get_probe, "GET probe") \
- EM(afs_server_trace_give_up_cb, "giveup-cb") \
EM(afs_server_trace_purging, "PURGE ") \
- EM(afs_server_trace_put_call, "PUT call ") \
EM(afs_server_trace_put_cbi, "PUT cbi ") \
- EM(afs_server_trace_put_find_rsq, "PUT f-rsq") \
EM(afs_server_trace_put_probe, "PUT probe") \
- EM(afs_server_trace_put_slist, "PUT slist") \
- EM(afs_server_trace_put_slist_isort, "PUT isort") \
- EM(afs_server_trace_put_uuid_rsq, "PUT u-req") \
- E_(afs_server_trace_update, "UPDATE")
+ EM(afs_server_trace_see_destroyer, "SEE destr") \
+ EM(afs_server_trace_see_expired, "SEE expd ") \
+ EM(afs_server_trace_see_purge, "SEE purge") \
+ EM(afs_server_trace_see_timer, "SEE timer") \
+ EM(afs_server_trace_unuse_call, "UNU call ") \
+ EM(afs_server_trace_unuse_create_fail, "UNU cfail") \
+ EM(afs_server_trace_unuse_slist, "UNU slist") \
+ EM(afs_server_trace_unuse_slist_isort, "UNU isort") \
+ EM(afs_server_trace_update, "UPDATE ") \
+ EM(afs_server_trace_use_by_uuid, "USE uuid ") \
+ EM(afs_server_trace_use_cm_call, "USE cm-cl") \
+ EM(afs_server_trace_use_get_caps, "USE gcaps") \
+ EM(afs_server_trace_use_give_up_cb, "USE gvupc") \
+ EM(afs_server_trace_use_install, "USE inst ") \
+ E_(afs_server_trace_wait_create, "WAIT crt ")
#define afs_volume_traces \
EM(afs_volume_trace_alloc, "ALLOC ") \
EM(afs_volume_trace_free, "FREE ") \
EM(afs_volume_trace_get_alloc_sbi, "GET sbi-alloc ") \
+ EM(afs_volume_trace_get_callback, "GET callback ") \
EM(afs_volume_trace_get_cell_insert, "GET cell-insrt") \
EM(afs_volume_trace_get_new_op, "GET op-new ") \
EM(afs_volume_trace_get_query_alias, "GET cell-alias") \
+ EM(afs_volume_trace_put_callback, "PUT callback ") \
EM(afs_volume_trace_put_cell_dup, "PUT cell-dup ") \
EM(afs_volume_trace_put_cell_root, "PUT cell-root ") \
EM(afs_volume_trace_put_destroy_sbi, "PUT sbi-destry") \
@@ -371,42 +174,88 @@ enum afs_cb_break_reason {
#define afs_cell_traces \
EM(afs_cell_trace_alloc, "ALLOC ") \
+ EM(afs_cell_trace_destroy, "DESTROY ") \
EM(afs_cell_trace_free, "FREE ") \
- EM(afs_cell_trace_get_queue_dns, "GET q-dns ") \
- EM(afs_cell_trace_get_queue_manage, "GET q-mng ") \
- EM(afs_cell_trace_get_queue_new, "GET q-new ") \
+ EM(afs_cell_trace_get_atcell, "GET atcell") \
+ EM(afs_cell_trace_get_server, "GET server") \
EM(afs_cell_trace_get_vol, "GET vol ") \
- EM(afs_cell_trace_insert, "INSERT ") \
- EM(afs_cell_trace_manage, "MANAGE ") \
+ EM(afs_cell_trace_purge, "PURGE ") \
+ EM(afs_cell_trace_put_atcell, "PUT atcell") \
EM(afs_cell_trace_put_candidate, "PUT candid") \
- EM(afs_cell_trace_put_destroy, "PUT destry") \
- EM(afs_cell_trace_put_queue_work, "PUT q-work") \
- EM(afs_cell_trace_put_queue_fail, "PUT q-fail") \
+ EM(afs_cell_trace_put_final, "PUT final ") \
+ EM(afs_cell_trace_put_server, "PUT server") \
EM(afs_cell_trace_put_vol, "PUT vol ") \
+ EM(afs_cell_trace_queue_again, "QUE again ") \
+ EM(afs_cell_trace_queue_dns, "QUE dns ") \
+ EM(afs_cell_trace_queue_new, "QUE new ") \
+ EM(afs_cell_trace_queue_purge, "QUE purge ") \
+ EM(afs_cell_trace_manage, "MANAGE ") \
+ EM(afs_cell_trace_managed, "MANAGED ") \
EM(afs_cell_trace_see_source, "SEE source") \
- EM(afs_cell_trace_see_ws, "SEE ws ") \
+ EM(afs_cell_trace_see_mgmt_timer, "SEE mtimer") \
EM(afs_cell_trace_unuse_alias, "UNU alias ") \
EM(afs_cell_trace_unuse_check_alias, "UNU chk-al") \
EM(afs_cell_trace_unuse_delete, "UNU delete") \
+ EM(afs_cell_trace_unuse_dynroot_mntpt, "UNU dyn-mp") \
EM(afs_cell_trace_unuse_fc, "UNU fc ") \
- EM(afs_cell_trace_unuse_lookup, "UNU lookup") \
+ EM(afs_cell_trace_unuse_lookup_dynroot, "UNU lu-dyn") \
+ EM(afs_cell_trace_unuse_lookup_error, "UNU lu-err") \
EM(afs_cell_trace_unuse_mntpt, "UNU mntpt ") \
+ EM(afs_cell_trace_unuse_no_pin, "UNU no-pin") \
EM(afs_cell_trace_unuse_parse, "UNU parse ") \
EM(afs_cell_trace_unuse_pin, "UNU pin ") \
- EM(afs_cell_trace_unuse_probe, "UNU probe ") \
EM(afs_cell_trace_unuse_sbi, "UNU sbi ") \
EM(afs_cell_trace_unuse_ws, "UNU ws ") \
EM(afs_cell_trace_use_alias, "USE alias ") \
EM(afs_cell_trace_use_check_alias, "USE chk-al") \
EM(afs_cell_trace_use_fc, "USE fc ") \
EM(afs_cell_trace_use_fc_alias, "USE fc-al ") \
- EM(afs_cell_trace_use_lookup, "USE lookup") \
+ EM(afs_cell_trace_use_lookup_add, "USE lu-add") \
+ EM(afs_cell_trace_use_lookup_canonical, "USE lu-can") \
+ EM(afs_cell_trace_use_lookup_dynroot, "USE lu-dyn") \
+ EM(afs_cell_trace_use_lookup_mntpt, "USE lu-mpt") \
+ EM(afs_cell_trace_use_lookup_mount, "USE lu-mnt") \
+ EM(afs_cell_trace_use_lookup_ws, "USE lu-ws ") \
EM(afs_cell_trace_use_mntpt, "USE mntpt ") \
EM(afs_cell_trace_use_pin, "USE pin ") \
EM(afs_cell_trace_use_probe, "USE probe ") \
EM(afs_cell_trace_use_sbi, "USE sbi ") \
E_(afs_cell_trace_wait, "WAIT ")
+#define afs_alist_traces \
+ EM(afs_alist_trace_alloc, "ALLOC ") \
+ EM(afs_alist_trace_get_estate, "GET estate") \
+ EM(afs_alist_trace_get_vlgetcaps, "GET vgtcap") \
+ EM(afs_alist_trace_get_vlprobe, "GET vprobe") \
+ EM(afs_alist_trace_get_vlrotate_set, "GET vl-rot") \
+ EM(afs_alist_trace_put_estate, "PUT estate") \
+ EM(afs_alist_trace_put_getaddru, "PUT GtAdrU") \
+ EM(afs_alist_trace_put_parse_empty, "PUT p-empt") \
+ EM(afs_alist_trace_put_parse_error, "PUT p-err ") \
+ EM(afs_alist_trace_put_server_create, "PUT sv-crt") \
+ EM(afs_alist_trace_put_server_oom, "PUT sv-oom") \
+ EM(afs_alist_trace_put_server_update, "PUT sv-upd") \
+ EM(afs_alist_trace_put_vlgetcaps, "PUT vgtcap") \
+ EM(afs_alist_trace_put_vlprobe, "PUT vprobe") \
+ EM(afs_alist_trace_put_vlrotate_end, "PUT vr-end") \
+ EM(afs_alist_trace_put_vlrotate_fail, "PUT vr-fai") \
+ EM(afs_alist_trace_put_vlrotate_next, "PUT vr-nxt") \
+ EM(afs_alist_trace_put_vlrotate_restart,"PUT vr-rst") \
+ EM(afs_alist_trace_put_vlserver, "PUT vlsrvr") \
+ EM(afs_alist_trace_put_vlserver_old, "PUT vs-old") \
+ E_(afs_alist_trace_free, "FREE ")
+
+#define afs_estate_traces \
+ EM(afs_estate_trace_alloc_probe, "ALLOC prob") \
+ EM(afs_estate_trace_alloc_server, "ALLOC srvr") \
+ EM(afs_estate_trace_get_server_state, "GET srv-st") \
+ EM(afs_estate_trace_get_getcaps, "GET getcap") \
+ EM(afs_estate_trace_put_getcaps, "PUT getcap") \
+ EM(afs_estate_trace_put_probe, "PUT probe ") \
+ EM(afs_estate_trace_put_server, "PUT server") \
+ EM(afs_estate_trace_put_server_state, "PUT srv-st") \
+ E_(afs_estate_trace_free, "FREE ")
+
#define afs_fs_operations \
EM(afs_FS_FetchData, "FS.FetchData") \
EM(afs_FS_FetchStatus, "FS.FetchStatus") \
@@ -454,6 +303,9 @@ enum afs_cb_break_reason {
EM(yfs_FS_RemoveACL, "YFS.RemoveACL") \
EM(yfs_FS_RemoveFile2, "YFS.RemoveFile2") \
EM(yfs_FS_StoreOpaqueACL2, "YFS.StoreOpaqueACL2") \
+ EM(yfs_FS_Rename_Replace, "YFS.Rename_Replace") \
+ EM(yfs_FS_Rename_NoReplace, "YFS.Rename_NoReplace") \
+ EM(yfs_FS_Rename_Exchange, "YFS.Rename_Exchange") \
EM(yfs_FS_InlineBulkStatus, "YFS.InlineBulkStatus") \
EM(yfs_FS_FetchData64, "YFS.FetchData64") \
EM(yfs_FS_StoreData64, "YFS.StoreData64") \
@@ -492,6 +344,44 @@ enum afs_cb_break_reason {
EM(yfs_CB_TellMeAboutYourself, "YFSCB.TellMeAboutYourself") \
E_(yfs_CB_CallBack, "YFSCB.CallBack")
+#define afs_cb_promise_traces \
+ EM(afs_cb_promise_clear_cb_break, "CLEAR cb-break") \
+ EM(afs_cb_promise_clear_rmdir, "CLEAR rmdir") \
+ EM(afs_cb_promise_clear_rotate_server, "CLEAR rot-srv") \
+ EM(afs_cb_promise_clear_server_change, "CLEAR srv-chg") \
+ EM(afs_cb_promise_clear_vol_init_cb, "CLEAR vol-init-cb") \
+ EM(afs_cb_promise_set_apply_cb, "SET apply-cb") \
+ EM(afs_cb_promise_set_new_inode, "SET new-inode") \
+ E_(afs_cb_promise_set_new_symlink, "SET new-symlink")
+
+#define afs_vnode_invalid_traces \
+ EM(afs_vnode_invalid_trace_cb_ro_snapshot, "cb-ro-snapshot") \
+ EM(afs_vnode_invalid_trace_cb_scrub, "cb-scrub") \
+ EM(afs_vnode_invalid_trace_cb_v_break, "cb-v-break") \
+ EM(afs_vnode_invalid_trace_expired, "expired") \
+ EM(afs_vnode_invalid_trace_no_cb_promise, "no-cb-promise") \
+ EM(afs_vnode_invalid_trace_vol_expired, "vol-expired") \
+ EM(afs_vnode_invalid_trace_zap_data, "zap-data") \
+ E_(afs_vnode_valid_trace, "valid")
+
+#define afs_dir_invalid_traces \
+ EM(afs_dir_invalid_edit_add_bad_size, "edit-add-bad-size") \
+ EM(afs_dir_invalid_edit_add_no_slots, "edit-add-no-slots") \
+ EM(afs_dir_invalid_edit_add_too_many_blocks, "edit-add-too-many-blocks") \
+ EM(afs_dir_invalid_edit_get_block, "edit-get-block") \
+ EM(afs_dir_invalid_edit_mkdir, "edit-mkdir") \
+ EM(afs_dir_invalid_edit_rem_bad_size, "edit-rem-bad-size") \
+ EM(afs_dir_invalid_edit_rem_wrong_name, "edit-rem-wrong_name") \
+ EM(afs_dir_invalid_edit_upd_bad_size, "edit-upd-bad-size") \
+ EM(afs_dir_invalid_edit_upd_no_dd, "edit-upd-no-dotdot") \
+ EM(afs_dir_invalid_dv_mismatch, "dv-mismatch") \
+ EM(afs_dir_invalid_inval_folio, "inv-folio") \
+ EM(afs_dir_invalid_iter_stale, "iter-stale") \
+ EM(afs_dir_invalid_reclaimed_folio, "reclaimed-folio") \
+ EM(afs_dir_invalid_release_folio, "rel-folio") \
+ EM(afs_dir_invalid_remote, "remote") \
+ E_(afs_dir_invalid_subdir_removed, "subdir-removed")
+
#define afs_edit_dir_ops \
EM(afs_edit_dir_create, "create") \
EM(afs_edit_dir_create_error, "c_fail") \
@@ -500,7 +390,12 @@ enum afs_cb_break_reason {
EM(afs_edit_dir_delete, "delete") \
EM(afs_edit_dir_delete_error, "d_err ") \
EM(afs_edit_dir_delete_inval, "d_invl") \
- E_(afs_edit_dir_delete_noent, "d_nent")
+ EM(afs_edit_dir_delete_noent, "d_nent") \
+ EM(afs_edit_dir_mkdir, "mk_ent") \
+ EM(afs_edit_dir_update_dd, "u_ddot") \
+ EM(afs_edit_dir_update_error, "u_fail") \
+ EM(afs_edit_dir_update_inval, "u_invl") \
+ E_(afs_edit_dir_update_nodd, "u_nodd")
#define afs_edit_dir_reasons \
EM(afs_edit_dir_for_create, "Create") \
@@ -509,6 +404,7 @@ enum afs_cb_break_reason {
EM(afs_edit_dir_for_rename_0, "Renam0") \
EM(afs_edit_dir_for_rename_1, "Renam1") \
EM(afs_edit_dir_for_rename_2, "Renam2") \
+ EM(afs_edit_dir_for_rename_sub, "RnmSub") \
EM(afs_edit_dir_for_rmdir, "RmDir ") \
EM(afs_edit_dir_for_silly_0, "S_Ren0") \
EM(afs_edit_dir_for_silly_1, "S_Ren1") \
@@ -549,6 +445,7 @@ enum afs_cb_break_reason {
EM(afs_file_error_dir_over_end, "DIR_ENT_OVER_END") \
EM(afs_file_error_dir_small, "DIR_SMALL") \
EM(afs_file_error_dir_unmarked_ext, "DIR_UNMARKED_EXT") \
+ EM(afs_file_error_symlink_big, "SYM_BIG") \
EM(afs_file_error_mntpt, "MNTPT_READ_FAILED") \
E_(afs_file_error_writeback_fail, "WRITEBACK_FAILED")
@@ -604,15 +501,70 @@ enum afs_cb_break_reason {
#define afs_cb_break_reasons \
EM(afs_cb_break_no_break, "no-break") \
- EM(afs_cb_break_no_promise, "no-promise") \
EM(afs_cb_break_for_callback, "break-cb") \
+ EM(afs_cb_break_for_creation_regress, "creation-regress") \
EM(afs_cb_break_for_deleted, "break-del") \
- EM(afs_cb_break_for_lapsed, "break-lapsed") \
EM(afs_cb_break_for_s_reinit, "s-reinit") \
EM(afs_cb_break_for_unlink, "break-unlink") \
- EM(afs_cb_break_for_v_break, "break-v") \
+ EM(afs_cb_break_for_update_regress, "update-regress") \
EM(afs_cb_break_for_volume_callback, "break-v-cb") \
- E_(afs_cb_break_for_zap, "break-zap")
+ EM(afs_cb_break_for_vos_release, "break-vos-release") \
+ E_(afs_cb_break_volume_excluded, "vol-excluded")
+
+#define afs_rotate_traces \
+ EM(afs_rotate_trace_aborted, "Abortd") \
+ EM(afs_rotate_trace_busy_sleep, "BsySlp") \
+ EM(afs_rotate_trace_check_vol_status, "VolStt") \
+ EM(afs_rotate_trace_failed, "Failed") \
+ EM(afs_rotate_trace_iter, "Iter ") \
+ EM(afs_rotate_trace_iterate_addr, "ItAddr") \
+ EM(afs_rotate_trace_next_server, "NextSv") \
+ EM(afs_rotate_trace_no_more_servers, "NoMore") \
+ EM(afs_rotate_trace_nomem, "Nomem ") \
+ EM(afs_rotate_trace_probe_error, "PrbErr") \
+ EM(afs_rotate_trace_probe_fileserver, "PrbFsv") \
+ EM(afs_rotate_trace_probe_none, "PrbNon") \
+ EM(afs_rotate_trace_probe_response, "PrbRsp") \
+ EM(afs_rotate_trace_probe_superseded, "PrbSup") \
+ EM(afs_rotate_trace_restart, "Rstart") \
+ EM(afs_rotate_trace_retry_server, "RtrySv") \
+ EM(afs_rotate_trace_selected_server, "SlctSv") \
+ EM(afs_rotate_trace_stale_lock, "StlLck") \
+ EM(afs_rotate_trace_start, "Start ") \
+ EM(afs_rotate_trace_stop, "Stop ") \
+ E_(afs_rotate_trace_stopped, "Stoppd")
+
+/*
+ * Generate enums for tracing information.
+ */
+#ifndef __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY
+#define __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY
+
+#undef EM
+#undef E_
+#define EM(a, b) a,
+#define E_(a, b) a
+
+enum afs_alist_trace { afs_alist_traces } __mode(byte);
+enum afs_call_trace { afs_call_traces } __mode(byte);
+enum afs_cb_break_reason { afs_cb_break_reasons } __mode(byte);
+enum afs_cb_promise_trace { afs_cb_promise_traces } __mode(byte);
+enum afs_cell_trace { afs_cell_traces } __mode(byte);
+enum afs_dir_invalid_trace { afs_dir_invalid_traces} __mode(byte);
+enum afs_edit_dir_op { afs_edit_dir_ops } __mode(byte);
+enum afs_edit_dir_reason { afs_edit_dir_reasons } __mode(byte);
+enum afs_eproto_cause { afs_eproto_causes } __mode(byte);
+enum afs_estate_trace { afs_estate_traces } __mode(byte);
+enum afs_file_error { afs_file_errors } __mode(byte);
+enum afs_flock_event { afs_flock_events } __mode(byte);
+enum afs_flock_operation { afs_flock_operations } __mode(byte);
+enum afs_io_error { afs_io_errors } __mode(byte);
+enum afs_rotate_trace { afs_rotate_traces } __mode(byte);
+enum afs_server_trace { afs_server_traces } __mode(byte);
+enum afs_vnode_invalid_trace { afs_vnode_invalid_traces} __mode(byte);
+enum afs_volume_trace { afs_volume_traces } __mode(byte);
+
+#endif /* end __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY */
/*
* Export enum symbols via userspace.
@@ -622,21 +574,27 @@ enum afs_cb_break_reason {
#define EM(a, b) TRACE_DEFINE_ENUM(a);
#define E_(a, b) TRACE_DEFINE_ENUM(a);
+afs_alist_traces;
afs_call_traces;
-afs_server_traces;
+afs_cb_break_reasons;
+afs_cb_promise_traces;
afs_cell_traces;
-afs_fs_operations;
-afs_vl_operations;
afs_cm_operations;
-yfs_cm_operations;
+afs_dir_invalid_traces;
afs_edit_dir_ops;
afs_edit_dir_reasons;
afs_eproto_causes;
-afs_io_errors;
+afs_estate_traces;
afs_file_errors;
-afs_flock_types;
afs_flock_operations;
-afs_cb_break_reasons;
+afs_flock_types;
+afs_fs_operations;
+afs_io_errors;
+afs_rotate_traces;
+afs_server_traces;
+afs_vnode_invalid_traces;
+afs_vl_operations;
+yfs_cm_operations;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -654,12 +612,12 @@ TRACE_EVENT(afs_receive_data,
TP_ARGS(call, iter, want_more, ret),
TP_STRUCT__entry(
- __field(loff_t, remain )
- __field(unsigned int, call )
- __field(enum afs_call_state, state )
- __field(unsigned short, unmarshall )
- __field(bool, want_more )
- __field(int, ret )
+ __field(loff_t, remain)
+ __field(unsigned int, call)
+ __field(enum afs_call_state, state)
+ __field(unsigned short, unmarshall)
+ __field(bool, want_more)
+ __field(int, ret)
),
TP_fast_assign(
@@ -686,9 +644,9 @@ TRACE_EVENT(afs_notify_call,
TP_ARGS(rxcall, call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_call_state, state )
- __field(unsigned short, unmarshall )
+ __field(unsigned int, call)
+ __field(enum afs_call_state, state)
+ __field(unsigned short, unmarshall)
),
TP_fast_assign(
@@ -708,22 +666,29 @@ TRACE_EVENT(afs_cb_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(u32, op )
- __field(u16, service_id )
+ __field(unsigned int, call)
+ __field(u32, op)
+ __field(u16, service_id)
+ __field(u8, security_ix)
+ __field(u32, enctype)
),
TP_fast_assign(
__entry->call = call->debug_id;
__entry->op = call->operation_ID;
__entry->service_id = call->service_id;
+ __entry->security_ix = call->security_ix;
+ __entry->enctype = call->enctype;
),
- TP_printk("c=%08x %s",
+ TP_printk("c=%08x %s sv=%u sx=%u en=%u",
__entry->call,
__entry->service_id == 2501 ?
__print_symbolic(__entry->op, yfs_cm_operations) :
- __print_symbolic(__entry->op, afs_cm_operations))
+ __print_symbolic(__entry->op, afs_cm_operations),
+ __entry->service_id,
+ __entry->security_ix,
+ __entry->enctype)
);
TRACE_EVENT(afs_call,
@@ -733,11 +698,11 @@ TRACE_EVENT(afs_call,
TP_ARGS(call_debug_id, op, ref, outstanding, where),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(int, op )
- __field(int, ref )
- __field(int, outstanding )
- __field(const void *, where )
+ __field(unsigned int, call)
+ __field(int, op)
+ __field(int, ref)
+ __field(int, outstanding)
+ __field(const void *, where)
),
TP_fast_assign(
@@ -762,9 +727,9 @@ TRACE_EVENT(afs_make_fs_call,
TP_ARGS(call, fid),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_fs_operation, op )
- __field_struct(struct afs_fid, fid )
+ __field(unsigned int, call)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
),
TP_fast_assign(
@@ -779,7 +744,7 @@ TRACE_EVENT(afs_make_fs_call,
}
),
- TP_printk("c=%08x %06llx:%06llx:%06x %s",
+ TP_printk("c=%08x V=%llx i=%llx:%x %s",
__entry->call,
__entry->fid.vid,
__entry->fid.vnode,
@@ -794,10 +759,10 @@ TRACE_EVENT(afs_make_fs_calli,
TP_ARGS(call, fid, i),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned int, i )
- __field(enum afs_fs_operation, op )
- __field_struct(struct afs_fid, fid )
+ __field(unsigned int, call)
+ __field(unsigned int, i)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
),
TP_fast_assign(
@@ -813,7 +778,7 @@ TRACE_EVENT(afs_make_fs_calli,
}
),
- TP_printk("c=%08x %06llx:%06llx:%06x %s i=%u",
+ TP_printk("c=%08x V=%llx i=%llx:%x %s i=%u",
__entry->call,
__entry->fid.vid,
__entry->fid.vnode,
@@ -829,10 +794,10 @@ TRACE_EVENT(afs_make_fs_call1,
TP_ARGS(call, fid, name),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_fs_operation, op )
- __field_struct(struct afs_fid, fid )
- __array(char, name, 24 )
+ __field(unsigned int, call)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
+ __array(char, name, 24)
),
TP_fast_assign(
@@ -850,7 +815,7 @@ TRACE_EVENT(afs_make_fs_call1,
__entry->name[__len] = 0;
),
- TP_printk("c=%08x %06llx:%06llx:%06x %s \"%s\"",
+ TP_printk("c=%08x V=%llx i=%llx:%x %s \"%s\"",
__entry->call,
__entry->fid.vid,
__entry->fid.vnode,
@@ -866,11 +831,11 @@ TRACE_EVENT(afs_make_fs_call2,
TP_ARGS(call, fid, name, name2),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_fs_operation, op )
- __field_struct(struct afs_fid, fid )
- __array(char, name, 24 )
- __array(char, name2, 24 )
+ __field(unsigned int, call)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
+ __array(char, name, 24)
+ __array(char, name2, 24)
),
TP_fast_assign(
@@ -891,7 +856,7 @@ TRACE_EVENT(afs_make_fs_call2,
__entry->name2[__len2] = 0;
),
- TP_printk("c=%08x %06llx:%06llx:%06x %s \"%s\" \"%s\"",
+ TP_printk("c=%08x V=%llx i=%llx:%x %s \"%s\" \"%s\"",
__entry->call,
__entry->fid.vid,
__entry->fid.vnode,
@@ -907,8 +872,8 @@ TRACE_EVENT(afs_make_vl_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_vl_operation, op )
+ __field(unsigned int, call)
+ __field(enum afs_vl_operation, op)
),
TP_fast_assign(
@@ -927,10 +892,10 @@ TRACE_EVENT(afs_call_done,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(struct rxrpc_call *, rx_call )
- __field(int, ret )
- __field(u32, abort_code )
+ __field(unsigned int, call)
+ __field(struct rxrpc_call *, rx_call)
+ __field(int, ret)
+ __field(u32, abort_code)
),
TP_fast_assign(
@@ -953,10 +918,10 @@ TRACE_EVENT(afs_send_data,
TP_ARGS(call, msg),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned int, flags )
- __field(loff_t, offset )
- __field(loff_t, count )
+ __field(unsigned int, call)
+ __field(unsigned int, flags)
+ __field(loff_t, offset)
+ __field(loff_t, count)
),
TP_fast_assign(
@@ -977,10 +942,10 @@ TRACE_EVENT(afs_sent_data,
TP_ARGS(call, msg, ret),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(int, ret )
- __field(loff_t, offset )
- __field(loff_t, count )
+ __field(unsigned int, call)
+ __field(int, ret)
+ __field(loff_t, offset)
+ __field(loff_t, count)
),
TP_fast_assign(
@@ -996,57 +961,26 @@ TRACE_EVENT(afs_sent_data,
);
TRACE_EVENT(afs_dir_check_failed,
- TP_PROTO(struct afs_vnode *vnode, loff_t off, loff_t i_size),
+ TP_PROTO(struct afs_vnode *vnode, loff_t off),
- TP_ARGS(vnode, off, i_size),
+ TP_ARGS(vnode, off),
TP_STRUCT__entry(
- __field(struct afs_vnode *, vnode )
- __field(loff_t, off )
- __field(loff_t, i_size )
+ __field(struct afs_vnode *, vnode)
+ __field(loff_t, off)
+ __field(loff_t, i_size)
),
TP_fast_assign(
__entry->vnode = vnode;
__entry->off = off;
- __entry->i_size = i_size;
+ __entry->i_size = i_size_read(&vnode->netfs.inode);
),
TP_printk("vn=%p %llx/%llx",
__entry->vnode, __entry->off, __entry->i_size)
);
-TRACE_EVENT(afs_folio_dirty,
- TP_PROTO(struct afs_vnode *vnode, const char *where, struct folio *folio),
-
- TP_ARGS(vnode, where, folio),
-
- TP_STRUCT__entry(
- __field(struct afs_vnode *, vnode )
- __field(const char *, where )
- __field(pgoff_t, index )
- __field(unsigned long, from )
- __field(unsigned long, to )
- ),
-
- TP_fast_assign(
- unsigned long priv = (unsigned long)folio_get_private(folio);
- __entry->vnode = vnode;
- __entry->where = where;
- __entry->index = folio_index(folio);
- __entry->from = afs_folio_dirty_from(folio, priv);
- __entry->to = afs_folio_dirty_to(folio, priv);
- __entry->to |= (afs_is_folio_dirty_mmapped(priv) ?
- (1UL << (BITS_PER_LONG - 1)) : 0);
- ),
-
- TP_printk("vn=%p %lx %s %lx-%lx%s",
- __entry->vnode, __entry->index, __entry->where,
- __entry->from,
- __entry->to & ~(1UL << (BITS_PER_LONG - 1)),
- __entry->to & (1UL << (BITS_PER_LONG - 1)) ? " M" : "")
- );
-
TRACE_EVENT(afs_call_state,
TP_PROTO(struct afs_call *call,
enum afs_call_state from,
@@ -1056,11 +990,11 @@ TRACE_EVENT(afs_call_state,
TP_ARGS(call, from, to, ret, remote_abort),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_call_state, from )
- __field(enum afs_call_state, to )
- __field(int, ret )
- __field(u32, abort )
+ __field(unsigned int, call)
+ __field(enum afs_call_state, from)
+ __field(enum afs_call_state, to)
+ __field(int, ret)
+ __field(u32, abort)
),
TP_fast_assign(
@@ -1084,9 +1018,9 @@ TRACE_EVENT(afs_lookup,
TP_ARGS(dvnode, name, fid),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, dfid )
- __field_struct(struct afs_fid, fid )
- __array(char, name, 24 )
+ __field_struct(struct afs_fid, dfid)
+ __field_struct(struct afs_fid, fid)
+ __array(char, name, 24)
),
TP_fast_assign(
@@ -1116,15 +1050,15 @@ TRACE_EVENT(afs_edit_dir,
TP_ARGS(dvnode, why, op, block, slot, f_vnode, f_unique, name),
TP_STRUCT__entry(
- __field(unsigned int, vnode )
- __field(unsigned int, unique )
- __field(enum afs_edit_dir_reason, why )
- __field(enum afs_edit_dir_op, op )
- __field(unsigned int, block )
- __field(unsigned short, slot )
- __field(unsigned int, f_vnode )
- __field(unsigned int, f_unique )
- __array(char, name, 24 )
+ __field(unsigned int, vnode)
+ __field(unsigned int, unique)
+ __field(enum afs_edit_dir_reason, why)
+ __field(enum afs_edit_dir_op, op)
+ __field(unsigned int, block)
+ __field(unsigned short, slot)
+ __field(unsigned int, f_vnode)
+ __field(unsigned int, f_unique)
+ __array(char, name, 24)
),
TP_fast_assign(
@@ -1142,7 +1076,7 @@ TRACE_EVENT(afs_edit_dir,
__entry->name[__len] = 0;
),
- TP_printk("d=%x:%x %s %s %u[%u] f=%x:%x \"%s\"",
+ TP_printk("di=%x:%x %s %s %u[%u] fi=%x:%x \"%s\"",
__entry->vnode, __entry->unique,
__print_symbolic(__entry->why, afs_edit_dir_reasons),
__print_symbolic(__entry->op, afs_edit_dir_ops),
@@ -1151,14 +1085,130 @@ TRACE_EVENT(afs_edit_dir,
__entry->name)
);
+TRACE_EVENT(afs_dir_invalid,
+ TP_PROTO(const struct afs_vnode *dvnode, enum afs_dir_invalid_trace trace),
+
+ TP_ARGS(dvnode, trace),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, vnode)
+ __field(unsigned int, unique)
+ __field(enum afs_dir_invalid_trace, trace)
+ ),
+
+ TP_fast_assign(
+ __entry->vnode = dvnode->fid.vnode;
+ __entry->unique = dvnode->fid.unique;
+ __entry->trace = trace;
+ ),
+
+ TP_printk("di=%x:%x %s",
+ __entry->vnode, __entry->unique,
+ __print_symbolic(__entry->trace, afs_dir_invalid_traces))
+ );
+
+TRACE_EVENT(afs_cb_promise,
+ TP_PROTO(const struct afs_vnode *vnode, enum afs_cb_promise_trace trace),
+
+ TP_ARGS(vnode, trace),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, vnode)
+ __field(unsigned int, unique)
+ __field(enum afs_cb_promise_trace, trace)
+ ),
+
+ TP_fast_assign(
+ __entry->vnode = vnode->fid.vnode;
+ __entry->unique = vnode->fid.unique;
+ __entry->trace = trace;
+ ),
+
+ TP_printk("di=%x:%x %s",
+ __entry->vnode, __entry->unique,
+ __print_symbolic(__entry->trace, afs_cb_promise_traces))
+ );
+
+TRACE_EVENT(afs_vnode_invalid,
+ TP_PROTO(const struct afs_vnode *vnode, enum afs_vnode_invalid_trace trace),
+
+ TP_ARGS(vnode, trace),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, vnode)
+ __field(unsigned int, unique)
+ __field(enum afs_vnode_invalid_trace, trace)
+ ),
+
+ TP_fast_assign(
+ __entry->vnode = vnode->fid.vnode;
+ __entry->unique = vnode->fid.unique;
+ __entry->trace = trace;
+ ),
+
+ TP_printk("di=%x:%x %s",
+ __entry->vnode, __entry->unique,
+ __print_symbolic(__entry->trace, afs_vnode_invalid_traces))
+ );
+
+TRACE_EVENT(afs_set_dv,
+ TP_PROTO(const struct afs_vnode *dvnode, u64 new_dv),
+
+ TP_ARGS(dvnode, new_dv),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, vnode)
+ __field(unsigned int, unique)
+ __field(u64, old_dv)
+ __field(u64, new_dv)
+ ),
+
+ TP_fast_assign(
+ __entry->vnode = dvnode->fid.vnode;
+ __entry->unique = dvnode->fid.unique;
+ __entry->old_dv = dvnode->status.data_version;
+ __entry->new_dv = new_dv;
+ ),
+
+ TP_printk("di=%x:%x dv=%llx -> dv=%llx",
+ __entry->vnode, __entry->unique,
+ __entry->old_dv, __entry->new_dv)
+ );
+
+TRACE_EVENT(afs_dv_mismatch,
+ TP_PROTO(const struct afs_vnode *dvnode, u64 before_dv, int delta, u64 new_dv),
+
+ TP_ARGS(dvnode, before_dv, delta, new_dv),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, vnode)
+ __field(unsigned int, unique)
+ __field(int, delta)
+ __field(u64, before_dv)
+ __field(u64, new_dv)
+ ),
+
+ TP_fast_assign(
+ __entry->vnode = dvnode->fid.vnode;
+ __entry->unique = dvnode->fid.unique;
+ __entry->delta = delta;
+ __entry->before_dv = before_dv;
+ __entry->new_dv = new_dv;
+ ),
+
+ TP_printk("di=%x:%x xdv=%llx+%d dv=%llx",
+ __entry->vnode, __entry->unique,
+ __entry->before_dv, __entry->delta, __entry->new_dv)
+ );
+
TRACE_EVENT(afs_protocol_error,
TP_PROTO(struct afs_call *call, enum afs_eproto_cause cause),
TP_ARGS(call, cause),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_eproto_cause, cause )
+ __field(unsigned int, call)
+ __field(enum afs_eproto_cause, cause)
),
TP_fast_assign(
@@ -1177,9 +1227,9 @@ TRACE_EVENT(afs_io_error,
TP_ARGS(call, error, where),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(int, error )
- __field(enum afs_io_error, where )
+ __field(unsigned int, call)
+ __field(int, error)
+ __field(enum afs_io_error, where)
),
TP_fast_assign(
@@ -1199,9 +1249,9 @@ TRACE_EVENT(afs_file_error,
TP_ARGS(vnode, error, where),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(int, error )
- __field(enum afs_file_error, where )
+ __field_struct(struct afs_fid, fid)
+ __field(int, error)
+ __field(enum afs_file_error, where)
),
TP_fast_assign(
@@ -1216,15 +1266,40 @@ TRACE_EVENT(afs_file_error,
__print_symbolic(__entry->where, afs_file_errors))
);
+TRACE_EVENT(afs_bulkstat_error,
+ TP_PROTO(struct afs_operation *op, struct afs_fid *fid, unsigned int index, s32 abort),
+
+ TP_ARGS(op, fid, index, abort),
+
+ TP_STRUCT__entry(
+ __field_struct(struct afs_fid, fid)
+ __field(unsigned int, op)
+ __field(unsigned int, index)
+ __field(s32, abort)
+ ),
+
+ TP_fast_assign(
+ __entry->op = op->debug_id;
+ __entry->fid = *fid;
+ __entry->index = index;
+ __entry->abort = abort;
+ ),
+
+ TP_printk("OP=%08x[%02x] %llx:%llx:%x a=%d",
+ __entry->op, __entry->index,
+ __entry->fid.vid, __entry->fid.vnode, __entry->fid.unique,
+ __entry->abort)
+ );
+
TRACE_EVENT(afs_cm_no_server,
- TP_PROTO(struct afs_call *call, struct sockaddr_rxrpc *srx),
+ TP_PROTO(struct afs_call *call, const struct sockaddr_rxrpc *srx),
TP_ARGS(call, srx),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned int, op_id )
- __field_struct(struct sockaddr_rxrpc, srx )
+ __field(unsigned int, call)
+ __field(unsigned int, op_id)
+ __field_struct(struct sockaddr_rxrpc, srx)
),
TP_fast_assign(
@@ -1243,9 +1318,9 @@ TRACE_EVENT(afs_cm_no_server_u,
TP_ARGS(call, uuid),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned int, op_id )
- __field_struct(uuid_t, uuid )
+ __field(unsigned int, call)
+ __field(unsigned int, op_id)
+ __field_struct(uuid_t, uuid)
),
TP_fast_assign(
@@ -1265,11 +1340,11 @@ TRACE_EVENT(afs_flock_ev,
TP_ARGS(vnode, fl, event, error),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(enum afs_flock_event, event )
- __field(enum afs_lock_state, state )
- __field(int, error )
- __field(unsigned int, debug_id )
+ __field_struct(struct afs_fid, fid)
+ __field(enum afs_flock_event, event)
+ __field(enum afs_lock_state, state)
+ __field(int, error)
+ __field(unsigned int, debug_id)
),
TP_fast_assign(
@@ -1295,13 +1370,13 @@ TRACE_EVENT(afs_flock_op,
TP_ARGS(vnode, fl, op),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(loff_t, from )
- __field(loff_t, len )
- __field(enum afs_flock_operation, op )
- __field(unsigned char, type )
- __field(unsigned int, flags )
- __field(unsigned int, debug_id )
+ __field_struct(struct afs_fid, fid)
+ __field(loff_t, from)
+ __field(loff_t, len)
+ __field(enum afs_flock_operation, op)
+ __field(unsigned char, type)
+ __field(unsigned int, flags)
+ __field(unsigned int, debug_id)
),
TP_fast_assign(
@@ -1309,8 +1384,8 @@ TRACE_EVENT(afs_flock_op,
__entry->from = fl->fl_start;
__entry->len = fl->fl_end - fl->fl_start + 1;
__entry->op = op;
- __entry->type = fl->fl_type;
- __entry->flags = fl->fl_flags;
+ __entry->type = fl->c.flc_type;
+ __entry->flags = fl->c.flc_flags;
__entry->debug_id = fl->fl_u.afs.debug_id;
),
@@ -1328,7 +1403,7 @@ TRACE_EVENT(afs_reload_dir,
TP_ARGS(vnode),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
+ __field_struct(struct afs_fid, fid)
),
TP_fast_assign(
@@ -1345,8 +1420,8 @@ TRACE_EVENT(afs_silly_rename,
TP_ARGS(vnode, done),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(bool, done )
+ __field_struct(struct afs_fid, fid)
+ __field(bool, done)
),
TP_fast_assign(
@@ -1365,9 +1440,9 @@ TRACE_EVENT(afs_get_tree,
TP_ARGS(cell, volume),
TP_STRUCT__entry(
- __field(u64, vid )
- __array(char, cell, 24 )
- __array(char, volume, 24 )
+ __field(u64, vid)
+ __array(char, cell, 24)
+ __array(char, volume, 24)
),
TP_fast_assign(
@@ -1385,6 +1460,30 @@ TRACE_EVENT(afs_get_tree,
__entry->cell, __entry->volume, __entry->vid)
);
+TRACE_EVENT(afs_cb_v_break,
+ TP_PROTO(afs_volid_t vid, unsigned int cb_v_break,
+ enum afs_cb_break_reason reason),
+
+ TP_ARGS(vid, cb_v_break, reason),
+
+ TP_STRUCT__entry(
+ __field(afs_volid_t, vid)
+ __field(unsigned int, cb_v_break)
+ __field(enum afs_cb_break_reason, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->vid = vid;
+ __entry->cb_v_break = cb_v_break;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("%llx vb=%x %s",
+ __entry->vid,
+ __entry->cb_v_break,
+ __print_symbolic(__entry->reason, afs_cb_break_reasons))
+ );
+
TRACE_EVENT(afs_cb_break,
TP_PROTO(struct afs_fid *fid, unsigned int cb_break,
enum afs_cb_break_reason reason, bool skipped),
@@ -1392,10 +1491,10 @@ TRACE_EVENT(afs_cb_break,
TP_ARGS(fid, cb_break, reason, skipped),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(unsigned int, cb_break )
- __field(enum afs_cb_break_reason, reason )
- __field(bool, skipped )
+ __field_struct(struct afs_fid, fid)
+ __field(unsigned int, cb_break)
+ __field(enum afs_cb_break_reason, reason)
+ __field(bool, skipped)
),
TP_fast_assign(
@@ -1418,8 +1517,8 @@ TRACE_EVENT(afs_cb_miss,
TP_ARGS(fid, reason),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(enum afs_cb_break_reason, reason )
+ __field_struct(struct afs_fid, fid)
+ __field(enum afs_cb_break_reason, reason)
),
TP_fast_assign(
@@ -1439,10 +1538,10 @@ TRACE_EVENT(afs_server,
TP_ARGS(server_debug_id, ref, active, reason),
TP_STRUCT__entry(
- __field(unsigned int, server )
- __field(int, ref )
- __field(int, active )
- __field(int, reason )
+ __field(unsigned int, server)
+ __field(int, ref)
+ __field(int, active)
+ __field(int, reason)
),
TP_fast_assign(
@@ -1452,7 +1551,7 @@ TRACE_EVENT(afs_server,
__entry->reason = reason;
),
- TP_printk("s=%08x %s u=%d a=%d",
+ TP_printk("s=%08x %s r=%d a=%d",
__entry->server,
__print_symbolic(__entry->reason, afs_server_traces),
__entry->ref,
@@ -1460,25 +1559,29 @@ TRACE_EVENT(afs_server,
);
TRACE_EVENT(afs_volume,
- TP_PROTO(afs_volid_t vid, int ref, enum afs_volume_trace reason),
+ TP_PROTO(unsigned int debug_id, afs_volid_t vid, int ref,
+ enum afs_volume_trace reason),
- TP_ARGS(vid, ref, reason),
+ TP_ARGS(debug_id, vid, ref, reason),
TP_STRUCT__entry(
- __field(afs_volid_t, vid )
- __field(int, ref )
- __field(enum afs_volume_trace, reason )
+ __field(unsigned int, debug_id)
+ __field(afs_volid_t, vid)
+ __field(int, ref)
+ __field(enum afs_volume_trace, reason)
),
TP_fast_assign(
- __entry->vid = vid;
- __entry->ref = ref;
- __entry->reason = reason;
+ __entry->debug_id = debug_id;
+ __entry->vid = vid;
+ __entry->ref = ref;
+ __entry->reason = reason;
),
- TP_printk("V=%llx %s ur=%d",
- __entry->vid,
+ TP_printk("V=%08x %s vid=%llx r=%d",
+ __entry->debug_id,
__print_symbolic(__entry->reason, afs_volume_traces),
+ __entry->vid,
__entry->ref)
);
@@ -1489,10 +1592,10 @@ TRACE_EVENT(afs_cell,
TP_ARGS(cell_debug_id, ref, active, reason),
TP_STRUCT__entry(
- __field(unsigned int, cell )
- __field(int, ref )
- __field(int, active )
- __field(int, reason )
+ __field(unsigned int, cell)
+ __field(int, ref)
+ __field(int, active)
+ __field(int, reason)
),
TP_fast_assign(
@@ -1509,6 +1612,229 @@ TRACE_EVENT(afs_cell,
__entry->active)
);
+TRACE_EVENT(afs_alist,
+ TP_PROTO(unsigned int alist_debug_id, int ref, enum afs_alist_trace reason),
+
+ TP_ARGS(alist_debug_id, ref, reason),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, alist)
+ __field(int, ref)
+ __field(int, active)
+ __field(int, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->alist = alist_debug_id;
+ __entry->ref = ref;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("AL=%08x %s r=%d",
+ __entry->alist,
+ __print_symbolic(__entry->reason, afs_alist_traces),
+ __entry->ref)
+ );
+
+TRACE_EVENT(afs_estate,
+ TP_PROTO(unsigned int server_debug_id, unsigned int estate_debug_id,
+ int ref, enum afs_estate_trace reason),
+
+ TP_ARGS(server_debug_id, estate_debug_id, ref, reason),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, server)
+ __field(unsigned int, estate)
+ __field(int, ref)
+ __field(int, active)
+ __field(int, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->server = server_debug_id;
+ __entry->estate = estate_debug_id;
+ __entry->ref = ref;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("ES=%08x[%x] %s r=%d",
+ __entry->server,
+ __entry->estate,
+ __print_symbolic(__entry->reason, afs_estate_traces),
+ __entry->ref)
+ );
+
+TRACE_EVENT(afs_fs_probe,
+ TP_PROTO(struct afs_server *server, bool tx, struct afs_endpoint_state *estate,
+ unsigned int addr_index, int error, s32 abort_code, unsigned int rtt_us),
+
+ TP_ARGS(server, tx, estate, addr_index, error, abort_code, rtt_us),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, server)
+ __field(unsigned int, estate)
+ __field(bool, tx)
+ __field(u16, addr_index)
+ __field(short, error)
+ __field(s32, abort_code)
+ __field(unsigned int, rtt_us)
+ __field_struct(struct sockaddr_rxrpc, srx)
+ ),
+
+ TP_fast_assign(
+ struct afs_addr_list *alist = estate->addresses;
+ __entry->server = server->debug_id;
+ __entry->estate = estate->probe_seq;
+ __entry->tx = tx;
+ __entry->addr_index = addr_index;
+ __entry->error = error;
+ __entry->abort_code = abort_code;
+ __entry->rtt_us = rtt_us;
+ memcpy(&__entry->srx, rxrpc_kernel_remote_srx(alist->addrs[addr_index].peer),
+ sizeof(__entry->srx));
+ ),
+
+ TP_printk("s=%08x %s pq=%x ax=%u e=%d ac=%d rtt=%d %pISpc",
+ __entry->server, __entry->tx ? "tx" : "rx", __entry->estate,
+ __entry->addr_index, __entry->error, __entry->abort_code, __entry->rtt_us,
+ &__entry->srx.transport)
+ );
+
+TRACE_EVENT(afs_vl_probe,
+ TP_PROTO(struct afs_vlserver *server, bool tx, struct afs_addr_list *alist,
+ unsigned int addr_index, int error, s32 abort_code, unsigned int rtt_us),
+
+ TP_ARGS(server, tx, alist, addr_index, error, abort_code, rtt_us),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, server)
+ __field(bool, tx)
+ __field(unsigned short, flags)
+ __field(u16, addr_index)
+ __field(short, error)
+ __field(s32, abort_code)
+ __field(unsigned int, rtt_us)
+ __field_struct(struct sockaddr_rxrpc, srx)
+ ),
+
+ TP_fast_assign(
+ __entry->server = server->debug_id;
+ __entry->tx = tx;
+ __entry->addr_index = addr_index;
+ __entry->error = error;
+ __entry->abort_code = abort_code;
+ __entry->rtt_us = rtt_us;
+ memcpy(&__entry->srx, rxrpc_kernel_remote_srx(alist->addrs[addr_index].peer),
+ sizeof(__entry->srx));
+ ),
+
+ TP_printk("vl=%08x %s ax=%u e=%d ac=%d rtt=%d %pISpc",
+ __entry->server, __entry->tx ? "tx" : "rx", __entry->addr_index,
+ __entry->error, __entry->abort_code, __entry->rtt_us,
+ &__entry->srx.transport)
+ );
+
+TRACE_EVENT(afs_rotate,
+ TP_PROTO(struct afs_operation *op, enum afs_rotate_trace reason, unsigned int extra),
+
+ TP_ARGS(op, reason, extra),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, op)
+ __field(unsigned int, flags)
+ __field(unsigned int, extra)
+ __field(unsigned short, iteration)
+ __field(short, server_index)
+ __field(short, addr_index)
+ __field(enum afs_rotate_trace, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->op = op->debug_id;
+ __entry->flags = op->flags;
+ __entry->iteration = op->nr_iterations;
+ __entry->server_index = op->server_index;
+ __entry->addr_index = op->addr_index;
+ __entry->reason = reason;
+ __entry->extra = extra;
+ ),
+
+ TP_printk("OP=%08x it=%02x %s fl=%x sx=%d ax=%d ext=%d",
+ __entry->op,
+ __entry->iteration,
+ __print_symbolic(__entry->reason, afs_rotate_traces),
+ __entry->flags,
+ __entry->server_index,
+ __entry->addr_index,
+ __entry->extra)
+ );
+
+TRACE_EVENT(afs_make_call,
+ TP_PROTO(struct afs_call *call),
+
+ TP_ARGS(call),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(bool, is_vl)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
+ __field_struct(struct sockaddr_rxrpc, srx)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->op = call->operation_ID;
+ __entry->fid = call->fid;
+ memcpy(&__entry->srx, rxrpc_kernel_remote_srx(call->peer),
+ sizeof(__entry->srx));
+ __entry->srx.srx_service = call->service_id;
+ __entry->is_vl = (__entry->srx.srx_service == VL_SERVICE ||
+ __entry->srx.srx_service == YFS_VL_SERVICE);
+ ),
+
+ TP_printk("c=%08x %pISpc+%u %s %llx:%llx:%x",
+ __entry->call,
+ &__entry->srx.transport,
+ __entry->srx.srx_service,
+ __entry->is_vl ?
+ __print_symbolic(__entry->op, afs_vl_operations) :
+ __print_symbolic(__entry->op, afs_fs_operations),
+ __entry->fid.vid,
+ __entry->fid.vnode,
+ __entry->fid.unique)
+ );
+
+TRACE_EVENT(afs_read_recv,
+ TP_PROTO(const struct afs_operation *op, const struct afs_call *call),
+
+ TP_ARGS(op, call),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq)
+ __field(unsigned int, sreq)
+ __field(unsigned int, op)
+ __field(unsigned int, op_flags)
+ __field(unsigned int, call)
+ __field(enum afs_call_state, call_state)
+ ),
+
+ TP_fast_assign(
+ __entry->op = op->debug_id;
+ __entry->sreq = op->fetch.subreq->debug_index;
+ __entry->rreq = op->fetch.subreq->rreq->debug_id;
+ __entry->op_flags = op->flags;
+ __entry->call = call->debug_id;
+ __entry->call_state = call->state;
+ ),
+
+ TP_printk("R=%08x[%x] OP=%08x c=%08x cs=%x of=%x",
+ __entry->rreq, __entry->sreq,
+ __entry->op,
+ __entry->call, __entry->call_state,
+ __entry->op_flags)
+ );
+
#endif /* _TRACE_AFS_H */
/* This part must be outside protection */
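Each TRACE_EVENT() added to afs.h above expands into a trace_<name>() static inline taking the TP_PROTO arguments, which fs/afs calls at the corresponding point. A minimal sketch of firing the new afs_alist event, with the surrounding function and variable names assumed rather than taken from the actual fs/afs call sites:

/* Sketch only: trace_afs_alist() is generated from TRACE_EVENT(afs_alist, ...)
 * above; the function wrapping it here is illustrative, not real fs/afs code.
 */
#include <trace/events/afs.h>

static void example_note_alist_ref(unsigned int alist_debug_id, int refcount,
                                   enum afs_alist_trace reason)
{
        /* Compiles down to a patched-out branch when the tracepoint is off. */
        trace_afs_alist(alist_debug_id, refcount, reason);
}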
diff --git a/include/trace/events/alarmtimer.h b/include/trace/events/alarmtimer.h
index 13483c7ca70b..8e9c76a7f21b 100644
--- a/include/trace/events/alarmtimer.h
+++ b/include/trace/events/alarmtimer.h
@@ -20,6 +20,7 @@ TRACE_DEFINE_ENUM(ALARM_BOOTTIME_FREEZER);
{ 1 << ALARM_REALTIME_FREEZER, "REALTIME Freezer" }, \
{ 1 << ALARM_BOOTTIME_FREEZER, "BOOTTIME Freezer" })
+#ifdef CONFIG_RTC_CLASS
TRACE_EVENT(alarmtimer_suspend,
TP_PROTO(ktime_t expires, int flag),
@@ -41,6 +42,7 @@ TRACE_EVENT(alarmtimer_suspend,
__entry->expires
)
);
+#endif /* CONFIG_RTC_CLASS */
DECLARE_EVENT_CLASS(alarm_class,
diff --git a/include/trace/events/amdxdna.h b/include/trace/events/amdxdna.h
new file mode 100644
index 000000000000..c6cb2da7b706
--- /dev/null
+++ b/include/trace/events/amdxdna.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM amdxdna
+
+#if !defined(_TRACE_AMDXDNA_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_AMDXDNA_H
+
+#include <drm/gpu_scheduler.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(amdxdna_debug_point,
+ TP_PROTO(const char *name, u64 number, const char *str),
+
+ TP_ARGS(name, number, str),
+
+ TP_STRUCT__entry(__string(name, name)
+ __field(u64, number)
+ __string(str, str)),
+
+ TP_fast_assign(__assign_str(name);
+ __entry->number = number;
+ __assign_str(str);),
+
+ TP_printk("%s:%llu %s", __get_str(name), __entry->number,
+ __get_str(str))
+);
+
+TRACE_EVENT(xdna_job,
+ TP_PROTO(struct drm_sched_job *sched_job, const char *name, const char *str, u64 seq),
+
+ TP_ARGS(sched_job, name, str, seq),
+
+ TP_STRUCT__entry(__string(name, name)
+ __string(str, str)
+ __field(u64, fence_context)
+ __field(u64, fence_seqno)
+ __field(u64, seq)),
+
+ TP_fast_assign(__assign_str(name);
+ __assign_str(str);
+ __entry->fence_context = sched_job->s_fence->finished.context;
+ __entry->fence_seqno = sched_job->s_fence->finished.seqno;
+ __entry->seq = seq;),
+
+ TP_printk("fence=(context:%llu, seqno:%lld), %s seq#:%lld %s",
+ __entry->fence_context, __entry->fence_seqno,
+ __get_str(name), __entry->seq,
+ __get_str(str))
+);
+
+DECLARE_EVENT_CLASS(xdna_mbox_msg,
+ TP_PROTO(char *name, u8 chann_id, u32 opcode, u32 msg_id),
+
+ TP_ARGS(name, chann_id, opcode, msg_id),
+
+ TP_STRUCT__entry(__string(name, name)
+ __field(u32, chann_id)
+ __field(u32, opcode)
+ __field(u32, msg_id)),
+
+ TP_fast_assign(__assign_str(name);
+ __entry->chann_id = chann_id;
+ __entry->opcode = opcode;
+ __entry->msg_id = msg_id;),
+
+ TP_printk("%s.%d id 0x%x opcode 0x%x", __get_str(name),
+ __entry->chann_id, __entry->msg_id, __entry->opcode)
+);
+
+DEFINE_EVENT(xdna_mbox_msg, mbox_set_tail,
+ TP_PROTO(char *name, u8 chann_id, u32 opcode, u32 id),
+ TP_ARGS(name, chann_id, opcode, id)
+);
+
+DEFINE_EVENT(xdna_mbox_msg, mbox_set_head,
+ TP_PROTO(char *name, u8 chann_id, u32 opcode, u32 id),
+ TP_ARGS(name, chann_id, opcode, id)
+);
+
+TRACE_EVENT(mbox_irq_handle,
+ TP_PROTO(char *name, int irq),
+
+ TP_ARGS(name, irq),
+
+ TP_STRUCT__entry(__string(name, name)
+ __field(int, irq)),
+
+ TP_fast_assign(__assign_str(name);
+ __entry->irq = irq;),
+
+ TP_printk("%s.%d", __get_str(name), __entry->irq)
+);
+
+#endif /* !defined(_TRACE_AMDXDNA_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
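As with any header under include/trace/events/, exactly one compilation unit in the amdxdna driver must define CREATE_TRACE_POINTS before including it so the event bodies are emitted; every other file includes the header plainly. A minimal sketch, with the function and channel names assumed rather than taken from the driver:

/* Sketch: instantiating the events above and firing mbox_irq_handle.
 * The function and channel name are illustrative assumptions.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/amdxdna.h>

static void example_log_mailbox_irq(int irq)
{
        static char chann_name[] = "xdna-mbox";

        /* Generated from TRACE_EVENT(mbox_irq_handle, ...) above. */
        trace_mbox_irq_handle(chann_name, irq);
}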
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index 4d8ef71090af..4a645549164e 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -8,80 +8,88 @@
#include <linux/ktime.h>
#include <linux/tracepoint.h>
#include <sound/jack.h>
+#include <sound/pcm.h>
#define DAPM_DIRECT "(direct)"
#define DAPM_ARROW(dir) (((dir) == SND_SOC_DAPM_DIR_OUT) ? "->" : "<-")
+TRACE_DEFINE_ENUM(SND_SOC_DAPM_DIR_OUT);
+
struct snd_soc_jack;
struct snd_soc_card;
struct snd_soc_dapm_widget;
struct snd_soc_dapm_path;
-DECLARE_EVENT_CLASS(snd_soc_card,
+DECLARE_EVENT_CLASS(snd_soc_dapm,
- TP_PROTO(struct snd_soc_card *card, int val),
+ TP_PROTO(struct snd_soc_dapm_context *dapm, int val),
- TP_ARGS(card, val),
+ TP_ARGS(dapm, val),
TP_STRUCT__entry(
- __string( name, card->name )
- __field( int, val )
+ __string( card_name, snd_soc_dapm_to_card(dapm)->name)
+ __string( comp_name, snd_soc_dapm_to_component(dapm) ? snd_soc_dapm_to_component(dapm)->name : "(none)")
+ __field( int, val)
),
TP_fast_assign(
- __assign_str(name, card->name);
+ __assign_str(card_name);
+ __assign_str(comp_name);
__entry->val = val;
),
- TP_printk("card=%s val=%d", __get_str(name), (int)__entry->val)
+ TP_printk("card=%s component=%s val=%d",
+ __get_str(card_name), __get_str(comp_name), (int)__entry->val)
);
-DEFINE_EVENT(snd_soc_card, snd_soc_bias_level_start,
+DEFINE_EVENT(snd_soc_dapm, snd_soc_bias_level_start,
- TP_PROTO(struct snd_soc_card *card, int val),
+ TP_PROTO(struct snd_soc_dapm_context *dapm, int val),
- TP_ARGS(card, val)
+ TP_ARGS(dapm, val)
);
-DEFINE_EVENT(snd_soc_card, snd_soc_bias_level_done,
+DEFINE_EVENT(snd_soc_dapm, snd_soc_bias_level_done,
- TP_PROTO(struct snd_soc_card *card, int val),
+ TP_PROTO(struct snd_soc_dapm_context *dapm, int val),
- TP_ARGS(card, val)
+ TP_ARGS(dapm, val)
);
DECLARE_EVENT_CLASS(snd_soc_dapm_basic,
- TP_PROTO(struct snd_soc_card *card),
+ TP_PROTO(struct snd_soc_card *card, int event),
- TP_ARGS(card),
+ TP_ARGS(card, event),
TP_STRUCT__entry(
__string( name, card->name )
+ __field( int, event )
),
TP_fast_assign(
- __assign_str(name, card->name);
+ __assign_str(name);
+ __entry->event = event;
),
- TP_printk("card=%s", __get_str(name))
+ TP_printk("card=%s event=%d", __get_str(name), (int)__entry->event)
);
DEFINE_EVENT(snd_soc_dapm_basic, snd_soc_dapm_start,
- TP_PROTO(struct snd_soc_card *card),
+ TP_PROTO(struct snd_soc_card *card, int event),
- TP_ARGS(card)
+ TP_ARGS(card, event)
);
DEFINE_EVENT(snd_soc_dapm_basic, snd_soc_dapm_done,
- TP_PROTO(struct snd_soc_card *card),
+ TP_PROTO(struct snd_soc_card *card, int event),
- TP_ARGS(card)
+ TP_ARGS(card, event)
);
@@ -97,7 +105,7 @@ DECLARE_EVENT_CLASS(snd_soc_dapm_widget,
),
TP_fast_assign(
- __assign_str(name, w->name);
+ __assign_str(name);
__entry->val = val;
),
@@ -143,7 +151,7 @@ TRACE_EVENT(snd_soc_dapm_walk_done,
),
TP_fast_assign(
- __assign_str(name, card->name);
+ __assign_str(name);
__entry->power_checks = card->dapm_stats.power_checks;
__entry->path_checks = card->dapm_stats.path_checks;
__entry->neighbour_checks = card->dapm_stats.neighbour_checks;
@@ -172,9 +180,9 @@ TRACE_EVENT(snd_soc_dapm_path,
),
TP_fast_assign(
- __assign_str(wname, widget->name);
- __assign_str(pname, path->name ? path->name : DAPM_DIRECT);
- __assign_str(pnname, path->node[dir]->name);
+ __assign_str(wname);
+ __assign_str(pname);
+ __assign_str(pnname);
__entry->path_connect = path->connect;
__entry->path_node = (long)path->node[dir];
__entry->path_dir = dir;
@@ -205,7 +213,7 @@ TRACE_EVENT(snd_soc_dapm_connected,
),
TP_printk("%s: found %d paths",
- __entry->stream ? "capture" : "playback", __entry->paths)
+ snd_pcm_direction_name(__entry->stream), __entry->paths)
);
TRACE_EVENT(snd_soc_jack_irq,
@@ -219,7 +227,7 @@ TRACE_EVENT(snd_soc_jack_irq,
),
TP_fast_assign(
- __assign_str(name, name);
+ __assign_str(name);
),
TP_printk("%s", __get_str(name))
@@ -238,7 +246,7 @@ TRACE_EVENT(snd_soc_jack_report,
),
TP_fast_assign(
- __assign_str(name, jack->jack->id);
+ __assign_str(name);
__entry->mask = mask;
__entry->val = val;
),
@@ -259,7 +267,7 @@ TRACE_EVENT(snd_soc_jack_notify,
),
TP_fast_assign(
- __assign_str(name, jack->jack->id);
+ __assign_str(name);
__entry->val = val;
),
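Most hunks in this file, and the similar ones in avc.h, bridge.h and btrfs.h further down, are the same mechanical conversion: __assign_str() now takes only the field name, and the string it copies is the source expression already recorded in the matching __string() declaration. A hypothetical event showing the resulting shape (the event name is made up, not an actual ASoC tracepoint):

/* Sketch of the one-argument __assign_str() pattern used throughout this
 * diff; "example_card_event" is illustrative only.
 */
TRACE_EVENT(example_card_event,
        TP_PROTO(struct snd_soc_card *card, int val),
        TP_ARGS(card, val),

        TP_STRUCT__entry(
                __string(name, card->name)      /* source expression recorded here */
                __field(int, val)
        ),

        TP_fast_assign(
                __assign_str(name);             /* copies card->name */
                __entry->val = val;
        ),

        TP_printk("card=%s val=%d", __get_str(name), __entry->val)
);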
diff --git a/include/trace/events/avc.h b/include/trace/events/avc.h
index b55fda2e0773..fed0f141d5f6 100644
--- a/include/trace/events/avc.h
+++ b/include/trace/events/avc.h
@@ -36,9 +36,9 @@ TRACE_EVENT(selinux_audited,
__entry->denied = sad->denied;
__entry->audited = sad->audited;
__entry->result = sad->result;
- __assign_str(tcontext, tcontext);
- __assign_str(scontext, scontext);
- __assign_str(tclass, tclass);
+ __assign_str(tcontext);
+ __assign_str(scontext);
+ __assign_str(tclass);
),
TP_printk("requested=0x%x denied=0x%x audited=0x%x result=%d scontext=%s tcontext=%s tclass=%s",
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 7f4dfbdf12a6..6aa79e2d799c 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -9,9 +9,18 @@
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>
+#include <uapi/linux/ioprio.h>
-#define RWBS_LEN 8
+#define RWBS_LEN 10
+#define IOPRIO_CLASS_STRINGS \
+ { IOPRIO_CLASS_NONE, "none" }, \
+ { IOPRIO_CLASS_RT, "rt" }, \
+ { IOPRIO_CLASS_BE, "be" }, \
+ { IOPRIO_CLASS_IDLE, "idle" }, \
+ { IOPRIO_CLASS_INVALID, "invalid"}
+
+#ifdef CONFIG_BUFFER_HEAD
DECLARE_EVENT_CLASS(block_buffer,
TP_PROTO(struct buffer_head *bh),
@@ -61,6 +70,7 @@ DEFINE_EVENT(block_buffer, block_dirty_buffer,
TP_ARGS(bh)
);
+#endif /* CONFIG_BUFFER_HEAD */
/**
* block_rq_requeue - place block IO request back on a queue
@@ -80,6 +90,7 @@ TRACE_EVENT(block_rq_requeue,
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
+ __field( unsigned short, ioprio )
__array( char, rwbs, RWBS_LEN )
__dynamic_array( char, cmd, 1 )
),
@@ -88,16 +99,20 @@ TRACE_EVENT(block_rq_requeue,
__entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
__entry->sector = blk_rq_trace_sector(rq);
__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
+ __entry->ioprio = req_get_ioprio(rq);
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
__get_str(cmd)[0] = '\0';
),
- TP_printk("%d,%d %s (%s) %llu + %u [%d]",
+ TP_printk("%d,%d %s (%s) %llu + %u %s,%u,%u [%d]",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->rwbs, __get_str(cmd),
- (unsigned long long)__entry->sector,
- __entry->nr_sector, 0)
+ (unsigned long long)__entry->sector, __entry->nr_sector,
+ __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio),
+ IOPRIO_CLASS_STRINGS),
+ IOPRIO_PRIO_HINT(__entry->ioprio),
+ IOPRIO_PRIO_LEVEL(__entry->ioprio), 0)
);
DECLARE_EVENT_CLASS(block_rq_completion,
@@ -111,6 +126,7 @@ DECLARE_EVENT_CLASS(block_rq_completion,
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( int , error )
+ __field( unsigned short, ioprio )
__array( char, rwbs, RWBS_LEN )
__dynamic_array( char, cmd, 1 )
),
@@ -120,16 +136,20 @@ DECLARE_EVENT_CLASS(block_rq_completion,
__entry->sector = blk_rq_pos(rq);
__entry->nr_sector = nr_bytes >> 9;
__entry->error = blk_status_to_errno(error);
+ __entry->ioprio = req_get_ioprio(rq);
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
__get_str(cmd)[0] = '\0';
),
- TP_printk("%d,%d %s (%s) %llu + %u [%d]",
+ TP_printk("%d,%d %s (%s) %llu + %u %s,%u,%u [%d]",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->rwbs, __get_str(cmd),
- (unsigned long long)__entry->sector,
- __entry->nr_sector, __entry->error)
+ (unsigned long long)__entry->sector, __entry->nr_sector,
+ __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio),
+ IOPRIO_CLASS_STRINGS),
+ IOPRIO_PRIO_HINT(__entry->ioprio),
+ IOPRIO_PRIO_LEVEL(__entry->ioprio), __entry->error)
);
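The three extra fields now printed between the sector range and the final value decode the request's ioprio: the class (mapped through IOPRIO_CLASS_STRINGS), the per-I/O hint, and the priority level, all taken from the macros in <linux/ioprio.h>. A small sketch of that decomposition on a hypothetical value:

/* Sketch: decomposing the 16-bit ioprio value printed above as "%s,%u,%u". */
#include <linux/ioprio.h>

static void example_decode_ioprio(unsigned short ioprio)
{
        pr_debug("class=%d hint=%d level=%d\n",
                 IOPRIO_PRIO_CLASS(ioprio),     /* e.g. IOPRIO_CLASS_BE */
                 IOPRIO_PRIO_HINT(ioprio),      /* per-I/O hint, 0 when unused */
                 IOPRIO_PRIO_LEVEL(ioprio));    /* level within the class */
}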
/**
@@ -178,6 +198,7 @@ DECLARE_EVENT_CLASS(block_rq,
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( unsigned int, bytes )
+ __field( unsigned short, ioprio )
__array( char, rwbs, RWBS_LEN )
__array( char, comm, TASK_COMM_LEN )
__dynamic_array( char, cmd, 1 )
@@ -188,17 +209,21 @@ DECLARE_EVENT_CLASS(block_rq,
__entry->sector = blk_rq_trace_sector(rq);
__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
__entry->bytes = blk_rq_bytes(rq);
+ __entry->ioprio = req_get_ioprio(rq);
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
__get_str(cmd)[0] = '\0';
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
- TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
+ TP_printk("%d,%d %s %u (%s) %llu + %u %s,%u,%u [%s]",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->rwbs, __entry->bytes, __get_str(cmd),
- (unsigned long long)__entry->sector,
- __entry->nr_sector, __entry->comm)
+ (unsigned long long)__entry->sector, __entry->nr_sector,
+ __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio),
+ IOPRIO_CLASS_STRINGS),
+ IOPRIO_PRIO_HINT(__entry->ioprio),
+ IOPRIO_PRIO_LEVEL(__entry->ioprio), __entry->comm)
);
/**
@@ -246,6 +271,32 @@ DEFINE_EVENT(block_rq, block_rq_merge,
);
/**
+ * block_io_start - insert a request for execution
+ * @rq: block IO operation request
+ *
+ * Called when block operation request @rq is queued for execution
+ */
+DEFINE_EVENT(block_rq, block_io_start,
+
+ TP_PROTO(struct request *rq),
+
+ TP_ARGS(rq)
+);
+
+/**
+ * block_io_done - block IO operation request completed
+ * @rq: block IO operation request
+ *
+ * Called when block operation request @rq is completed
+ */
+DEFINE_EVENT(block_rq, block_io_done,
+
+ TP_PROTO(struct request *rq),
+
+ TP_ARGS(rq)
+);
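block_io_start and block_io_done bracket a request's execution, so a probe attached to both can account per-request service time. A hedged sketch of attaching probes through the generated register_trace_*() helpers; the probe bodies and cleanup path are omitted, and visibility to out-of-tree modules depends on the block core's tracepoint exports:

/* Sketch: attaching probes to the two new events.  Bodies are left empty;
 * a probe's signature is the event's TP_PROTO preceded by the probe cookie.
 */
#include <trace/events/block.h>

static void probe_io_start(void *data, struct request *rq)
{
}

static void probe_io_done(void *data, struct request *rq)
{
}

static int example_attach_probes(void)
{
        int ret;

        ret = register_trace_block_io_start(probe_io_start, NULL);
        if (ret)
                return ret;
        return register_trace_block_io_done(probe_io_done, NULL);
}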
+
+/**
* block_bio_complete - completed all work on the block operation
* @q: queue holding the block operation
* @bio: block operation completed
@@ -310,21 +361,6 @@ DECLARE_EVENT_CLASS(block_bio,
);
/**
- * block_bio_bounce - used bounce buffer when processing block operation
- * @bio: block operation
- *
- * A bounce buffer was used to handle the block operation @bio in @q.
- * This occurs when hardware limitations prevent a direct transfer of
- * data between the @bio data memory area and the IO device. Use of a
- * bounce buffer requires extra copying of data and decreases
- * performance.
- */
-DEFINE_EVENT(block_bio, block_bio_bounce,
- TP_PROTO(struct bio *bio),
- TP_ARGS(bio)
-);
-
-/**
* block_bio_backmerge - merging block operation to the end of an existing operation
* @bio: new block operation to merge
*
@@ -369,6 +405,17 @@ DEFINE_EVENT(block_bio, block_getrq,
);
/**
+ * blk_zone_append_update_request_bio - update bio sector after zone append
+ * @rq: the completed request that sets the bio sector
+ *
+ * Update the bio's bi_sector after a zone append command has been completed.
+ */
+DEFINE_EVENT(block_rq, blk_zone_append_update_request_bio,
+ TP_PROTO(struct request *rq),
+ TP_ARGS(rq)
+);
+
+/**
* block_plug - keep operations requests in request queue
* @q: request queue to plug
*
@@ -552,6 +599,84 @@ TRACE_EVENT(block_rq_remap,
(unsigned long long)__entry->old_sector, __entry->nr_bios)
);
+/**
+ * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
+ * @bio: The block IO operation sent down to the device
+ * @nr_sectors: The number of sectors affected by this operation
+ *
+ * Execute a zone management operation on a specified range of zones. This
+ * range is encoded in %nr_sectors, which has to be a multiple of the zone
+ * size.
+ */
+TRACE_EVENT(blkdev_zone_mgmt,
+
+ TP_PROTO(struct bio *bio, sector_t nr_sectors),
+
+ TP_ARGS(bio, nr_sectors),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( sector_t, nr_sectors )
+ __array( char, rwbs, RWBS_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio_dev(bio);
+ __entry->sector = bio->bi_iter.bi_sector;
+ __entry->nr_sectors = bio_sectors(bio);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
+ ),
+
+ TP_printk("%d,%d %s %llu + %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector,
+ __entry->nr_sectors)
+);
+
+DECLARE_EVENT_CLASS(block_zwplug,
+
+ TP_PROTO(struct request_queue *q, unsigned int zno, sector_t sector,
+ unsigned int nr_sectors),
+
+ TP_ARGS(q, zno, sector, nr_sectors),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( unsigned int, zno )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sectors )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = disk_devt(q->disk);
+ __entry->zno = zno;
+ __entry->sector = sector;
+ __entry->nr_sectors = nr_sectors;
+ ),
+
+ TP_printk("%d,%d zone %u, BIO %llu + %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->zno,
+ (unsigned long long)__entry->sector,
+ __entry->nr_sectors)
+);
+
+DEFINE_EVENT(block_zwplug, disk_zone_wplug_add_bio,
+
+ TP_PROTO(struct request_queue *q, unsigned int zno, sector_t sector,
+ unsigned int nr_sectors),
+
+ TP_ARGS(q, zno, sector, nr_sectors)
+);
+
+DEFINE_EVENT(block_zwplug, blk_zone_wplug_bio,
+
+ TP_PROTO(struct request_queue *q, unsigned int zno, sector_t sector,
+ unsigned int nr_sectors),
+
+ TP_ARGS(q, zno, sector, nr_sectors)
+);
+
#endif /* _TRACE_BLOCK_H */
/* This part must be outside protection */
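The zone-related events added at the end of this file follow the same pattern as the rest: blkdev_zone_mgmt is fired with the management bio and the affected sector count, and the block_zwplug class covers the zone write plugging paths. A minimal sketch of the generated call for the former, with the caller assumed rather than taken from blk-zoned.c:

/* Sketch only: fires the blkdev_zone_mgmt event declared above. */
#include <trace/events/block.h>

static void example_trace_zone_mgmt(struct bio *bio, sector_t nr_sectors)
{
        /* Per the kernel-doc above, nr_sectors must be a multiple of the
         * zone size. */
        trace_blkdev_zone_mgmt(bio, nr_sectors);
}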
diff --git a/include/trace/events/bpf_test_run.h b/include/trace/events/bpf_test_run.h
index 265447e3f71a..0c924d39b7cb 100644
--- a/include/trace/events/bpf_test_run.h
+++ b/include/trace/events/bpf_test_run.h
@@ -7,6 +7,23 @@
#include <linux/tracepoint.h>
+TRACE_EVENT(bpf_trigger_tp,
+
+ TP_PROTO(int nonce),
+
+ TP_ARGS(nonce),
+
+ TP_STRUCT__entry(
+ __field(int, nonce)
+ ),
+
+ TP_fast_assign(
+ __entry->nonce = nonce;
+ ),
+
+ TP_printk("nonce %d", __entry->nonce)
+);
+
DECLARE_EVENT_CLASS(bpf_test_finish,
TP_PROTO(int *err),
diff --git a/include/trace/events/bridge.h b/include/trace/events/bridge.h
index 6b200059c2c5..3fe4725c83ff 100644
--- a/include/trace/events/bridge.h
+++ b/include/trace/events/bridge.h
@@ -25,7 +25,7 @@ TRACE_EVENT(br_fdb_add,
),
TP_fast_assign(
- __assign_str(dev, dev->name);
+ __assign_str(dev);
memcpy(__entry->addr, addr, ETH_ALEN);
__entry->vid = vid;
__entry->nlh_flags = nlh_flags;
@@ -54,8 +54,8 @@ TRACE_EVENT(br_fdb_external_learn_add,
),
TP_fast_assign(
- __assign_str(br_dev, br->dev->name);
- __assign_str(dev, p ? p->dev->name : "null");
+ __assign_str(br_dev);
+ __assign_str(dev);
memcpy(__entry->addr, addr, ETH_ALEN);
__entry->vid = vid;
),
@@ -80,8 +80,8 @@ TRACE_EVENT(fdb_delete,
),
TP_fast_assign(
- __assign_str(br_dev, br->dev->name);
- __assign_str(dev, f->dst ? f->dst->dev->name : "null");
+ __assign_str(br_dev);
+ __assign_str(dev);
memcpy(__entry->addr, f->key.addr.addr, ETH_ALEN);
__entry->vid = f->key.vlan_id;
),
@@ -108,8 +108,8 @@ TRACE_EVENT(br_fdb_update,
),
TP_fast_assign(
- __assign_str(br_dev, br->dev->name);
- __assign_str(dev, source->dev->name);
+ __assign_str(br_dev);
+ __assign_str(dev);
memcpy(__entry->addr, addr, ETH_ALEN);
__entry->vid = vid;
__entry->flags = flags;
@@ -122,6 +122,64 @@ TRACE_EVENT(br_fdb_update,
__entry->flags)
);
+TRACE_EVENT(br_mdb_full,
+
+ TP_PROTO(const struct net_device *dev,
+ const struct br_ip *group),
+
+ TP_ARGS(dev, group),
+
+ TP_STRUCT__entry(
+ __string(dev, dev->name)
+ __field(int, af)
+ __field(u16, vid)
+ __array(__u8, src, 16)
+ __array(__u8, grp, 16)
+ __array(__u8, grpmac, ETH_ALEN) /* For af == 0. */
+ ),
+
+ TP_fast_assign(
+ struct in6_addr *in6;
+
+ __assign_str(dev);
+ __entry->vid = group->vid;
+
+ if (!group->proto) {
+ __entry->af = 0;
+
+ memset(__entry->src, 0, sizeof(__entry->src));
+ memset(__entry->grp, 0, sizeof(__entry->grp));
+ memcpy(__entry->grpmac, group->dst.mac_addr, ETH_ALEN);
+ } else if (group->proto == htons(ETH_P_IP)) {
+ __entry->af = AF_INET;
+
+ in6 = (struct in6_addr *)__entry->src;
+ ipv6_addr_set_v4mapped(group->src.ip4, in6);
+
+ in6 = (struct in6_addr *)__entry->grp;
+ ipv6_addr_set_v4mapped(group->dst.ip4, in6);
+
+ memset(__entry->grpmac, 0, ETH_ALEN);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ __entry->af = AF_INET6;
+
+ in6 = (struct in6_addr *)__entry->src;
+ *in6 = group->src.ip6;
+
+ in6 = (struct in6_addr *)__entry->grp;
+ *in6 = group->dst.ip6;
+
+ memset(__entry->grpmac, 0, ETH_ALEN);
+#endif
+ }
+ ),
+
+ TP_printk("dev %s af %u src %pI6c grp %pI6c/%pM vid %u",
+ __get_str(dev), __entry->af, __entry->src, __entry->grp,
+ __entry->grpmac, __entry->vid)
+);
#endif /* _TRACE_BRIDGE_H */
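The new br_mdb_full event stores IPv4 groups in v4-mapped IPv6 form so a single %pI6c format covers both address families, and falls back to the group MAC when the protocol is zero. A sketch of the generated call, with the wrapper name assumed rather than taken from the bridge code:

/* Sketch: firing br_mdb_full when an MDB table hits its limit.  For an
 * IPv4 group the printed "grp" appears v4-mapped, e.g. ::ffff:239.1.1.1.
 */
#include <trace/events/bridge.h>

static void example_report_mdb_full(const struct net_device *dev,
                                    const struct br_ip *group)
{
        trace_br_mdb_full(dev, group);
}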
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 6548b5b5aa60..7e418f065b94 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -16,12 +16,10 @@ struct extent_map;
struct btrfs_file_extent_item;
struct btrfs_ordered_extent;
struct btrfs_delayed_ref_node;
-struct btrfs_delayed_tree_ref;
-struct btrfs_delayed_data_ref;
struct btrfs_delayed_ref_head;
struct btrfs_block_group;
struct btrfs_free_cluster;
-struct map_lookup;
+struct btrfs_chunk_map;
struct extent_buffer;
struct btrfs_work;
struct btrfs_workqueue;
@@ -32,12 +30,12 @@ struct prelim_ref;
struct btrfs_space_info;
struct btrfs_raid_bio;
struct raid56_bio_trace_info;
+struct find_free_extent_ctl;
#define show_ref_type(type) \
__print_symbolic(type, \
{ BTRFS_TREE_BLOCK_REF_KEY, "TREE_BLOCK_REF" }, \
{ BTRFS_EXTENT_DATA_REF_KEY, "EXTENT_DATA_REF" }, \
- { BTRFS_EXTENT_REF_V0_KEY, "EXTENT_REF_V0" }, \
{ BTRFS_SHARED_BLOCK_REF_KEY, "SHARED_BLOCK_REF" }, \
{ BTRFS_SHARED_DATA_REF_KEY, "SHARED_DATA_REF" })
@@ -102,7 +100,8 @@ struct raid56_bio_trace_info;
EM( ALLOC_CHUNK, "ALLOC_CHUNK") \
EM( ALLOC_CHUNK_FORCE, "ALLOC_CHUNK_FORCE") \
EM( RUN_DELAYED_IPUTS, "RUN_DELAYED_IPUTS") \
- EMe(COMMIT_TRANS, "COMMIT_TRANS")
+ EM( COMMIT_TRANS, "COMMIT_TRANS") \
+ EMe(RESET_ZONES, "RESET_ZONES")
/*
* First define the enums in the above macros to be exported to userspace via
@@ -144,9 +143,9 @@ FLUSH_STATES
#define EXTENT_FLAGS \
{ EXTENT_DIRTY, "DIRTY"}, \
- { EXTENT_UPTODATE, "UPTODATE"}, \
{ EXTENT_LOCKED, "LOCKED"}, \
- { EXTENT_NEW, "NEW"}, \
+ { EXTENT_DIRTY_LOG1, "DIRTY_LOG1"}, \
+ { EXTENT_DIRTY_LOG2, "DIRTY_LOG2"}, \
{ EXTENT_DELALLOC, "DELALLOC"}, \
{ EXTENT_DEFRAG, "DEFRAG"}, \
{ EXTENT_BOUNDARY, "BOUNDARY"}, \
@@ -225,8 +224,7 @@ DECLARE_EVENT_CLASS(btrfs__inode,
__entry->generation = BTRFS_I(inode)->generation;
__entry->last_trans = BTRFS_I(inode)->last_trans;
__entry->logged_trans = BTRFS_I(inode)->logged_trans;
- __entry->root_objectid =
- BTRFS_I(inode)->root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(BTRFS_I(inode)->root);
),
TP_printk_btrfs("root=%llu(%s) gen=%llu ino=%llu blocks=%llu "
@@ -265,20 +263,19 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
__print_symbolic_u64(type, \
{ EXTENT_MAP_LAST_BYTE, "LAST_BYTE" }, \
{ EXTENT_MAP_HOLE, "HOLE" }, \
- { EXTENT_MAP_INLINE, "INLINE" }, \
- { EXTENT_MAP_DELALLOC, "DELALLOC" })
+ { EXTENT_MAP_INLINE, "INLINE" })
#define show_map_type(type) \
type, (type >= EXTENT_MAP_LAST_BYTE) ? "-" : __show_map_type(type)
#define show_map_flags(flag) \
__print_flags(flag, "|", \
- { (1 << EXTENT_FLAG_PINNED), "PINNED" },\
- { (1 << EXTENT_FLAG_COMPRESSED), "COMPRESSED" },\
- { (1 << EXTENT_FLAG_PREALLOC), "PREALLOC" },\
- { (1 << EXTENT_FLAG_LOGGING), "LOGGING" },\
- { (1 << EXTENT_FLAG_FILLING), "FILLING" },\
- { (1 << EXTENT_FLAG_FS_MAPPING), "FS_MAPPING" })
+ { EXTENT_FLAG_PINNED, "PINNED" },\
+ { EXTENT_FLAG_COMPRESS_ZLIB, "COMPRESS_ZLIB" },\
+ { EXTENT_FLAG_COMPRESS_LZO, "COMPRESS_LZO" },\
+ { EXTENT_FLAG_COMPRESS_ZSTD, "COMPRESS_ZSTD" },\
+ { EXTENT_FLAG_PREALLOC, "PREALLOC" },\
+ { EXTENT_FLAG_LOGGING, "LOGGING" })
TRACE_EVENT_CONDITION(btrfs_get_extent,
@@ -294,40 +291,26 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
__field( u64, ino )
__field( u64, start )
__field( u64, len )
- __field( u64, orig_start )
- __field( u64, block_start )
- __field( u64, block_len )
- __field( unsigned long, flags )
+ __field( u32, flags )
__field( int, refs )
- __field( unsigned int, compress_type )
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->root_objectid = root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(root);
__entry->ino = btrfs_ino(inode);
__entry->start = map->start;
__entry->len = map->len;
- __entry->orig_start = map->orig_start;
- __entry->block_start = map->block_start;
- __entry->block_len = map->block_len;
__entry->flags = map->flags;
__entry->refs = refcount_read(&map->refs);
- __entry->compress_type = map->compress_type;
),
- TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu len=%llu "
- "orig_start=%llu block_start=%llu(%s) "
- "block_len=%llu flags=%s refs=%u "
- "compress_type=%u",
+ TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu len=%llu flags=%s refs=%u",
show_root_type(__entry->root_objectid),
__entry->ino,
__entry->start,
__entry->len,
- __entry->orig_start,
- show_map_type(__entry->block_start),
- __entry->block_len,
show_map_flags(__entry->flags),
- __entry->refs, __entry->compress_type)
+ __entry->refs)
);
TRACE_EVENT(btrfs_handle_em_exist,
@@ -392,7 +375,7 @@ DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular,
),
TP_fast_assign_btrfs(bi->root->fs_info,
- __entry->root_obj = bi->root->root_key.objectid;
+ __entry->root_obj = btrfs_root_id(bi->root);
__entry->ino = btrfs_ino(bi);
__entry->isize = bi->vfs_inode.i_size;
__entry->disk_isize = bi->disk_i_size;
@@ -443,7 +426,7 @@ DECLARE_EVENT_CLASS(
TP_fast_assign_btrfs(
bi->root->fs_info,
- __entry->root_obj = bi->root->root_key.objectid;
+ __entry->root_obj = btrfs_root_id(bi->root);
__entry->ino = btrfs_ino(bi);
__entry->isize = bi->vfs_inode.i_size;
__entry->disk_isize = bi->disk_i_size;
@@ -543,7 +526,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
__entry->flags = ordered->flags;
__entry->compress_type = ordered->compress_type;
__entry->refs = refcount_read(&ordered->refs);
- __entry->root_objectid = inode->root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(inode->root);
__entry->truncated_len = ordered->truncated_len;
),
@@ -660,12 +643,41 @@ DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_mark_finished,
TP_ARGS(inode, ordered)
);
+TRACE_EVENT(btrfs_finish_ordered_extent,
+
+ TP_PROTO(const struct btrfs_inode *inode, u64 start, u64 len,
+ bool uptodate),
+
+ TP_ARGS(inode, start, len, uptodate),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, ino )
+ __field( u64, start )
+ __field( u64, len )
+ __field( bool, uptodate )
+ __field( u64, root_objectid )
+ ),
+
+ TP_fast_assign_btrfs(inode->root->fs_info,
+ __entry->ino = btrfs_ino(inode);
+ __entry->start = start;
+ __entry->len = len;
+ __entry->uptodate = uptodate;
+ __entry->root_objectid = btrfs_root_id(inode->root);
+ ),
+
+ TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu len=%llu uptodate=%d",
+ show_root_type(__entry->root_objectid),
+ __entry->ino, __entry->start,
+ __entry->len, !!__entry->uptodate)
+);
+
DECLARE_EVENT_CLASS(btrfs__writepage,
- TP_PROTO(const struct page *page, const struct inode *inode,
+ TP_PROTO(const struct folio *folio, const struct inode *inode,
const struct writeback_control *wbc),
- TP_ARGS(page, inode, wbc),
+ TP_ARGS(folio, inode, wbc),
TP_STRUCT__entry_btrfs(
__field( u64, ino )
@@ -675,7 +687,6 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
__field( loff_t, range_start )
__field( loff_t, range_end )
__field( char, for_kupdate )
- __field( char, for_reclaim )
__field( char, range_cyclic )
__field( unsigned long, writeback_index )
__field( u64, root_objectid )
@@ -683,38 +694,35 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
__entry->ino = btrfs_ino(BTRFS_I(inode));
- __entry->index = page->index;
+ __entry->index = folio->index;
__entry->nr_to_write = wbc->nr_to_write;
__entry->pages_skipped = wbc->pages_skipped;
__entry->range_start = wbc->range_start;
__entry->range_end = wbc->range_end;
__entry->for_kupdate = wbc->for_kupdate;
- __entry->for_reclaim = wbc->for_reclaim;
__entry->range_cyclic = wbc->range_cyclic;
__entry->writeback_index = inode->i_mapping->writeback_index;
- __entry->root_objectid =
- BTRFS_I(inode)->root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(BTRFS_I(inode)->root);
),
TP_printk_btrfs("root=%llu(%s) ino=%llu page_index=%lu "
"nr_to_write=%ld pages_skipped=%ld range_start=%llu "
"range_end=%llu for_kupdate=%d "
- "for_reclaim=%d range_cyclic=%d writeback_index=%lu",
+ "range_cyclic=%d writeback_index=%lu",
show_root_type(__entry->root_objectid),
__entry->ino, __entry->index,
__entry->nr_to_write, __entry->pages_skipped,
__entry->range_start, __entry->range_end,
- __entry->for_kupdate,
- __entry->for_reclaim, __entry->range_cyclic,
+ __entry->for_kupdate, __entry->range_cyclic,
__entry->writeback_index)
);
-DEFINE_EVENT(btrfs__writepage, __extent_writepage,
+DEFINE_EVENT(btrfs__writepage, extent_writepage,
- TP_PROTO(const struct page *page, const struct inode *inode,
+ TP_PROTO(const struct folio *folio, const struct inode *inode,
const struct writeback_control *wbc),
- TP_ARGS(page, inode, wbc)
+ TP_ARGS(folio, inode, wbc)
);
TRACE_EVENT(btrfs_writepage_end_io_hook,
@@ -737,7 +745,7 @@ TRACE_EVENT(btrfs_writepage_end_io_hook,
__entry->start = start;
__entry->end = end;
__entry->uptodate = uptodate;
- __entry->root_objectid = inode->root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(inode->root);
),
TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu end=%llu uptodate=%d",
@@ -767,8 +775,7 @@ TRACE_EVENT(btrfs_sync_file,
__entry->ino = btrfs_ino(BTRFS_I(inode));
__entry->parent = btrfs_ino(BTRFS_I(d_inode(dentry->d_parent)));
__entry->datasync = datasync;
- __entry->root_objectid =
- BTRFS_I(inode)->root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(BTRFS_I(inode)->root);
),
TP_printk_btrfs("root=%llu(%s) ino=%llu parent=%llu datasync=%d",
@@ -843,11 +850,9 @@ TRACE_EVENT(btrfs_add_block_group,
DECLARE_EVENT_CLASS(btrfs_delayed_tree_ref,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_delayed_ref_node *ref,
- const struct btrfs_delayed_tree_ref *full_ref,
- int action),
+ const struct btrfs_delayed_ref_node *ref),
- TP_ARGS(fs_info, ref, full_ref, action),
+ TP_ARGS(fs_info, ref),
TP_STRUCT__entry_btrfs(
__field( u64, bytenr )
@@ -863,10 +868,10 @@ DECLARE_EVENT_CLASS(btrfs_delayed_tree_ref,
TP_fast_assign_btrfs(fs_info,
__entry->bytenr = ref->bytenr;
__entry->num_bytes = ref->num_bytes;
- __entry->action = action;
- __entry->parent = full_ref->parent;
- __entry->ref_root = full_ref->root;
- __entry->level = full_ref->level;
+ __entry->action = ref->action;
+ __entry->parent = ref->parent;
+ __entry->ref_root = ref->ref_root;
+ __entry->level = ref->tree_ref.level;
__entry->type = ref->type;
__entry->seq = ref->seq;
),
@@ -886,31 +891,25 @@ DECLARE_EVENT_CLASS(btrfs_delayed_tree_ref,
DEFINE_EVENT(btrfs_delayed_tree_ref, add_delayed_tree_ref,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_delayed_ref_node *ref,
- const struct btrfs_delayed_tree_ref *full_ref,
- int action),
+ const struct btrfs_delayed_ref_node *ref),
- TP_ARGS(fs_info, ref, full_ref, action)
+ TP_ARGS(fs_info, ref)
);
DEFINE_EVENT(btrfs_delayed_tree_ref, run_delayed_tree_ref,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_delayed_ref_node *ref,
- const struct btrfs_delayed_tree_ref *full_ref,
- int action),
+ const struct btrfs_delayed_ref_node *ref),
- TP_ARGS(fs_info, ref, full_ref, action)
+ TP_ARGS(fs_info, ref)
);
DECLARE_EVENT_CLASS(btrfs_delayed_data_ref,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_delayed_ref_node *ref,
- const struct btrfs_delayed_data_ref *full_ref,
- int action),
+ const struct btrfs_delayed_ref_node *ref),
- TP_ARGS(fs_info, ref, full_ref, action),
+ TP_ARGS(fs_info, ref),
TP_STRUCT__entry_btrfs(
__field( u64, bytenr )
@@ -927,11 +926,11 @@ DECLARE_EVENT_CLASS(btrfs_delayed_data_ref,
TP_fast_assign_btrfs(fs_info,
__entry->bytenr = ref->bytenr;
__entry->num_bytes = ref->num_bytes;
- __entry->action = action;
- __entry->parent = full_ref->parent;
- __entry->ref_root = full_ref->root;
- __entry->owner = full_ref->objectid;
- __entry->offset = full_ref->offset;
+ __entry->action = ref->action;
+ __entry->parent = ref->parent;
+ __entry->ref_root = ref->ref_root;
+ __entry->owner = ref->data_ref.objectid;
+ __entry->offset = ref->data_ref.offset;
__entry->type = ref->type;
__entry->seq = ref->seq;
),
@@ -953,21 +952,17 @@ DECLARE_EVENT_CLASS(btrfs_delayed_data_ref,
DEFINE_EVENT(btrfs_delayed_data_ref, add_delayed_data_ref,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_delayed_ref_node *ref,
- const struct btrfs_delayed_data_ref *full_ref,
- int action),
+ const struct btrfs_delayed_ref_node *ref),
- TP_ARGS(fs_info, ref, full_ref, action)
+ TP_ARGS(fs_info, ref)
);
DEFINE_EVENT(btrfs_delayed_data_ref, run_delayed_data_ref,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_delayed_ref_node *ref,
- const struct btrfs_delayed_data_ref *full_ref,
- int action),
+ const struct btrfs_delayed_ref_node *ref),
- TP_ARGS(fs_info, ref, full_ref, action)
+ TP_ARGS(fs_info, ref)
);
DECLARE_EVENT_CLASS(btrfs_delayed_ref_head,
@@ -1032,7 +1027,7 @@ DEFINE_EVENT(btrfs_delayed_ref_head, run_delayed_ref_head,
DECLARE_EVENT_CLASS(btrfs__chunk,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct map_lookup *map, u64 offset, u64 size),
+ const struct btrfs_chunk_map *map, u64 offset, u64 size),
TP_ARGS(fs_info, map, offset, size),
@@ -1051,7 +1046,7 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
__entry->sub_stripes = map->sub_stripes;
__entry->offset = offset;
__entry->size = size;
- __entry->root_objectid = fs_info->chunk_root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(fs_info->chunk_root);
),
TP_printk_btrfs("root=%llu(%s) offset=%llu size=%llu "
@@ -1066,7 +1061,7 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
DEFINE_EVENT(btrfs__chunk, btrfs_chunk_alloc,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct map_lookup *map, u64 offset, u64 size),
+ const struct btrfs_chunk_map *map, u64 offset, u64 size),
TP_ARGS(fs_info, map, offset, size)
);
@@ -1074,7 +1069,7 @@ DEFINE_EVENT(btrfs__chunk, btrfs_chunk_alloc,
DEFINE_EVENT(btrfs__chunk, btrfs_chunk_free,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct map_lookup *map, u64 offset, u64 size),
+ const struct btrfs_chunk_map *map, u64 offset, u64 size),
TP_ARGS(fs_info, map, offset, size)
);
@@ -1096,9 +1091,9 @@ TRACE_EVENT(btrfs_cow_block,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->root_objectid = root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(root);
__entry->buf_start = buf->start;
- __entry->refs = atomic_read(&buf->refs);
+ __entry->refs = refcount_read(&buf->refs);
__entry->cow_start = cow->start;
__entry->buf_level = btrfs_header_level(buf);
__entry->cow_level = btrfs_header_level(cow);
@@ -1129,7 +1124,7 @@ TRACE_EVENT(btrfs_space_reservation,
),
TP_fast_assign_btrfs(fs_info,
- __assign_str(type, type);
+ __assign_str(type);
__entry->val = val;
__entry->bytes = bytes;
__entry->reserve = reserve;
@@ -1158,7 +1153,7 @@ TRACE_EVENT(btrfs_trigger_flush,
__entry->flags = flags;
__entry->bytes = bytes;
__entry->flush = flush;
- __assign_str(reason, reason);
+ __assign_str(reason);
),
TP_printk_btrfs("%s: flush=%d(%s) flags=%llu(%s) bytes=%llu",
@@ -1239,78 +1234,158 @@ DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_free,
TP_ARGS(fs_info, start, len)
);
-TRACE_EVENT(find_free_extent,
+TRACE_EVENT(btrfs_find_free_extent,
- TP_PROTO(const struct btrfs_root *root, u64 num_bytes,
- u64 empty_size, u64 data),
+ TP_PROTO(const struct btrfs_root *root,
+ const struct find_free_extent_ctl *ffe_ctl),
- TP_ARGS(root, num_bytes, empty_size, data),
+ TP_ARGS(root, ffe_ctl),
TP_STRUCT__entry_btrfs(
__field( u64, root_objectid )
__field( u64, num_bytes )
__field( u64, empty_size )
- __field( u64, data )
+ __field( u64, flags )
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->root_objectid = root->root_key.objectid;
- __entry->num_bytes = num_bytes;
- __entry->empty_size = empty_size;
- __entry->data = data;
+ __entry->root_objectid = btrfs_root_id(root);
+ __entry->num_bytes = ffe_ctl->num_bytes;
+ __entry->empty_size = ffe_ctl->empty_size;
+ __entry->flags = ffe_ctl->flags;
),
TP_printk_btrfs("root=%llu(%s) len=%llu empty_size=%llu flags=%llu(%s)",
show_root_type(__entry->root_objectid),
- __entry->num_bytes, __entry->empty_size, __entry->data,
- __print_flags((unsigned long)__entry->data, "|",
+ __entry->num_bytes, __entry->empty_size, __entry->flags,
+ __print_flags((unsigned long)__entry->flags, "|",
+ BTRFS_GROUP_FLAGS))
+);
+
+TRACE_EVENT(btrfs_find_free_extent_search_loop,
+
+ TP_PROTO(const struct btrfs_root *root,
+ const struct find_free_extent_ctl *ffe_ctl),
+
+ TP_ARGS(root, ffe_ctl),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, root_objectid )
+ __field( u64, num_bytes )
+ __field( u64, empty_size )
+ __field( u64, flags )
+ __field( u64, loop )
+ ),
+
+ TP_fast_assign_btrfs(root->fs_info,
+ __entry->root_objectid = btrfs_root_id(root);
+ __entry->num_bytes = ffe_ctl->num_bytes;
+ __entry->empty_size = ffe_ctl->empty_size;
+ __entry->flags = ffe_ctl->flags;
+ __entry->loop = ffe_ctl->loop;
+ ),
+
+ TP_printk_btrfs("root=%llu(%s) len=%llu empty_size=%llu flags=%llu(%s) loop=%llu",
+ show_root_type(__entry->root_objectid),
+ __entry->num_bytes, __entry->empty_size, __entry->flags,
+ __print_flags((unsigned long)__entry->flags, "|", BTRFS_GROUP_FLAGS),
+ __entry->loop)
+);
+
+TRACE_EVENT(btrfs_find_free_extent_have_block_group,
+
+ TP_PROTO(const struct btrfs_root *root,
+ const struct find_free_extent_ctl *ffe_ctl,
+ const struct btrfs_block_group *block_group),
+
+ TP_ARGS(root, ffe_ctl, block_group),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, root_objectid )
+ __field( u64, num_bytes )
+ __field( u64, empty_size )
+ __field( u64, flags )
+ __field( u64, loop )
+ __field( bool, hinted )
+ __field( u64, bg_start )
+ __field( u64, bg_flags )
+ ),
+
+ TP_fast_assign_btrfs(root->fs_info,
+ __entry->root_objectid = btrfs_root_id(root);
+ __entry->num_bytes = ffe_ctl->num_bytes;
+ __entry->empty_size = ffe_ctl->empty_size;
+ __entry->flags = ffe_ctl->flags;
+ __entry->loop = ffe_ctl->loop;
+ __entry->hinted = ffe_ctl->hinted;
+ __entry->bg_start = block_group->start;
+ __entry->bg_flags = block_group->flags;
+ ),
+
+ TP_printk_btrfs(
+"root=%llu(%s) len=%llu empty_size=%llu flags=%llu(%s) loop=%llu hinted=%d block_group=%llu bg_flags=%llu(%s)",
+ show_root_type(__entry->root_objectid),
+ __entry->num_bytes, __entry->empty_size, __entry->flags,
+ __print_flags((unsigned long)__entry->flags, "|", BTRFS_GROUP_FLAGS),
+ __entry->loop, __entry->hinted,
+ __entry->bg_start, __entry->bg_flags,
+ __print_flags((unsigned long)__entry->bg_flags, "|",
BTRFS_GROUP_FLAGS))
);
DECLARE_EVENT_CLASS(btrfs__reserve_extent,
- TP_PROTO(const struct btrfs_block_group *block_group, u64 start,
- u64 len),
+ TP_PROTO(const struct btrfs_block_group *block_group,
+ const struct find_free_extent_ctl *ffe_ctl),
- TP_ARGS(block_group, start, len),
+ TP_ARGS(block_group, ffe_ctl),
TP_STRUCT__entry_btrfs(
__field( u64, bg_objectid )
__field( u64, flags )
+ __field( int, bg_size_class )
__field( u64, start )
__field( u64, len )
+ __field( u64, loop )
+ __field( bool, hinted )
+ __field( int, size_class )
),
TP_fast_assign_btrfs(block_group->fs_info,
__entry->bg_objectid = block_group->start;
__entry->flags = block_group->flags;
- __entry->start = start;
- __entry->len = len;
+ __entry->bg_size_class = block_group->size_class;
+ __entry->start = ffe_ctl->search_start;
+ __entry->len = ffe_ctl->num_bytes;
+ __entry->loop = ffe_ctl->loop;
+ __entry->hinted = ffe_ctl->hinted;
+ __entry->size_class = ffe_ctl->size_class;
),
- TP_printk_btrfs("root=%llu(%s) block_group=%llu flags=%llu(%s) "
- "start=%llu len=%llu",
+ TP_printk_btrfs(
+"root=%llu(%s) block_group=%llu flags=%llu(%s) bg_size_class=%d start=%llu len=%llu loop=%llu hinted=%d size_class=%d",
show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
__entry->bg_objectid,
__entry->flags, __print_flags((unsigned long)__entry->flags,
"|", BTRFS_GROUP_FLAGS),
- __entry->start, __entry->len)
+ __entry->bg_size_class, __entry->start, __entry->len,
+ __entry->loop, __entry->hinted, __entry->size_class)
);
DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent,
- TP_PROTO(const struct btrfs_block_group *block_group, u64 start,
- u64 len),
+ TP_PROTO(const struct btrfs_block_group *block_group,
+ const struct find_free_extent_ctl *ffe_ctl),
- TP_ARGS(block_group, start, len)
+ TP_ARGS(block_group, ffe_ctl)
);
DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster,
- TP_PROTO(const struct btrfs_block_group *block_group, u64 start,
- u64 len),
+ TP_PROTO(const struct btrfs_block_group *block_group,
+ const struct find_free_extent_ctl *ffe_ctl),
- TP_ARGS(block_group, start, len)
+ TP_ARGS(block_group, ffe_ctl)
);
TRACE_EVENT(btrfs_find_cluster,
@@ -1399,7 +1474,7 @@ TRACE_EVENT(btrfs_setup_cluster,
);
struct extent_state;
-TRACE_EVENT(alloc_extent_state,
+TRACE_EVENT(btrfs_alloc_extent_state,
TP_PROTO(const struct extent_state *state,
gfp_t mask, unsigned long IP),
@@ -1422,7 +1497,7 @@ TRACE_EVENT(alloc_extent_state,
show_gfp_flags(__entry->mask), __entry->ip)
);
-TRACE_EVENT(free_extent_state,
+TRACE_EVENT(btrfs_free_extent_state,
TP_PROTO(const struct extent_state *state, unsigned long IP),
@@ -1452,7 +1527,6 @@ DECLARE_EVENT_CLASS(btrfs__work,
__field( const void *, wq )
__field( const void *, func )
__field( const void *, ordered_func )
- __field( const void *, ordered_free )
__field( const void *, normal_work )
),
@@ -1461,14 +1535,12 @@ DECLARE_EVENT_CLASS(btrfs__work,
__entry->wq = work->wq;
__entry->func = work->func;
__entry->ordered_func = work->ordered_func;
- __entry->ordered_free = work->ordered_free;
__entry->normal_work = &work->normal_work;
),
- TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%ps ordered_func=%p "
- "ordered_free=%p",
+ TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%ps ordered_func=%p",
__entry->work, __entry->normal_work, __entry->wq,
- __entry->func, __entry->ordered_func, __entry->ordered_free)
+ __entry->func, __entry->ordered_func)
);
/*
@@ -1534,7 +1606,7 @@ DECLARE_EVENT_CLASS(btrfs_workqueue,
TP_fast_assign_btrfs(btrfs_workqueue_owner(wq),
__entry->wq = wq;
- __assign_str(name, name);
+ __assign_str(name);
),
TP_printk_btrfs("name=%s wq=%p", __get_str(name),
@@ -1594,8 +1666,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_rsv_data,
),
TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
- __entry->rootid =
- BTRFS_I(inode)->root->root_key.objectid;
+ __entry->rootid = btrfs_root_id(BTRFS_I(inode)->root);
__entry->ino = btrfs_ino(BTRFS_I(inode));
__entry->start = start;
__entry->len = len;
@@ -1629,9 +1700,10 @@ DEFINE_EVENT(btrfs__qgroup_rsv_data, btrfs_qgroup_release_data,
DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_qgroup_extent_record *rec),
+ const struct btrfs_qgroup_extent_record *rec,
+ u64 bytenr),
- TP_ARGS(fs_info, rec),
+ TP_ARGS(fs_info, rec, bytenr),
TP_STRUCT__entry_btrfs(
__field( u64, bytenr )
@@ -1639,7 +1711,7 @@ DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
),
TP_fast_assign_btrfs(fs_info,
- __entry->bytenr = rec->bytenr,
+ __entry->bytenr = bytenr;
__entry->num_bytes = rec->num_bytes;
),
@@ -1650,20 +1722,22 @@ DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_account_extents,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_qgroup_extent_record *rec),
+ const struct btrfs_qgroup_extent_record *rec,
+ u64 bytenr),
- TP_ARGS(fs_info, rec)
+ TP_ARGS(fs_info, rec, bytenr)
);
DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_trace_extent,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_qgroup_extent_record *rec),
+ const struct btrfs_qgroup_extent_record *rec,
+ u64 bytenr),
- TP_ARGS(fs_info, rec)
+ TP_ARGS(fs_info, rec, bytenr)
);
-TRACE_EVENT(qgroup_num_dirty_extents,
+TRACE_EVENT(btrfs_qgroup_num_dirty_extents,
TP_PROTO(const struct btrfs_fs_info *fs_info, u64 transid,
u64 num_dirty_extents),
@@ -1717,7 +1791,7 @@ TRACE_EVENT(btrfs_qgroup_account_extent,
__entry->nr_new_roots)
);
-TRACE_EVENT(qgroup_update_counters,
+TRACE_EVENT(btrfs_qgroup_update_counters,
TP_PROTO(const struct btrfs_fs_info *fs_info,
const struct btrfs_qgroup *qgroup,
@@ -1746,9 +1820,9 @@ TRACE_EVENT(qgroup_update_counters,
__entry->cur_old_count, __entry->cur_new_count)
);
-TRACE_EVENT(qgroup_update_reserve,
+TRACE_EVENT(btrfs_qgroup_update_reserve,
- TP_PROTO(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup,
+ TP_PROTO(const struct btrfs_fs_info *fs_info, const struct btrfs_qgroup *qgroup,
s64 diff, int type),
TP_ARGS(fs_info, qgroup, diff, type),
@@ -1772,9 +1846,9 @@ TRACE_EVENT(qgroup_update_reserve,
__entry->cur_reserved, __entry->diff)
);
-TRACE_EVENT(qgroup_meta_reserve,
+TRACE_EVENT(btrfs_qgroup_meta_reserve,
- TP_PROTO(struct btrfs_root *root, s64 diff, int type),
+ TP_PROTO(const struct btrfs_root *root, s64 diff, int type),
TP_ARGS(root, diff, type),
@@ -1785,7 +1859,7 @@ TRACE_EVENT(qgroup_meta_reserve,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->refroot = root->root_key.objectid;
+ __entry->refroot = btrfs_root_id(root);
__entry->diff = diff;
__entry->type = type;
),
@@ -1795,9 +1869,9 @@ TRACE_EVENT(qgroup_meta_reserve,
__print_symbolic(__entry->type, QGROUP_RSV_TYPES), __entry->diff)
);
-TRACE_EVENT(qgroup_meta_convert,
+TRACE_EVENT(btrfs_qgroup_meta_convert,
- TP_PROTO(struct btrfs_root *root, s64 diff),
+ TP_PROTO(const struct btrfs_root *root, s64 diff),
TP_ARGS(root, diff),
@@ -1807,7 +1881,7 @@ TRACE_EVENT(qgroup_meta_convert,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->refroot = root->root_key.objectid;
+ __entry->refroot = btrfs_root_id(root);
__entry->diff = diff;
),
@@ -1818,7 +1892,7 @@ TRACE_EVENT(qgroup_meta_convert,
__entry->diff)
);
-TRACE_EVENT(qgroup_meta_free_all_pertrans,
+TRACE_EVENT(btrfs_qgroup_meta_free_all_pertrans,
TP_PROTO(struct btrfs_root *root),
@@ -1831,7 +1905,7 @@ TRACE_EVENT(qgroup_meta_free_all_pertrans,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->refroot = root->root_key.objectid;
+ __entry->refroot = btrfs_root_id(root);
spin_lock(&root->qgroup_meta_rsv_lock);
__entry->diff = -(s64)root->qgroup_meta_rsv_pertrans;
spin_unlock(&root->qgroup_meta_rsv_lock);
@@ -1847,7 +1921,7 @@ DECLARE_EVENT_CLASS(btrfs__prelim_ref,
TP_PROTO(const struct btrfs_fs_info *fs_info,
const struct prelim_ref *oldref,
const struct prelim_ref *newref, u64 tree_size),
- TP_ARGS(fs_info, newref, oldref, tree_size),
+ TP_ARGS(fs_info, oldref, newref, tree_size),
TP_STRUCT__entry_btrfs(
__field( u64, root_id )
@@ -1901,25 +1975,27 @@ DEFINE_EVENT(btrfs__prelim_ref, btrfs_prelim_ref_insert,
);
TRACE_EVENT(btrfs_inode_mod_outstanding_extents,
- TP_PROTO(const struct btrfs_root *root, u64 ino, int mod),
+ TP_PROTO(const struct btrfs_root *root, u64 ino, int mod, unsigned outstanding),
- TP_ARGS(root, ino, mod),
+ TP_ARGS(root, ino, mod, outstanding),
TP_STRUCT__entry_btrfs(
__field( u64, root_objectid )
__field( u64, ino )
__field( int, mod )
+ __field( unsigned, outstanding )
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->root_objectid = root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(root);
__entry->ino = ino;
__entry->mod = mod;
+ __entry->outstanding = outstanding;
),
- TP_printk_btrfs("root=%llu(%s) ino=%llu mod=%d",
+ TP_printk_btrfs("root=%llu(%s) ino=%llu mod=%d outstanding=%u",
show_root_type(__entry->root_objectid),
- __entry->ino, __entry->mod)
+ __entry->ino, __entry->mod, __entry->outstanding)
);
DECLARE_EVENT_CLASS(btrfs__block_group,
@@ -1991,17 +2067,12 @@ TRACE_EVENT(btrfs_set_extent_bit,
__field( unsigned, set_bits)
),
- TP_fast_assign_btrfs(tree->fs_info,
- __entry->owner = tree->owner;
- if (tree->inode) {
- const struct btrfs_inode *inode = tree->inode;
+ TP_fast_assign_btrfs(btrfs_extent_io_tree_to_fs_info(tree),
+ const struct btrfs_inode *inode = btrfs_extent_io_tree_to_inode(tree);
- __entry->ino = btrfs_ino(inode);
- __entry->rootid = inode->root->root_key.objectid;
- } else {
- __entry->ino = 0;
- __entry->rootid = 0;
- }
+ __entry->owner = tree->owner;
+ __entry->ino = inode ? btrfs_ino(inode) : 0;
+ __entry->rootid = inode ? btrfs_root_id(inode->root) : 0;
__entry->start = start;
__entry->len = len;
__entry->set_bits = set_bits;
@@ -2029,17 +2100,12 @@ TRACE_EVENT(btrfs_clear_extent_bit,
__field( unsigned, clear_bits)
),
- TP_fast_assign_btrfs(tree->fs_info,
- __entry->owner = tree->owner;
- if (tree->inode) {
- const struct btrfs_inode *inode = tree->inode;
+ TP_fast_assign_btrfs(btrfs_extent_io_tree_to_fs_info(tree),
+ const struct btrfs_inode *inode = btrfs_extent_io_tree_to_inode(tree);
- __entry->ino = btrfs_ino(inode);
- __entry->rootid = inode->root->root_key.objectid;
- } else {
- __entry->ino = 0;
- __entry->rootid = 0;
- }
+ __entry->owner = tree->owner;
+ __entry->ino = inode ? btrfs_ino(inode) : 0;
+ __entry->rootid = inode ? btrfs_root_id(inode->root) : 0;
__entry->start = start;
__entry->len = len;
__entry->clear_bits = clear_bits;
@@ -2068,17 +2134,12 @@ TRACE_EVENT(btrfs_convert_extent_bit,
__field( unsigned, clear_bits)
),
- TP_fast_assign_btrfs(tree->fs_info,
- __entry->owner = tree->owner;
- if (tree->inode) {
- const struct btrfs_inode *inode = tree->inode;
+ TP_fast_assign_btrfs(btrfs_extent_io_tree_to_fs_info(tree),
+ const struct btrfs_inode *inode = btrfs_extent_io_tree_to_inode(tree);
- __entry->ino = btrfs_ino(inode);
- __entry->rootid = inode->root->root_key.objectid;
- } else {
- __entry->ino = 0;
- __entry->rootid = 0;
- }
+ __entry->owner = tree->owner;
+ __entry->ino = inode ? btrfs_ino(inode) : 0;
+ __entry->rootid = inode ? btrfs_root_id(inode->root) : 0;
__entry->start = start;
__entry->len = len;
__entry->set_bits = set_bits;
@@ -2273,12 +2334,7 @@ DEFINE_EVENT(btrfs_locking_events, name, \
DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_unlock);
DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_unlock);
-DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_unlock_blocking);
-DEFINE_BTRFS_LOCK_EVENT(btrfs_set_lock_blocking_read);
-DEFINE_BTRFS_LOCK_EVENT(btrfs_set_lock_blocking_write);
DEFINE_BTRFS_LOCK_EVENT(btrfs_try_tree_read_lock);
-DEFINE_BTRFS_LOCK_EVENT(btrfs_try_tree_write_lock);
-DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_lock_atomic);
DECLARE_EVENT_CLASS(btrfs__space_info_update,
@@ -2319,6 +2375,14 @@ DEFINE_EVENT(btrfs__space_info_update, update_bytes_pinned,
TP_ARGS(fs_info, sinfo, old, diff)
);
+DEFINE_EVENT(btrfs__space_info_update, update_bytes_zone_unusable,
+
+ TP_PROTO(const struct btrfs_fs_info *fs_info,
+ const struct btrfs_space_info *sinfo, u64 old, s64 diff),
+
+ TP_ARGS(fs_info, sinfo, old, diff)
+);
+
DECLARE_EVENT_CLASS(btrfs_raid56_bio,
TP_PROTO(const struct btrfs_raid_bio *rbio,
@@ -2341,7 +2405,7 @@ DECLARE_EVENT_CLASS(btrfs_raid56_bio,
),
TP_fast_assign_btrfs(rbio->bioc->fs_info,
- __entry->full_stripe = rbio->bioc->raid_map[0];
+ __entry->full_stripe = rbio->bioc->full_stripe_logical;
__entry->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
__entry->len = bio->bi_iter.bi_size;
__entry->opf = bio_op(bio);
@@ -2370,7 +2434,7 @@ DECLARE_EVENT_CLASS(btrfs_raid56_bio,
__entry->offset, __entry->opf, __entry->physical, __entry->len)
);
-DEFINE_EVENT(btrfs_raid56_bio, raid56_read_partial,
+DEFINE_EVENT(btrfs_raid56_bio, raid56_read,
TP_PROTO(const struct btrfs_raid_bio *rbio,
const struct bio *bio,
const struct raid56_bio_trace_info *trace_info),
@@ -2378,7 +2442,7 @@ DEFINE_EVENT(btrfs_raid56_bio, raid56_read_partial,
TP_ARGS(rbio, bio, trace_info)
);
-DEFINE_EVENT(btrfs_raid56_bio, raid56_write_stripe,
+DEFINE_EVENT(btrfs_raid56_bio, raid56_write,
TP_PROTO(const struct btrfs_raid_bio *rbio,
const struct bio *bio,
const struct raid56_bio_trace_info *trace_info),
@@ -2386,29 +2450,176 @@ DEFINE_EVENT(btrfs_raid56_bio, raid56_write_stripe,
TP_ARGS(rbio, bio, trace_info)
);
+TRACE_EVENT(btrfs_insert_one_raid_extent,
-DEFINE_EVENT(btrfs_raid56_bio, raid56_scrub_write_stripe,
- TP_PROTO(const struct btrfs_raid_bio *rbio,
- const struct bio *bio,
- const struct raid56_bio_trace_info *trace_info),
+ TP_PROTO(const struct btrfs_fs_info *fs_info, u64 logical, u64 length,
+ int num_stripes),
- TP_ARGS(rbio, bio, trace_info)
+ TP_ARGS(fs_info, logical, length, num_stripes),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, logical )
+ __field( u64, length )
+ __field( int, num_stripes )
+ ),
+
+ TP_fast_assign_btrfs(fs_info,
+ __entry->logical = logical;
+ __entry->length = length;
+ __entry->num_stripes = num_stripes;
+ ),
+
+ TP_printk_btrfs("logical=%llu length=%llu num_stripes=%d",
+ __entry->logical, __entry->length,
+ __entry->num_stripes)
);
-DEFINE_EVENT(btrfs_raid56_bio, raid56_scrub_read,
- TP_PROTO(const struct btrfs_raid_bio *rbio,
- const struct bio *bio,
- const struct raid56_bio_trace_info *trace_info),
+TRACE_EVENT(btrfs_raid_extent_delete,
- TP_ARGS(rbio, bio, trace_info)
+ TP_PROTO(const struct btrfs_fs_info *fs_info, u64 start, u64 end,
+ u64 found_start, u64 found_end),
+
+ TP_ARGS(fs_info, start, end, found_start, found_end),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, start )
+ __field( u64, end )
+ __field( u64, found_start )
+ __field( u64, found_end )
+ ),
+
+ TP_fast_assign_btrfs(fs_info,
+ __entry->start = start;
+ __entry->end = end;
+ __entry->found_start = found_start;
+ __entry->found_end = found_end;
+ ),
+
+ TP_printk_btrfs("start=%llu end=%llu found_start=%llu found_end=%llu",
+ __entry->start, __entry->end, __entry->found_start,
+ __entry->found_end)
);
-DEFINE_EVENT(btrfs_raid56_bio, raid56_scrub_read_recover,
- TP_PROTO(const struct btrfs_raid_bio *rbio,
- const struct bio *bio,
- const struct raid56_bio_trace_info *trace_info),
+TRACE_EVENT(btrfs_get_raid_extent_offset,
- TP_ARGS(rbio, bio, trace_info)
+ TP_PROTO(const struct btrfs_fs_info *fs_info, u64 logical, u64 length,
+ u64 physical, u64 devid),
+
+ TP_ARGS(fs_info, logical, length, physical, devid),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, logical )
+ __field( u64, length )
+ __field( u64, physical )
+ __field( u64, devid )
+ ),
+
+ TP_fast_assign_btrfs(fs_info,
+ __entry->logical = logical;
+ __entry->length = length;
+ __entry->physical = physical;
+ __entry->devid = devid;
+ ),
+
+ TP_printk_btrfs("logical=%llu length=%llu physical=%llu devid=%llu",
+ __entry->logical, __entry->length, __entry->physical,
+ __entry->devid)
+);
+
+TRACE_EVENT(btrfs_extent_map_shrinker_count,
+
+ TP_PROTO(const struct btrfs_fs_info *fs_info, long nr),
+
+ TP_ARGS(fs_info, nr),
+
+ TP_STRUCT__entry_btrfs(
+ __field( long, nr )
+ ),
+
+ TP_fast_assign_btrfs(fs_info,
+ __entry->nr = nr;
+ ),
+
+ TP_printk_btrfs("nr=%ld", __entry->nr)
+);
+
+TRACE_EVENT(btrfs_extent_map_shrinker_scan_enter,
+
+ TP_PROTO(const struct btrfs_fs_info *fs_info, long nr),
+
+ TP_ARGS(fs_info, nr),
+
+ TP_STRUCT__entry_btrfs(
+ __field( long, nr_to_scan )
+ __field( long, nr )
+ __field( u64, last_root_id )
+ __field( u64, last_ino )
+ ),
+
+ TP_fast_assign_btrfs(fs_info,
+ __entry->nr_to_scan = \
+ atomic64_read(&fs_info->em_shrinker_nr_to_scan);
+ __entry->nr = nr;
+ __entry->last_root_id = fs_info->em_shrinker_last_root;
+ __entry->last_ino = fs_info->em_shrinker_last_ino;
+ ),
+
+ TP_printk_btrfs("nr_to_scan=%ld nr=%ld last_root=%llu(%s) last_ino=%llu",
+ __entry->nr_to_scan, __entry->nr,
+ show_root_type(__entry->last_root_id), __entry->last_ino)
+);
+
+TRACE_EVENT(btrfs_extent_map_shrinker_scan_exit,
+
+ TP_PROTO(const struct btrfs_fs_info *fs_info, long nr_dropped, long nr),
+
+ TP_ARGS(fs_info, nr_dropped, nr),
+
+ TP_STRUCT__entry_btrfs(
+ __field( long, nr_dropped )
+ __field( long, nr )
+ __field( u64, last_root_id )
+ __field( u64, last_ino )
+ ),
+
+ TP_fast_assign_btrfs(fs_info,
+ __entry->nr_dropped = nr_dropped;
+ __entry->nr = nr;
+ __entry->last_root_id = fs_info->em_shrinker_last_root;
+ __entry->last_ino = fs_info->em_shrinker_last_ino;
+ ),
+
+ TP_printk_btrfs("nr_dropped=%ld nr=%ld last_root=%llu(%s) last_ino=%llu",
+ __entry->nr_dropped, __entry->nr,
+ show_root_type(__entry->last_root_id), __entry->last_ino)
+);
+
+TRACE_EVENT(btrfs_extent_map_shrinker_remove_em,
+
+ TP_PROTO(const struct btrfs_inode *inode, const struct extent_map *em),
+
+ TP_ARGS(inode, em),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, ino )
+ __field( u64, root_id )
+ __field( u64, start )
+ __field( u64, len )
+ __field( u32, flags )
+ ),
+
+ TP_fast_assign_btrfs(inode->root->fs_info,
+ __entry->ino = btrfs_ino(inode);
+ __entry->root_id = btrfs_root_id(inode->root);
+ __entry->start = em->start;
+ __entry->len = em->len;
+ __entry->flags = em->flags;
+ ),
+
+ TP_printk_btrfs("ino=%llu root=%llu(%s) start=%llu len=%llu flags=%s",
+ __entry->ino, show_root_type(__entry->root_id),
+ __entry->start, __entry->len,
+ show_map_flags(__entry->flags))
);
#endif /* _TRACE_BTRFS_H */
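For orientation only: a minimal, hypothetical sketch of how the new extent-map shrinker events above would typically bracket a scan pass. The function name and the elided reclaim loop are assumptions, not code from this patch; only the trace_btrfs_extent_map_shrinker_scan_enter()/_exit() argument order is taken from the TP_PROTOs above.

/* Illustrative only -- not the fs/btrfs code this patch instruments. */
#include <trace/events/btrfs.h>

static long btrfs_em_shrinker_scan_sketch(struct btrfs_fs_info *fs_info, long nr)
{
	long nr_dropped = 0;

	trace_btrfs_extent_map_shrinker_scan_enter(fs_info, nr);
	/* ... walk roots and inodes, dropping extent maps into nr_dropped ... */
	trace_btrfs_extent_map_shrinker_scan_exit(fs_info, nr_dropped, nr);
	return nr_dropped;
}
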
diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h
index cf4b98b9a9ed..a743b2a35ea7 100644
--- a/include/trace/events/cachefiles.h
+++ b/include/trace/events/cachefiles.h
@@ -33,6 +33,8 @@ enum cachefiles_obj_ref_trace {
cachefiles_obj_see_withdrawal,
cachefiles_obj_get_ondemand_fd,
cachefiles_obj_put_ondemand_fd,
+ cachefiles_obj_get_read_req,
+ cachefiles_obj_put_read_req,
};
enum fscache_why_object_killed {
@@ -127,7 +129,11 @@ enum cachefiles_error_trace {
EM(cachefiles_obj_see_lookup_cookie, "SEE lookup_cookie") \
EM(cachefiles_obj_see_lookup_failed, "SEE lookup_failed") \
EM(cachefiles_obj_see_withdraw_cookie, "SEE withdraw_cookie") \
- E_(cachefiles_obj_see_withdrawal, "SEE withdrawal")
+ EM(cachefiles_obj_see_withdrawal, "SEE withdrawal") \
+ EM(cachefiles_obj_get_ondemand_fd, "GET ondemand_fd") \
+ EM(cachefiles_obj_put_ondemand_fd, "PUT ondemand_fd") \
+ EM(cachefiles_obj_get_read_req, "GET read_req") \
+ E_(cachefiles_obj_put_read_req, "PUT read_req")
#define cachefiles_coherency_traces \
EM(cachefiles_coherency_check_aux, "BAD aux ") \
@@ -217,10 +223,10 @@ TRACE_EVENT(cachefiles_ref,
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, cookie )
- __field(enum cachefiles_obj_ref_trace, why )
- __field(int, usage )
+ __field(unsigned int, obj)
+ __field(unsigned int, cookie)
+ __field(enum cachefiles_obj_ref_trace, why)
+ __field(int, usage)
),
TP_fast_assign(
@@ -243,10 +249,10 @@ TRACE_EVENT(cachefiles_lookup,
TP_ARGS(obj, dir, de),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(short, error )
- __field(unsigned long, dino )
- __field(unsigned long, ino )
+ __field(unsigned int, obj)
+ __field(short, error)
+ __field(unsigned long, dino)
+ __field(unsigned long, ino)
),
TP_fast_assign(
@@ -267,8 +273,8 @@ TRACE_EVENT(cachefiles_mkdir,
TP_ARGS(dir, subdir),
TP_STRUCT__entry(
- __field(unsigned int, dir )
- __field(unsigned int, subdir )
+ __field(unsigned int, dir)
+ __field(unsigned int, subdir)
),
TP_fast_assign(
@@ -287,8 +293,8 @@ TRACE_EVENT(cachefiles_tmpfile,
TP_ARGS(obj, backer),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, backer )
+ __field(unsigned int, obj)
+ __field(unsigned int, backer)
),
TP_fast_assign(
@@ -307,8 +313,8 @@ TRACE_EVENT(cachefiles_link,
TP_ARGS(obj, backer),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, backer )
+ __field(unsigned int, obj)
+ __field(unsigned int, backer)
),
TP_fast_assign(
@@ -330,9 +336,9 @@ TRACE_EVENT(cachefiles_unlink,
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, ino )
- __field(enum fscache_why_object_killed, why )
+ __field(unsigned int, obj)
+ __field(unsigned int, ino)
+ __field(enum fscache_why_object_killed, why)
),
TP_fast_assign(
@@ -355,9 +361,9 @@ TRACE_EVENT(cachefiles_rename,
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, ino )
- __field(enum fscache_why_object_killed, why )
+ __field(unsigned int, obj)
+ __field(unsigned int, ino)
+ __field(enum fscache_why_object_killed, why)
),
TP_fast_assign(
@@ -374,17 +380,20 @@ TRACE_EVENT(cachefiles_rename,
TRACE_EVENT(cachefiles_coherency,
TP_PROTO(struct cachefiles_object *obj,
ino_t ino,
+ u64 disk_aux,
enum cachefiles_content content,
enum cachefiles_coherency_trace why),
- TP_ARGS(obj, ino, content, why),
+ TP_ARGS(obj, ino, disk_aux, content, why),
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(enum cachefiles_coherency_trace, why )
- __field(enum cachefiles_content, content )
- __field(u64, ino )
+ __field(unsigned int, obj)
+ __field(enum cachefiles_coherency_trace, why)
+ __field(enum cachefiles_content, content)
+ __field(u64, ino)
+ __field(u64, aux)
+ __field(u64, disk_aux)
),
TP_fast_assign(
@@ -392,13 +401,17 @@ TRACE_EVENT(cachefiles_coherency,
__entry->why = why;
__entry->content = content;
__entry->ino = ino;
+ __entry->aux = be64_to_cpup((__be64 *)obj->cookie->inline_aux);
+ __entry->disk_aux = disk_aux;
),
- TP_printk("o=%08x %s B=%llx c=%u",
+ TP_printk("o=%08x %s B=%llx c=%u aux=%llx dsk=%llx",
__entry->obj,
__print_symbolic(__entry->why, cachefiles_coherency_traces),
__entry->ino,
- __entry->content)
+ __entry->content,
+ __entry->aux,
+ __entry->disk_aux)
);
TRACE_EVENT(cachefiles_vol_coherency,
@@ -410,9 +423,9 @@ TRACE_EVENT(cachefiles_vol_coherency,
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(unsigned int, vol )
- __field(enum cachefiles_coherency_trace, why )
- __field(u64, ino )
+ __field(unsigned int, vol)
+ __field(enum cachefiles_coherency_trace, why)
+ __field(u64, ino)
),
TP_fast_assign(
@@ -439,14 +452,14 @@ TRACE_EVENT(cachefiles_prep_read,
TP_ARGS(obj, start, len, flags, source, why, cache_inode, netfs_inode),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned short, flags )
- __field(enum netfs_io_source, source )
- __field(enum cachefiles_prepare_read_trace, why )
- __field(size_t, len )
- __field(loff_t, start )
- __field(unsigned int, netfs_inode )
- __field(unsigned int, cache_inode )
+ __field(unsigned int, obj)
+ __field(unsigned short, flags)
+ __field(enum netfs_io_source, source)
+ __field(enum cachefiles_prepare_read_trace, why)
+ __field(size_t, len)
+ __field(loff_t, start)
+ __field(unsigned int, netfs_inode)
+ __field(unsigned int, cache_inode)
),
TP_fast_assign(
@@ -478,10 +491,10 @@ TRACE_EVENT(cachefiles_read,
TP_ARGS(obj, backer, start, len),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, backer )
- __field(size_t, len )
- __field(loff_t, start )
+ __field(unsigned int, obj)
+ __field(unsigned int, backer)
+ __field(size_t, len)
+ __field(loff_t, start)
),
TP_fast_assign(
@@ -507,10 +520,10 @@ TRACE_EVENT(cachefiles_write,
TP_ARGS(obj, backer, start, len),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, backer )
- __field(size_t, len )
- __field(loff_t, start )
+ __field(unsigned int, obj)
+ __field(unsigned int, backer)
+ __field(size_t, len)
+ __field(loff_t, start)
),
TP_fast_assign(
@@ -534,11 +547,11 @@ TRACE_EVENT(cachefiles_trunc,
TP_ARGS(obj, backer, from, to, why),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, backer )
- __field(enum cachefiles_trunc_trace, why )
- __field(loff_t, from )
- __field(loff_t, to )
+ __field(unsigned int, obj)
+ __field(unsigned int, backer)
+ __field(enum cachefiles_trunc_trace, why)
+ __field(loff_t, from)
+ __field(loff_t, to)
),
TP_fast_assign(
@@ -565,8 +578,8 @@ TRACE_EVENT(cachefiles_mark_active,
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(ino_t, inode )
+ __field(unsigned int, obj)
+ __field(ino_t, inode)
),
TP_fast_assign(
@@ -586,8 +599,8 @@ TRACE_EVENT(cachefiles_mark_failed,
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(ino_t, inode )
+ __field(unsigned int, obj)
+ __field(ino_t, inode)
),
TP_fast_assign(
@@ -607,8 +620,8 @@ TRACE_EVENT(cachefiles_mark_inactive,
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(ino_t, inode )
+ __field(unsigned int, obj)
+ __field(ino_t, inode)
),
TP_fast_assign(
@@ -627,10 +640,10 @@ TRACE_EVENT(cachefiles_vfs_error,
TP_ARGS(obj, backer, error, where),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, backer )
- __field(enum cachefiles_error_trace, where )
- __field(short, error )
+ __field(unsigned int, obj)
+ __field(unsigned int, backer)
+ __field(enum cachefiles_error_trace, where)
+ __field(short, error)
),
TP_fast_assign(
@@ -654,10 +667,10 @@ TRACE_EVENT(cachefiles_io_error,
TP_ARGS(obj, backer, error, where),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, backer )
- __field(enum cachefiles_error_trace, where )
- __field(short, error )
+ __field(unsigned int, obj)
+ __field(unsigned int, backer)
+ __field(enum cachefiles_error_trace, where)
+ __field(short, error)
),
TP_fast_assign(
@@ -681,11 +694,11 @@ TRACE_EVENT(cachefiles_ondemand_open,
TP_ARGS(obj, msg, load),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, msg_id )
- __field(unsigned int, object_id )
- __field(unsigned int, fd )
- __field(unsigned int, flags )
+ __field(unsigned int, obj)
+ __field(unsigned int, msg_id)
+ __field(unsigned int, object_id)
+ __field(unsigned int, fd)
+ __field(unsigned int, flags)
),
TP_fast_assign(
@@ -711,9 +724,9 @@ TRACE_EVENT(cachefiles_ondemand_copen,
TP_ARGS(obj, msg_id, len),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, msg_id )
- __field(long, len )
+ __field(unsigned int, obj)
+ __field(unsigned int, msg_id)
+ __field(long, len)
),
TP_fast_assign(
@@ -734,9 +747,9 @@ TRACE_EVENT(cachefiles_ondemand_close,
TP_ARGS(obj, msg),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, msg_id )
- __field(unsigned int, object_id )
+ __field(unsigned int, obj)
+ __field(unsigned int, msg_id)
+ __field(unsigned int, object_id)
),
TP_fast_assign(
@@ -758,11 +771,11 @@ TRACE_EVENT(cachefiles_ondemand_read,
TP_ARGS(obj, msg, load),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, msg_id )
- __field(unsigned int, object_id )
- __field(loff_t, start )
- __field(size_t, len )
+ __field(unsigned int, obj)
+ __field(unsigned int, msg_id)
+ __field(unsigned int, object_id)
+ __field(loff_t, start)
+ __field(size_t, len)
),
TP_fast_assign(
@@ -787,8 +800,8 @@ TRACE_EVENT(cachefiles_ondemand_cread,
TP_ARGS(obj, msg_id),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, msg_id )
+ __field(unsigned int, obj)
+ __field(unsigned int, msg_id)
),
TP_fast_assign(
@@ -808,10 +821,10 @@ TRACE_EVENT(cachefiles_ondemand_fd_write,
TP_ARGS(obj, backer, start, len),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, backer )
- __field(loff_t, start )
- __field(size_t, len )
+ __field(unsigned int, obj)
+ __field(unsigned int, backer)
+ __field(loff_t, start)
+ __field(size_t, len)
),
TP_fast_assign(
@@ -834,8 +847,8 @@ TRACE_EVENT(cachefiles_ondemand_fd_release,
TP_ARGS(obj, object_id),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, object_id )
+ __field(unsigned int, obj)
+ __field(unsigned int, object_id)
),
TP_fast_assign(
diff --git a/include/trace/events/capability.h b/include/trace/events/capability.h
new file mode 100644
index 000000000000..17340257946c
--- /dev/null
+++ b/include/trace/events/capability.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM capability
+
+#if !defined(_TRACE_CAPABILITY_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CAPABILITY_H
+
+#include <linux/cred.h>
+#include <linux/tracepoint.h>
+#include <linux/user_namespace.h>
+
+/**
+ * cap_capable - called after it's determined if a task has a particular
+ * effective capability
+ *
+ * @cred: The credentials used
+ * @target_ns: The user namespace of the resource being accessed
+ * @capable_ns: The user namespace in which the credential provides the
+ * capability to access the targeted resource.
+ * This will be NULL if ret is not 0.
+ * @cap: The capability to check for
+ * @ret: The return value of the check: 0 if the task has the capability, negative if not
+ *
+ * Allows tracing calls to cap_capable in commoncap.c
+ */
+TRACE_EVENT(cap_capable,
+
+ TP_PROTO(const struct cred *cred, struct user_namespace *target_ns,
+ const struct user_namespace *capable_ns, int cap, int ret),
+
+ TP_ARGS(cred, target_ns, capable_ns, cap, ret),
+
+ TP_STRUCT__entry(
+ __field(const struct cred *, cred)
+ __field(struct user_namespace *, target_ns)
+ __field(const struct user_namespace *, capable_ns)
+ __field(int, cap)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->cred = cred;
+ __entry->target_ns = target_ns;
+ __entry->capable_ns = ret == 0 ? capable_ns : NULL;
+ __entry->cap = cap;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("cred %p, target_ns %p, capable_ns %p, cap %d, ret %d",
+ __entry->cred, __entry->target_ns, __entry->capable_ns, __entry->cap,
+ __entry->ret)
+);
+
+#endif /* _TRACE_CAPABILITY_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
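For context, a hedged sketch of the kind of call site trace_cap_capable() is meant for. The real hook lives in cap_capable() in security/commoncap.c; the function below and its elided namespace walk are illustrative only.

/* Illustrative only -- not the actual security/commoncap.c implementation. */
#include <linux/cred.h>
#include <linux/user_namespace.h>
#include <trace/events/capability.h>

static int cap_capable_sketch(const struct cred *cred,
			      struct user_namespace *targ_ns, int cap)
{
	struct user_namespace *ns = targ_ns;
	int ret = -EPERM;

	/* ... walk the user-namespace hierarchy, setting ret = 0 if capable ... */

	/* The event itself records capable_ns only when ret == 0. */
	trace_cap_capable(cred, targ_ns, ns, cap, ret);
	return ret;
}
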
diff --git a/include/trace/events/cgroup.h b/include/trace/events/cgroup.h
index dd7d7c9efecd..ba9229af9a34 100644
--- a/include/trace/events/cgroup.h
+++ b/include/trace/events/cgroup.h
@@ -23,7 +23,7 @@ DECLARE_EVENT_CLASS(cgroup_root,
TP_fast_assign(
__entry->root = root->hierarchy_id;
__entry->ss_mask = root->subsys_mask;
- __assign_str(name, root->name);
+ __assign_str(name);
),
TP_printk("root=%d ss_mask=%#x name=%s",
@@ -68,7 +68,7 @@ DECLARE_EVENT_CLASS(cgroup,
__entry->root = cgrp->root->hierarchy_id;
__entry->id = cgroup_id(cgrp);
__entry->level = cgrp->level;
- __assign_str(path, path);
+ __assign_str(path);
),
TP_printk("root=%d id=%llu level=%d path=%s",
@@ -137,9 +137,9 @@ DECLARE_EVENT_CLASS(cgroup_migrate,
__entry->dst_root = dst_cgrp->root->hierarchy_id;
__entry->dst_id = cgroup_id(dst_cgrp);
__entry->dst_level = dst_cgrp->level;
- __assign_str(dst_path, path);
+ __assign_str(dst_path);
__entry->pid = task->pid;
- __assign_str(comm, task->comm);
+ __assign_str(comm);
),
TP_printk("dst_root=%d dst_id=%llu dst_level=%d dst_path=%s pid=%d comm=%s",
@@ -181,7 +181,7 @@ DECLARE_EVENT_CLASS(cgroup_event,
__entry->root = cgrp->root->hierarchy_id;
__entry->id = cgroup_id(cgrp);
__entry->level = cgrp->level;
- __assign_str(path, path);
+ __assign_str(path);
__entry->val = val;
),
@@ -204,6 +204,59 @@ DEFINE_EVENT(cgroup_event, cgroup_notify_frozen,
TP_ARGS(cgrp, path, val)
);
+DECLARE_EVENT_CLASS(cgroup_rstat,
+
+ TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
+
+ TP_ARGS(cgrp, cpu, contended),
+
+ TP_STRUCT__entry(
+ __field( int, root )
+ __field( int, level )
+ __field( u64, id )
+ __field( int, cpu )
+ __field( bool, contended )
+ ),
+
+ TP_fast_assign(
+ __entry->root = cgrp->root->hierarchy_id;
+ __entry->id = cgroup_id(cgrp);
+ __entry->level = cgrp->level;
+ __entry->cpu = cpu;
+ __entry->contended = contended;
+ ),
+
+ TP_printk("root=%d id=%llu level=%d cpu=%d lock contended:%d",
+ __entry->root, __entry->id, __entry->level,
+ __entry->cpu, __entry->contended)
+);
+
+/*
+ * Related to locks:
+ * global rstat_base_lock for base stats
+ * cgroup_subsys::rstat_ss_lock for subsystem stats
+ */
+DEFINE_EVENT(cgroup_rstat, cgroup_rstat_lock_contended,
+
+ TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
+
+ TP_ARGS(cgrp, cpu, contended)
+);
+
+DEFINE_EVENT(cgroup_rstat, cgroup_rstat_locked,
+
+ TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
+
+ TP_ARGS(cgrp, cpu, contended)
+);
+
+DEFINE_EVENT(cgroup_rstat, cgroup_rstat_unlock,
+
+ TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
+
+ TP_ARGS(cgrp, cpu, contended)
+);
+
#endif /* _TRACE_CGROUP_H */
/* This part must be outside protection */
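The __assign_str() conversions in this and the following files all follow the same pattern: the single-argument form takes its source from the matching __string() declaration in TP_STRUCT__entry, so repeating the source expression in TP_fast_assign became redundant. A minimal, hypothetical event showing the resulting shape (the event name and field are invented for illustration):

TRACE_EVENT(example_name_only,

	TP_PROTO(const char *name),

	TP_ARGS(name),

	TP_STRUCT__entry(
		__string(name, name)	/* source expression declared once, here */
	),

	TP_fast_assign(
		__assign_str(name);	/* copies from the __string() source above */
	),

	TP_printk("name=%s", __get_str(name))
);
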
diff --git a/include/trace/events/clk.h b/include/trace/events/clk.h
index daed3c7a48c1..759f7371a6dc 100644
--- a/include/trace/events/clk.h
+++ b/include/trace/events/clk.h
@@ -23,7 +23,7 @@ DECLARE_EVENT_CLASS(clk,
),
TP_fast_assign(
- __assign_str(name, core->name);
+ __assign_str(name);
),
TP_printk("%s", __get_str(name))
@@ -97,7 +97,7 @@ DECLARE_EVENT_CLASS(clk_rate,
),
TP_fast_assign(
- __assign_str(name, core->name);
+ __assign_str(name);
__entry->rate = rate;
),
@@ -145,7 +145,7 @@ DECLARE_EVENT_CLASS(clk_rate_range,
),
TP_fast_assign(
- __assign_str(name, core->name);
+ __assign_str(name);
__entry->min = min;
__entry->max = max;
),
@@ -174,8 +174,8 @@ DECLARE_EVENT_CLASS(clk_parent,
),
TP_fast_assign(
- __assign_str(name, core->name);
- __assign_str(pname, parent ? parent->name : "none");
+ __assign_str(name);
+ __assign_str(pname);
),
TP_printk("%s %s", __get_str(name), __get_str(pname))
@@ -207,7 +207,7 @@ DECLARE_EVENT_CLASS(clk_phase,
),
TP_fast_assign(
- __assign_str(name, core->name);
+ __assign_str(name);
__entry->phase = phase;
),
@@ -241,7 +241,7 @@ DECLARE_EVENT_CLASS(clk_duty_cycle,
),
TP_fast_assign(
- __assign_str(name, core->name);
+ __assign_str(name);
__entry->num = duty->num;
__entry->den = duty->den;
),
@@ -279,8 +279,8 @@ DECLARE_EVENT_CLASS(clk_rate_request,
),
TP_fast_assign(
- __assign_str(name, req->core ? req->core->name : "none");
- __assign_str(pname, req->best_parent_hw ? clk_hw_get_name(req->best_parent_hw) : "none");
+ __assign_str(name);
+ __assign_str(pname);
__entry->min = req->min_rate;
__entry->max = req->max_rate;
__entry->prate = req->best_parent_rate;
diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h
index 3d708dae1542..37195edf2498 100644
--- a/include/trace/events/cma.h
+++ b/include/trace/events/cma.h
@@ -8,105 +8,130 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
-DECLARE_EVENT_CLASS(cma_alloc_class,
+TRACE_EVENT(cma_release,
TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
- unsigned long count, unsigned int align),
+ unsigned long count),
- TP_ARGS(name, pfn, page, count, align),
+ TP_ARGS(name, pfn, page, count),
TP_STRUCT__entry(
__string(name, name)
__field(unsigned long, pfn)
__field(const struct page *, page)
__field(unsigned long, count)
- __field(unsigned int, align)
),
TP_fast_assign(
- __assign_str(name, name);
+ __assign_str(name);
__entry->pfn = pfn;
__entry->page = page;
__entry->count = count;
- __entry->align = align;
),
- TP_printk("name=%s pfn=0x%lx page=%p count=%lu align=%u",
+ TP_printk("name=%s pfn=0x%lx page=%p count=%lu",
__get_str(name),
__entry->pfn,
__entry->page,
- __entry->count,
+ __entry->count)
+);
+
+TRACE_EVENT(cma_alloc_start,
+
+ TP_PROTO(const char *name, unsigned long request_count, unsigned long available_count,
+ unsigned long total_count, unsigned int align),
+
+ TP_ARGS(name, request_count, available_count, total_count, align),
+
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field(unsigned long, request_count)
+ __field(unsigned long, available_count)
+ __field(unsigned long, total_count)
+ __field(unsigned int, align)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ __entry->request_count = request_count;
+ __entry->available_count = available_count;
+ __entry->total_count = total_count;
+ __entry->align = align;
+ ),
+
+ TP_printk("name=%s request_count=%lu available_count=%lu total_count=%lu align=%u",
+ __get_str(name),
+ __entry->request_count,
+ __entry->available_count,
+ __entry->total_count,
__entry->align)
);
-TRACE_EVENT(cma_release,
+TRACE_EVENT(cma_alloc_finish,
TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
- unsigned long count),
+ unsigned long count, unsigned int align, int errorno),
- TP_ARGS(name, pfn, page, count),
+ TP_ARGS(name, pfn, page, count, align, errorno),
TP_STRUCT__entry(
__string(name, name)
__field(unsigned long, pfn)
__field(const struct page *, page)
__field(unsigned long, count)
+ __field(unsigned int, align)
+ __field(int, errorno)
),
TP_fast_assign(
- __assign_str(name, name);
+ __assign_str(name);
__entry->pfn = pfn;
__entry->page = page;
__entry->count = count;
+ __entry->align = align;
+ __entry->errorno = errorno;
),
- TP_printk("name=%s pfn=0x%lx page=%p count=%lu",
+ TP_printk("name=%s pfn=0x%lx page=%p count=%lu align=%u errorno=%d",
__get_str(name),
__entry->pfn,
__entry->page,
- __entry->count)
+ __entry->count,
+ __entry->align,
+ __entry->errorno)
);
-TRACE_EVENT(cma_alloc_start,
+TRACE_EVENT(cma_alloc_busy_retry,
- TP_PROTO(const char *name, unsigned long count, unsigned int align),
+ TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
+ unsigned long count, unsigned int align),
- TP_ARGS(name, count, align),
+ TP_ARGS(name, pfn, page, count, align),
TP_STRUCT__entry(
__string(name, name)
+ __field(unsigned long, pfn)
+ __field(const struct page *, page)
__field(unsigned long, count)
__field(unsigned int, align)
),
TP_fast_assign(
- __assign_str(name, name);
+ __assign_str(name);
+ __entry->pfn = pfn;
+ __entry->page = page;
__entry->count = count;
__entry->align = align;
),
- TP_printk("name=%s count=%lu align=%u",
+ TP_printk("name=%s pfn=0x%lx page=%p count=%lu align=%u",
__get_str(name),
+ __entry->pfn,
+ __entry->page,
__entry->count,
__entry->align)
);
-DEFINE_EVENT(cma_alloc_class, cma_alloc_finish,
-
- TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
- unsigned long count, unsigned int align),
-
- TP_ARGS(name, pfn, page, count, align)
-);
-
-DEFINE_EVENT(cma_alloc_class, cma_alloc_busy_retry,
-
- TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
- unsigned long count, unsigned int align),
-
- TP_ARGS(name, pfn, page, count, align)
-);
-
#endif /* _TRACE_CMA_H */
/* This part must be outside protection */
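As a rough guide to the reworked CMA events, a hypothetical allocation path showing the new argument sets. The function, the available/total locals, and the elided retry loop are assumptions; only the tracepoint argument order comes from the definitions above.

/* Illustrative only -- not the mm/cma.c code this patch changes. */
#include <trace/events/cma.h>

static struct page *cma_alloc_sketch(const char *name, unsigned long count,
				     unsigned long available, unsigned long total,
				     unsigned int align)
{
	struct page *page = NULL;
	int ret = -ENOMEM;

	trace_cma_alloc_start(name, count, available, total, align);
	/* ... bitmap search, alloc_contig_range(), cma_alloc_busy_retry on -EBUSY ... */
	trace_cma_alloc_finish(name, page ? page_to_pfn(page) : 0, page,
			       count, align, ret);
	return page;
}
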
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index 3313eb83c117..d05759d18538 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -64,13 +64,24 @@ DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages,
TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
);
+DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_fast_isolate_freepages,
+
+ TP_PROTO(
+ unsigned long start_pfn,
+ unsigned long end_pfn,
+ unsigned long nr_scanned,
+ unsigned long nr_taken),
+
+ TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
+);
+
#ifdef CONFIG_COMPACTION
TRACE_EVENT(mm_compaction_migratepages,
- TP_PROTO(struct compact_control *cc,
+ TP_PROTO(unsigned int nr_migratepages,
unsigned int nr_succeeded),
- TP_ARGS(cc, nr_succeeded),
+ TP_ARGS(nr_migratepages, nr_succeeded),
TP_STRUCT__entry(
__field(unsigned long, nr_migrated)
@@ -79,7 +90,7 @@ TRACE_EVENT(mm_compaction_migratepages,
TP_fast_assign(
__entry->nr_migrated = nr_succeeded;
- __entry->nr_failed = cc->nr_migratepages - nr_succeeded;
+ __entry->nr_failed = nr_migratepages - nr_succeeded;
),
TP_printk("nr_migrated=%lu nr_failed=%lu",
diff --git a/include/trace/events/csd.h b/include/trace/events/csd.h
new file mode 100644
index 000000000000..58cc83b99c34
--- /dev/null
+++ b/include/trace/events/csd.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM csd
+
+#if !defined(_TRACE_CSD_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CSD_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(csd_queue_cpu,
+
+ TP_PROTO(const unsigned int cpu,
+ unsigned long callsite,
+ smp_call_func_t func,
+ call_single_data_t *csd),
+
+ TP_ARGS(cpu, callsite, func, csd),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cpu)
+ __field(void *, callsite)
+ __field(void *, func)
+ __field(void *, csd)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->callsite = (void *)callsite;
+ __entry->func = func;
+ __entry->csd = csd;
+ ),
+
+ TP_printk("cpu=%u callsite=%pS func=%ps csd=%p",
+ __entry->cpu, __entry->callsite, __entry->func, __entry->csd)
+ );
+
+/*
+ * Tracepoints for a function which is called as an effect of smp_call_function.*
+ */
+DECLARE_EVENT_CLASS(csd_function,
+
+ TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
+
+ TP_ARGS(func, csd),
+
+ TP_STRUCT__entry(
+ __field(void *, func)
+ __field(void *, csd)
+ ),
+
+ TP_fast_assign(
+ __entry->func = func;
+ __entry->csd = csd;
+ ),
+
+ TP_printk("func=%ps, csd=%p", __entry->func, __entry->csd)
+);
+
+DEFINE_EVENT(csd_function, csd_function_entry,
+ TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
+ TP_ARGS(func, csd)
+);
+
+DEFINE_EVENT(csd_function, csd_function_exit,
+ TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
+ TP_ARGS(func, csd)
+);
+
+#endif /* _TRACE_CSD_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
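For orientation, a hedged sketch of how the csd events pair up in practice. The real call sites are in kernel/smp.c; the helper names below are invented.

/* Illustrative only -- the real instrumentation lives in kernel/smp.c. */
#include <linux/smp.h>
#include <trace/events/csd.h>

/* Execution side: bracket the remote function call. */
static void csd_do_func_sketch(smp_call_func_t func, void *info,
			       call_single_data_t *csd)
{
	trace_csd_function_entry(func, csd);
	func(info);
	trace_csd_function_exit(func, csd);
}

/* Queueing side: record which CPU was asked and from where. */
static void csd_queue_sketch(unsigned int cpu, call_single_data_t *csd)
{
	trace_csd_queue_cpu(cpu, _RET_IP_, csd->func, csd);
	/* ... llist_add() onto the target CPU's call_single_queue ... */
}
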
diff --git a/include/trace/events/cxl.h b/include/trace/events/cxl.h
deleted file mode 100644
index ad085a2534ef..000000000000
--- a/include/trace/events/cxl.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM cxl
-
-#if !defined(_CXL_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _CXL_EVENTS_H
-
-#include <linux/tracepoint.h>
-
-#define CXL_HEADERLOG_SIZE SZ_512
-#define CXL_HEADERLOG_SIZE_U32 SZ_512 / sizeof(u32)
-
-#define CXL_RAS_UC_CACHE_DATA_PARITY BIT(0)
-#define CXL_RAS_UC_CACHE_ADDR_PARITY BIT(1)
-#define CXL_RAS_UC_CACHE_BE_PARITY BIT(2)
-#define CXL_RAS_UC_CACHE_DATA_ECC BIT(3)
-#define CXL_RAS_UC_MEM_DATA_PARITY BIT(4)
-#define CXL_RAS_UC_MEM_ADDR_PARITY BIT(5)
-#define CXL_RAS_UC_MEM_BE_PARITY BIT(6)
-#define CXL_RAS_UC_MEM_DATA_ECC BIT(7)
-#define CXL_RAS_UC_REINIT_THRESH BIT(8)
-#define CXL_RAS_UC_RSVD_ENCODE BIT(9)
-#define CXL_RAS_UC_POISON BIT(10)
-#define CXL_RAS_UC_RECV_OVERFLOW BIT(11)
-#define CXL_RAS_UC_INTERNAL_ERR BIT(14)
-#define CXL_RAS_UC_IDE_TX_ERR BIT(15)
-#define CXL_RAS_UC_IDE_RX_ERR BIT(16)
-
-#define show_uc_errs(status) __print_flags(status, " | ", \
- { CXL_RAS_UC_CACHE_DATA_PARITY, "Cache Data Parity Error" }, \
- { CXL_RAS_UC_CACHE_ADDR_PARITY, "Cache Address Parity Error" }, \
- { CXL_RAS_UC_CACHE_BE_PARITY, "Cache Byte Enable Parity Error" }, \
- { CXL_RAS_UC_CACHE_DATA_ECC, "Cache Data ECC Error" }, \
- { CXL_RAS_UC_MEM_DATA_PARITY, "Memory Data Parity Error" }, \
- { CXL_RAS_UC_MEM_ADDR_PARITY, "Memory Address Parity Error" }, \
- { CXL_RAS_UC_MEM_BE_PARITY, "Memory Byte Enable Parity Error" }, \
- { CXL_RAS_UC_MEM_DATA_ECC, "Memory Data ECC Error" }, \
- { CXL_RAS_UC_REINIT_THRESH, "REINIT Threshold Hit" }, \
- { CXL_RAS_UC_RSVD_ENCODE, "Received Unrecognized Encoding" }, \
- { CXL_RAS_UC_POISON, "Received Poison From Peer" }, \
- { CXL_RAS_UC_RECV_OVERFLOW, "Receiver Overflow" }, \
- { CXL_RAS_UC_INTERNAL_ERR, "Component Specific Error" }, \
- { CXL_RAS_UC_IDE_TX_ERR, "IDE Tx Error" }, \
- { CXL_RAS_UC_IDE_RX_ERR, "IDE Rx Error" } \
-)
-
-TRACE_EVENT(cxl_aer_uncorrectable_error,
- TP_PROTO(const struct device *dev, u32 status, u32 fe, u32 *hl),
- TP_ARGS(dev, status, fe, hl),
- TP_STRUCT__entry(
- __string(dev_name, dev_name(dev))
- __field(u32, status)
- __field(u32, first_error)
- __array(u32, header_log, CXL_HEADERLOG_SIZE_U32)
- ),
- TP_fast_assign(
- __assign_str(dev_name, dev_name(dev));
- __entry->status = status;
- __entry->first_error = fe;
- /*
- * Embed the 512B headerlog data for user app retrieval and
- * parsing, but no need to print this in the trace buffer.
- */
- memcpy(__entry->header_log, hl, CXL_HEADERLOG_SIZE);
- ),
- TP_printk("%s: status: '%s' first_error: '%s'",
- __get_str(dev_name),
- show_uc_errs(__entry->status),
- show_uc_errs(__entry->first_error)
- )
-);
-
-#define CXL_RAS_CE_CACHE_DATA_ECC BIT(0)
-#define CXL_RAS_CE_MEM_DATA_ECC BIT(1)
-#define CXL_RAS_CE_CRC_THRESH BIT(2)
-#define CLX_RAS_CE_RETRY_THRESH BIT(3)
-#define CXL_RAS_CE_CACHE_POISON BIT(4)
-#define CXL_RAS_CE_MEM_POISON BIT(5)
-#define CXL_RAS_CE_PHYS_LAYER_ERR BIT(6)
-
-#define show_ce_errs(status) __print_flags(status, " | ", \
- { CXL_RAS_CE_CACHE_DATA_ECC, "Cache Data ECC Error" }, \
- { CXL_RAS_CE_MEM_DATA_ECC, "Memory Data ECC Error" }, \
- { CXL_RAS_CE_CRC_THRESH, "CRC Threshold Hit" }, \
- { CLX_RAS_CE_RETRY_THRESH, "Retry Threshold" }, \
- { CXL_RAS_CE_CACHE_POISON, "Received Cache Poison From Peer" }, \
- { CXL_RAS_CE_MEM_POISON, "Received Memory Poison From Peer" }, \
- { CXL_RAS_CE_PHYS_LAYER_ERR, "Received Error From Physical Layer" } \
-)
-
-TRACE_EVENT(cxl_aer_correctable_error,
- TP_PROTO(const struct device *dev, u32 status),
- TP_ARGS(dev, status),
- TP_STRUCT__entry(
- __string(dev_name, dev_name(dev))
- __field(u32, status)
- ),
- TP_fast_assign(
- __assign_str(dev_name, dev_name(dev));
- __entry->status = status;
- ),
- TP_printk("%s: status: '%s'",
- __get_str(dev_name), show_ce_errs(__entry->status)
- )
-);
-
-#endif /* _CXL_EVENTS_H */
-
-/* This part must be outside protection */
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE cxl
-#include <trace/define_trace.h>
diff --git a/include/trace/events/damon.h b/include/trace/events/damon.h
index c79f1d4c39af..852d725afea2 100644
--- a/include/trace/events/damon.h
+++ b/include/trace/events/damon.h
@@ -9,12 +9,92 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
+TRACE_EVENT(damos_esz,
+
+ TP_PROTO(unsigned int context_idx, unsigned int scheme_idx,
+ unsigned long esz),
+
+ TP_ARGS(context_idx, scheme_idx, esz),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, context_idx)
+ __field(unsigned int, scheme_idx)
+ __field(unsigned long, esz)
+ ),
+
+ TP_fast_assign(
+ __entry->context_idx = context_idx;
+ __entry->scheme_idx = scheme_idx;
+ __entry->esz = esz;
+ ),
+
+ TP_printk("ctx_idx=%u scheme_idx=%u esz=%lu",
+ __entry->context_idx, __entry->scheme_idx,
+ __entry->esz)
+);
+
+TRACE_EVENT_CONDITION(damos_before_apply,
+
+ TP_PROTO(unsigned int context_idx, unsigned int scheme_idx,
+ unsigned int target_idx, struct damon_region *r,
+ unsigned int nr_regions, bool do_trace),
+
+ TP_ARGS(context_idx, scheme_idx, target_idx, r, nr_regions, do_trace),
+
+ TP_CONDITION(do_trace),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, context_idx)
+ __field(unsigned int, scheme_idx)
+ __field(unsigned long, target_idx)
+ __field(unsigned long, start)
+ __field(unsigned long, end)
+ __field(unsigned int, nr_accesses)
+ __field(unsigned int, age)
+ __field(unsigned int, nr_regions)
+ ),
+
+ TP_fast_assign(
+ __entry->context_idx = context_idx;
+ __entry->scheme_idx = scheme_idx;
+ __entry->target_idx = target_idx;
+ __entry->start = r->ar.start;
+ __entry->end = r->ar.end;
+ __entry->nr_accesses = r->nr_accesses_bp / 10000;
+ __entry->age = r->age;
+ __entry->nr_regions = nr_regions;
+ ),
+
+ TP_printk("ctx_idx=%u scheme_idx=%u target_idx=%lu nr_regions=%u %lu-%lu: %u %u",
+ __entry->context_idx, __entry->scheme_idx,
+ __entry->target_idx, __entry->nr_regions,
+ __entry->start, __entry->end,
+ __entry->nr_accesses, __entry->age)
+);
+
+TRACE_EVENT(damon_monitor_intervals_tune,
+
+ TP_PROTO(unsigned long sample_us),
+
+ TP_ARGS(sample_us),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, sample_us)
+ ),
+
+ TP_fast_assign(
+ __entry->sample_us = sample_us;
+ ),
+
+ TP_printk("sample_us=%lu", __entry->sample_us)
+);
+
TRACE_EVENT(damon_aggregated,
- TP_PROTO(struct damon_target *t, unsigned int target_id,
- struct damon_region *r, unsigned int nr_regions),
+ TP_PROTO(unsigned int target_id, struct damon_region *r,
+ unsigned int nr_regions),
- TP_ARGS(t, target_id, r, nr_regions),
+ TP_ARGS(target_id, r, nr_regions),
TP_STRUCT__entry(
__field(unsigned long, target_id)
diff --git a/include/trace/events/devfreq.h b/include/trace/events/devfreq.h
index 7627c620bbda..6cbc4d59fd96 100644
--- a/include/trace/events/devfreq.h
+++ b/include/trace/events/devfreq.h
@@ -23,7 +23,7 @@ TRACE_EVENT(devfreq_frequency,
),
TP_fast_assign(
- __assign_str(dev_name, dev_name(&devfreq->dev));
+ __assign_str(dev_name);
__entry->freq = freq;
__entry->prev_freq = prev_freq;
__entry->busy_time = devfreq->last_status.busy_time;
@@ -54,7 +54,7 @@ TRACE_EVENT(devfreq_monitor,
__entry->busy_time = devfreq->last_status.busy_time;
__entry->total_time = devfreq->last_status.total_time;
__entry->polling_ms = devfreq->profile->polling_ms;
- __assign_str(dev_name, dev_name(&devfreq->dev));
+ __assign_str(dev_name);
),
TP_printk("dev_name=%-30s freq=%-12lu polling_ms=%-3u load=%-2lu",
diff --git a/include/trace/events/devlink.h b/include/trace/events/devlink.h
index 24969184c534..f241e204fe6b 100644
--- a/include/trace/events/devlink.h
+++ b/include/trace/events/devlink.h
@@ -31,9 +31,9 @@ TRACE_EVENT(devlink_hwmsg,
),
TP_fast_assign(
- __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
- __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
- __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
+ __assign_str(bus_name);
+ __assign_str(dev_name);
+ __assign_str(driver_name);
__entry->incoming = incoming;
__entry->type = type;
memcpy(__get_dynamic_array(buf), buf, len);
@@ -63,11 +63,11 @@ TRACE_EVENT(devlink_hwerr,
),
TP_fast_assign(
- __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
- __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
- __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
+ __assign_str(bus_name);
+ __assign_str(dev_name);
+ __assign_str(driver_name);
__entry->err = err;
- __assign_str(msg, msg);
+ __assign_str(msg);
),
TP_printk("bus_name=%s dev_name=%s driver_name=%s err=%d %s",
@@ -88,16 +88,16 @@ TRACE_EVENT(devlink_health_report,
__string(bus_name, devlink_to_dev(devlink)->bus->name)
__string(dev_name, dev_name(devlink_to_dev(devlink)))
__string(driver_name, devlink_to_dev(devlink)->driver->name)
- __string(reporter_name, msg)
+ __string(reporter_name, reporter_name)
__string(msg, msg)
),
TP_fast_assign(
- __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
- __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
- __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
- __assign_str(reporter_name, reporter_name);
- __assign_str(msg, msg);
+ __assign_str(bus_name);
+ __assign_str(dev_name);
+ __assign_str(driver_name);
+ __assign_str(reporter_name);
+ __assign_str(msg);
),
TP_printk("bus_name=%s dev_name=%s driver_name=%s reporter_name=%s: %s",
@@ -125,10 +125,10 @@ TRACE_EVENT(devlink_health_recover_aborted,
),
TP_fast_assign(
- __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
- __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
- __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
- __assign_str(reporter_name, reporter_name);
+ __assign_str(bus_name);
+ __assign_str(dev_name);
+ __assign_str(driver_name);
+ __assign_str(reporter_name);
__entry->health_state = health_state;
__entry->time_since_last_recover = time_since_last_recover;
),
@@ -158,10 +158,10 @@ TRACE_EVENT(devlink_health_reporter_state_update,
),
TP_fast_assign(
- __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
- __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
- __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
- __assign_str(reporter_name, reporter_name);
+ __assign_str(bus_name);
+ __assign_str(dev_name);
+ __assign_str(driver_name);
+ __assign_str(reporter_name);
__entry->new_state = new_state;
),
@@ -192,11 +192,11 @@ TRACE_EVENT(devlink_trap_report,
TP_fast_assign(
struct net_device *input_dev = metadata->input_dev;
- __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
- __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
- __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
- __assign_str(trap_name, metadata->trap_name);
- __assign_str(trap_group_name, metadata->trap_group_name);
+ __assign_str(bus_name);
+ __assign_str(dev_name);
+ __assign_str(driver_name);
+ __assign_str(trap_name);
+ __assign_str(trap_group_name);
strscpy(__entry->input_dev_name, input_dev ? input_dev->name : "NULL", IFNAMSIZ);
),
diff --git a/include/trace/events/dlm.h b/include/trace/events/dlm.h
index 37eb79e29b28..af160082c9e3 100644
--- a/include/trace/events/dlm.h
+++ b/include/trace/events/dlm.h
@@ -7,6 +7,7 @@
#include <linux/dlm.h>
#include <linux/dlmconstants.h>
+#include <uapi/linux/dlm_plock.h>
#include <linux/tracepoint.h>
#include "../../../fs/dlm/dlm_internal.h"
@@ -47,16 +48,8 @@
{ DLM_SBF_ALTMODE, "ALTMODE" })
#define show_lkb_flags(flags) __print_flags(flags, "|", \
- { DLM_IFL_MSTCPY, "MSTCPY" }, \
- { DLM_IFL_RESEND, "RESEND" }, \
- { DLM_IFL_DEAD, "DEAD" }, \
- { DLM_IFL_OVERLAP_UNLOCK, "OVERLAP_UNLOCK" }, \
- { DLM_IFL_OVERLAP_CANCEL, "OVERLAP_CANCEL" }, \
- { DLM_IFL_ENDOFLIFE, "ENDOFLIFE" }, \
- { DLM_IFL_DEADLOCK_CANCEL, "DEADLOCK_CANCEL" }, \
- { DLM_IFL_STUB_MS, "STUB_MS" }, \
- { DLM_IFL_USER, "USER" }, \
- { DLM_IFL_ORPHAN, "ORPHAN" })
+ { BIT(DLM_DFL_USER_BIT), "USER" }, \
+ { BIT(DLM_DFL_ORPHAN_BIT), "ORPHAN" })
#define show_header_cmd(cmd) __print_symbolic(cmd, \
{ DLM_MSG, "MSG"}, \
@@ -196,29 +189,25 @@ TRACE_EVENT(dlm_lock_end,
TRACE_EVENT(dlm_bast,
- TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, int mode),
+ TP_PROTO(__u32 ls_id, __u32 lkb_id, int mode,
+ const char *res_name, size_t res_length),
- TP_ARGS(ls, lkb, mode),
+ TP_ARGS(ls_id, lkb_id, mode, res_name, res_length),
TP_STRUCT__entry(
__field(__u32, ls_id)
__field(__u32, lkb_id)
__field(int, mode)
- __dynamic_array(unsigned char, res_name,
- lkb->lkb_resource ? lkb->lkb_resource->res_length : 0)
+ __dynamic_array(unsigned char, res_name, res_length)
),
TP_fast_assign(
- struct dlm_rsb *r;
-
- __entry->ls_id = ls->ls_global_id;
- __entry->lkb_id = lkb->lkb_id;
+ __entry->ls_id = ls_id;
+ __entry->lkb_id = lkb_id;
__entry->mode = mode;
- r = lkb->lkb_resource;
- if (r)
- memcpy(__get_dynamic_array(res_name), r->res_name,
- __get_dynamic_array_len(res_name));
+ memcpy(__get_dynamic_array(res_name), res_name,
+ __get_dynamic_array_len(res_name));
),
TP_printk("ls_id=%u lkb_id=%x mode=%s res_name=%s",
@@ -231,31 +220,27 @@ TRACE_EVENT(dlm_bast,
TRACE_EVENT(dlm_ast,
- TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb),
+ TP_PROTO(__u32 ls_id, __u32 lkb_id, __u8 sb_flags, int sb_status,
+ const char *res_name, size_t res_length),
- TP_ARGS(ls, lkb),
+ TP_ARGS(ls_id, lkb_id, sb_flags, sb_status, res_name, res_length),
TP_STRUCT__entry(
__field(__u32, ls_id)
__field(__u32, lkb_id)
- __field(u8, sb_flags)
+ __field(__u8, sb_flags)
__field(int, sb_status)
- __dynamic_array(unsigned char, res_name,
- lkb->lkb_resource ? lkb->lkb_resource->res_length : 0)
+ __dynamic_array(unsigned char, res_name, res_length)
),
TP_fast_assign(
- struct dlm_rsb *r;
+ __entry->ls_id = ls_id;
+ __entry->lkb_id = lkb_id;
+ __entry->sb_flags = sb_flags;
+ __entry->sb_status = sb_status;
- __entry->ls_id = ls->ls_global_id;
- __entry->lkb_id = lkb->lkb_id;
- __entry->sb_flags = lkb->lkb_lksb->sb_flags;
- __entry->sb_status = lkb->lkb_lksb->sb_status;
-
- r = lkb->lkb_resource;
- if (r)
- memcpy(__get_dynamic_array(res_name), r->res_name,
- __get_dynamic_array_len(res_name));
+ memcpy(__get_dynamic_array(res_name), res_name,
+ __get_dynamic_array_len(res_name));
),
TP_printk("ls_id=%u lkb_id=%x sb_flags=%s sb_status=%d res_name=%s",
@@ -593,6 +578,56 @@ TRACE_EVENT(dlm_recv_message,
);
+DECLARE_EVENT_CLASS(dlm_plock_template,
+
+ TP_PROTO(const struct dlm_plock_info *info),
+
+ TP_ARGS(info),
+
+ TP_STRUCT__entry(
+ __field(uint8_t, optype)
+ __field(uint8_t, ex)
+ __field(uint8_t, wait)
+ __field(uint8_t, flags)
+ __field(uint32_t, pid)
+ __field(int32_t, nodeid)
+ __field(int32_t, rv)
+ __field(uint32_t, fsid)
+ __field(uint64_t, number)
+ __field(uint64_t, start)
+ __field(uint64_t, end)
+ __field(uint64_t, owner)
+ ),
+
+ TP_fast_assign(
+ __entry->optype = info->optype;
+ __entry->ex = info->ex;
+ __entry->wait = info->wait;
+ __entry->flags = info->flags;
+ __entry->pid = info->pid;
+ __entry->nodeid = info->nodeid;
+ __entry->rv = info->rv;
+ __entry->fsid = info->fsid;
+ __entry->number = info->number;
+ __entry->start = info->start;
+ __entry->end = info->end;
+ __entry->owner = info->owner;
+ ),
+
+ TP_printk("fsid=%u number=%llx owner=%llx optype=%d ex=%d wait=%d flags=%x pid=%u nodeid=%d rv=%d start=%llx end=%llx",
+ __entry->fsid, __entry->number, __entry->owner,
+ __entry->optype, __entry->ex, __entry->wait,
+ __entry->flags, __entry->pid, __entry->nodeid,
+ __entry->rv, __entry->start, __entry->end)
+
+);
+
+DEFINE_EVENT(dlm_plock_template, dlm_plock_read,
+ TP_PROTO(const struct dlm_plock_info *info), TP_ARGS(info));
+
+DEFINE_EVENT(dlm_plock_template, dlm_plock_write,
+ TP_PROTO(const struct dlm_plock_info *info), TP_ARGS(info));
+
TRACE_EVENT(dlm_send,
TP_PROTO(int nodeid, int ret),
diff --git a/include/trace/events/dma.h b/include/trace/events/dma.h
new file mode 100644
index 000000000000..b3fef140ae15
--- /dev/null
+++ b/include/trace/events/dma.h
@@ -0,0 +1,475 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dma
+
+#if !defined(_TRACE_DMA_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DMA_H
+
+#include <linux/tracepoint.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <trace/events/mmflags.h>
+
+TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
+TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
+TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
+TRACE_DEFINE_ENUM(DMA_NONE);
+
+#define decode_dma_data_direction(dir) \
+ __print_symbolic(dir, \
+ { DMA_BIDIRECTIONAL, "BIDIRECTIONAL" }, \
+ { DMA_TO_DEVICE, "TO_DEVICE" }, \
+ { DMA_FROM_DEVICE, "FROM_DEVICE" }, \
+ { DMA_NONE, "NONE" })
+
+#define decode_dma_attrs(attrs) \
+ __print_flags(attrs, "|", \
+ { DMA_ATTR_WEAK_ORDERING, "WEAK_ORDERING" }, \
+ { DMA_ATTR_WRITE_COMBINE, "WRITE_COMBINE" }, \
+ { DMA_ATTR_NO_KERNEL_MAPPING, "NO_KERNEL_MAPPING" }, \
+ { DMA_ATTR_SKIP_CPU_SYNC, "SKIP_CPU_SYNC" }, \
+ { DMA_ATTR_FORCE_CONTIGUOUS, "FORCE_CONTIGUOUS" }, \
+ { DMA_ATTR_ALLOC_SINGLE_PAGES, "ALLOC_SINGLE_PAGES" }, \
+ { DMA_ATTR_NO_WARN, "NO_WARN" }, \
+ { DMA_ATTR_PRIVILEGED, "PRIVILEGED" }, \
+ { DMA_ATTR_MMIO, "MMIO" })
+
+DECLARE_EVENT_CLASS(dma_map,
+ TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, phys_addr, dma_addr, size, dir, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(u64, phys_addr)
+ __field(u64, dma_addr)
+ __field(size_t, size)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __entry->phys_addr = phys_addr;
+ __entry->dma_addr = dma_addr;
+ __entry->size = size;
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s dma_addr=%llx size=%zu phys_addr=%llx attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __entry->dma_addr,
+ __entry->size,
+ __entry->phys_addr,
+ decode_dma_attrs(__entry->attrs))
+);
+
+#define DEFINE_MAP_EVENT(name) \
+DEFINE_EVENT(dma_map, name, \
+ TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr, \
+ size_t size, enum dma_data_direction dir, unsigned long attrs), \
+ TP_ARGS(dev, phys_addr, dma_addr, size, dir, attrs))
+
+DEFINE_MAP_EVENT(dma_map_phys);
+
+DECLARE_EVENT_CLASS(dma_unmap,
+ TP_PROTO(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, addr, size, dir, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(u64, addr)
+ __field(size_t, size)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __entry->addr = addr;
+ __entry->size = size;
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s dma_addr=%llx size=%zu attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __entry->addr,
+ __entry->size,
+ decode_dma_attrs(__entry->attrs))
+);
+
+#define DEFINE_UNMAP_EVENT(name) \
+DEFINE_EVENT(dma_unmap, name, \
+ TP_PROTO(struct device *dev, dma_addr_t addr, size_t size, \
+ enum dma_data_direction dir, unsigned long attrs), \
+ TP_ARGS(dev, addr, size, dir, attrs))
+
+DEFINE_UNMAP_EVENT(dma_unmap_phys);
+
+DECLARE_EVENT_CLASS(dma_alloc_class,
+ TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir, gfp_t flags,
+ unsigned long attrs),
+ TP_ARGS(dev, virt_addr, dma_addr, size, dir, flags, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(void *, virt_addr)
+ __field(u64, dma_addr)
+ __field(size_t, size)
+ __field(gfp_t, flags)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __entry->virt_addr = virt_addr;
+ __entry->dma_addr = dma_addr;
+ __entry->size = size;
+ __entry->flags = flags;
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s dma_addr=%llx size=%zu virt_addr=%p flags=%s attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __entry->dma_addr,
+ __entry->size,
+ __entry->virt_addr,
+ show_gfp_flags(__entry->flags),
+ decode_dma_attrs(__entry->attrs))
+);
+
+#define DEFINE_ALLOC_EVENT(name) \
+DEFINE_EVENT(dma_alloc_class, name, \
+ TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr, \
+ size_t size, enum dma_data_direction dir, gfp_t flags, \
+ unsigned long attrs), \
+ TP_ARGS(dev, virt_addr, dma_addr, size, dir, flags, attrs))
+
+DEFINE_ALLOC_EVENT(dma_alloc);
+DEFINE_ALLOC_EVENT(dma_alloc_pages);
+DEFINE_ALLOC_EVENT(dma_alloc_sgt_err);
+
+TRACE_EVENT(dma_alloc_sgt,
+ TP_PROTO(struct device *dev, struct sg_table *sgt, size_t size,
+ enum dma_data_direction dir, gfp_t flags, unsigned long attrs),
+ TP_ARGS(dev, sgt, size, dir, flags, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __dynamic_array(u64, phys_addrs, sgt->orig_nents)
+ __field(u64, dma_addr)
+ __field(size_t, size)
+ __field(enum dma_data_direction, dir)
+ __field(gfp_t, flags)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ struct scatterlist *sg;
+ int i;
+
+ __assign_str(device);
+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
+ ((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
+ __entry->dma_addr = sg_dma_address(sgt->sgl);
+ __entry->size = size;
+ __entry->dir = dir;
+ __entry->flags = flags;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s dma_addr=%llx size=%zu phys_addrs=%s flags=%s attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __entry->dma_addr,
+ __entry->size,
+ __print_array(__get_dynamic_array(phys_addrs),
+ __get_dynamic_array_len(phys_addrs) /
+ sizeof(u64), sizeof(u64)),
+ show_gfp_flags(__entry->flags),
+ decode_dma_attrs(__entry->attrs))
+);
+
+DECLARE_EVENT_CLASS(dma_free_class,
+ TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, virt_addr, dma_addr, size, dir, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(void *, virt_addr)
+ __field(u64, dma_addr)
+ __field(size_t, size)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __entry->virt_addr = virt_addr;
+ __entry->dma_addr = dma_addr;
+ __entry->size = size;
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s dma_addr=%llx size=%zu virt_addr=%p attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __entry->dma_addr,
+ __entry->size,
+ __entry->virt_addr,
+ decode_dma_attrs(__entry->attrs))
+);
+
+#define DEFINE_FREE_EVENT(name) \
+DEFINE_EVENT(dma_free_class, name, \
+ TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr, \
+ size_t size, enum dma_data_direction dir, unsigned long attrs), \
+ TP_ARGS(dev, virt_addr, dma_addr, size, dir, attrs))
+
+DEFINE_FREE_EVENT(dma_free);
+DEFINE_FREE_EVENT(dma_free_pages);
+
+TRACE_EVENT(dma_free_sgt,
+ TP_PROTO(struct device *dev, struct sg_table *sgt, size_t size,
+ enum dma_data_direction dir),
+ TP_ARGS(dev, sgt, size, dir),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __dynamic_array(u64, phys_addrs, sgt->orig_nents)
+ __field(u64, dma_addr)
+ __field(size_t, size)
+ __field(enum dma_data_direction, dir)
+ ),
+
+ TP_fast_assign(
+ struct scatterlist *sg;
+ int i;
+
+ __assign_str(device);
+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
+ ((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
+ __entry->dma_addr = sg_dma_address(sgt->sgl);
+ __entry->size = size;
+ __entry->dir = dir;
+ ),
+
+ TP_printk("%s dir=%s dma_addr=%llx size=%zu phys_addrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __entry->dma_addr,
+ __entry->size,
+ __print_array(__get_dynamic_array(phys_addrs),
+ __get_dynamic_array_len(phys_addrs) /
+ sizeof(u64), sizeof(u64)))
+);
+
+TRACE_EVENT(dma_map_sg,
+ TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
+ int ents, enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, sgl, nents, ents, dir, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __dynamic_array(u64, phys_addrs, nents)
+ __dynamic_array(u64, dma_addrs, ents)
+ __dynamic_array(unsigned int, lengths, ents)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ struct scatterlist *sg;
+ int i;
+
+ __assign_str(device);
+ for_each_sg(sgl, sg, nents, i)
+ ((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
+ for_each_sg(sgl, sg, ents, i) {
+ ((u64 *)__get_dynamic_array(dma_addrs))[i] =
+ sg_dma_address(sg);
+ ((unsigned int *)__get_dynamic_array(lengths))[i] =
+ sg_dma_len(sg);
+ }
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s dma_addrs=%s sizes=%s phys_addrs=%s attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __print_array(__get_dynamic_array(dma_addrs),
+ __get_dynamic_array_len(dma_addrs) /
+ sizeof(u64), sizeof(u64)),
+ __print_array(__get_dynamic_array(lengths),
+ __get_dynamic_array_len(lengths) /
+ sizeof(unsigned int), sizeof(unsigned int)),
+ __print_array(__get_dynamic_array(phys_addrs),
+ __get_dynamic_array_len(phys_addrs) /
+ sizeof(u64), sizeof(u64)),
+ decode_dma_attrs(__entry->attrs))
+);
+
+TRACE_EVENT(dma_map_sg_err,
+ TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
+ int err, enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, sgl, nents, err, dir, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __dynamic_array(u64, phys_addrs, nents)
+ __field(int, err)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ struct scatterlist *sg;
+ int i;
+
+ __assign_str(device);
+ for_each_sg(sgl, sg, nents, i)
+ ((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
+ __entry->err = err;
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s dma_addrs=%s err=%d attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __print_array(__get_dynamic_array(phys_addrs),
+ __get_dynamic_array_len(phys_addrs) /
+ sizeof(u64), sizeof(u64)),
+ __entry->err,
+ decode_dma_attrs(__entry->attrs))
+);
+
+TRACE_EVENT(dma_unmap_sg,
+ TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
+ enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, sgl, nents, dir, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __dynamic_array(u64, addrs, nents)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ struct scatterlist *sg;
+ int i;
+
+ __assign_str(device);
+ for_each_sg(sgl, sg, nents, i)
+ ((u64 *)__get_dynamic_array(addrs))[i] = sg_phys(sg);
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s phys_addrs=%s attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __print_array(__get_dynamic_array(addrs),
+ __get_dynamic_array_len(addrs) /
+ sizeof(u64), sizeof(u64)),
+ decode_dma_attrs(__entry->attrs))
+);
+
+DECLARE_EVENT_CLASS(dma_sync_single,
+ TP_PROTO(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir),
+ TP_ARGS(dev, dma_addr, size, dir),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(u64, dma_addr)
+ __field(size_t, size)
+ __field(enum dma_data_direction, dir)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __entry->dma_addr = dma_addr;
+ __entry->size = size;
+ __entry->dir = dir;
+ ),
+
+ TP_printk("%s dir=%s dma_addr=%llx size=%zu",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __entry->dma_addr,
+ __entry->size)
+);
+
+#define DEFINE_SYNC_SINGLE_EVENT(name) \
+DEFINE_EVENT(dma_sync_single, name, \
+ TP_PROTO(struct device *dev, dma_addr_t dma_addr, size_t size, \
+ enum dma_data_direction dir), \
+ TP_ARGS(dev, dma_addr, size, dir))
+
+DEFINE_SYNC_SINGLE_EVENT(dma_sync_single_for_cpu);
+DEFINE_SYNC_SINGLE_EVENT(dma_sync_single_for_device);
+
+DECLARE_EVENT_CLASS(dma_sync_sg,
+ TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
+ enum dma_data_direction dir),
+ TP_ARGS(dev, sgl, nents, dir),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __dynamic_array(u64, dma_addrs, nents)
+ __dynamic_array(unsigned int, lengths, nents)
+ __field(enum dma_data_direction, dir)
+ ),
+
+ TP_fast_assign(
+ struct scatterlist *sg;
+ int i;
+
+ __assign_str(device);
+ for_each_sg(sgl, sg, nents, i) {
+ ((u64 *)__get_dynamic_array(dma_addrs))[i] =
+ sg_dma_address(sg);
+ ((unsigned int *)__get_dynamic_array(lengths))[i] =
+ sg_dma_len(sg);
+ }
+ __entry->dir = dir;
+ ),
+
+ TP_printk("%s dir=%s dma_addrs=%s sizes=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __print_array(__get_dynamic_array(dma_addrs),
+ __get_dynamic_array_len(dma_addrs) /
+ sizeof(u64), sizeof(u64)),
+ __print_array(__get_dynamic_array(lengths),
+ __get_dynamic_array_len(lengths) /
+ sizeof(unsigned int), sizeof(unsigned int)))
+);
+
+#define DEFINE_SYNC_SG_EVENT(name) \
+DEFINE_EVENT(dma_sync_sg, name, \
+ TP_PROTO(struct device *dev, struct scatterlist *sg, int nents, \
+ enum dma_data_direction dir), \
+ TP_ARGS(dev, sg, nents, dir))
+
+DEFINE_SYNC_SG_EVENT(dma_sync_sg_for_cpu);
+DEFINE_SYNC_SG_EVENT(dma_sync_sg_for_device);
+
+#endif /* _TRACE_DMA_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
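For context, a hedged sketch of where a dma_map_phys-style event fits. In the real kernel the hooks sit inside the DMA mapping core (kernel/dma/mapping.c); the external wrapper below exists only to illustrate the argument order and is not how the core emits the event.

/* Illustrative wrapper only -- assumes dma_map_page_attrs() semantics. */
#include <asm/io.h>
#include <linux/dma-mapping.h>
#include <trace/events/dma.h>

static dma_addr_t dma_map_page_sketch(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir,
				      unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t addr;

	addr = dma_map_page_attrs(dev, page, offset, size, dir, attrs);
	if (!dma_mapping_error(dev, addr))
		trace_dma_map_phys(dev, phys, addr, size, dir, attrs);
	return addr;
}
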
diff --git a/include/trace/events/dma_fence.h b/include/trace/events/dma_fence.h
index 3963e79ca7b4..4814a65b68dc 100644
--- a/include/trace/events/dma_fence.h
+++ b/include/trace/events/dma_fence.h
@@ -16,6 +16,36 @@ DECLARE_EVENT_CLASS(dma_fence,
TP_ARGS(fence),
TP_STRUCT__entry(
+ __string(driver, dma_fence_driver_name(fence))
+ __string(timeline, dma_fence_timeline_name(fence))
+ __field(unsigned int, context)
+ __field(unsigned int, seqno)
+ ),
+
+ TP_fast_assign(
+ __assign_str(driver);
+ __assign_str(timeline);
+ __entry->context = fence->context;
+ __entry->seqno = fence->seqno;
+ ),
+
+ TP_printk("driver=%s timeline=%s context=%u seqno=%u",
+ __get_str(driver), __get_str(timeline), __entry->context,
+ __entry->seqno)
+);
+
+/*
+ * Safe only for call sites which are guaranteed not to race with fence
+ * signaling: either holding fence->lock and having checked that the fence is
+ * not signaled, or the signaling path itself.
+ */
+DECLARE_EVENT_CLASS(dma_fence_unsignaled,
+
+ TP_PROTO(struct dma_fence *fence),
+
+ TP_ARGS(fence),
+
+ TP_STRUCT__entry(
__string(driver, fence->ops->get_driver_name(fence))
__string(timeline, fence->ops->get_timeline_name(fence))
__field(unsigned int, context)
@@ -23,8 +53,8 @@ DECLARE_EVENT_CLASS(dma_fence,
),
TP_fast_assign(
- __assign_str(driver, fence->ops->get_driver_name(fence));
- __assign_str(timeline, fence->ops->get_timeline_name(fence));
+ __assign_str(driver);
+ __assign_str(timeline);
__entry->context = fence->context;
__entry->seqno = fence->seqno;
),
@@ -34,14 +64,14 @@ DECLARE_EVENT_CLASS(dma_fence,
__entry->seqno)
);
-DEFINE_EVENT(dma_fence, dma_fence_emit,
+DEFINE_EVENT(dma_fence_unsignaled, dma_fence_emit,
TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(dma_fence, dma_fence_init,
+DEFINE_EVENT(dma_fence_unsignaled, dma_fence_init,
TP_PROTO(struct dma_fence *fence),
@@ -55,14 +85,14 @@ DEFINE_EVENT(dma_fence, dma_fence_destroy,
TP_ARGS(fence)
);
-DEFINE_EVENT(dma_fence, dma_fence_enable_signal,
+DEFINE_EVENT(dma_fence_unsignaled, dma_fence_enable_signal,
TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(dma_fence, dma_fence_signaled,
+DEFINE_EVENT(dma_fence_unsignaled, dma_fence_signaled,
TP_PROTO(struct dma_fence *fence),
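/*
 * Illustrative sketch, not from the patch: emitting one of the events moved
 * to the dma_fence_unsignaled class in the way its comment requires - under
 * fence->lock and only after checking that the fence has not signaled yet.
 * The helper itself is hypothetical.
 */
#include <linux/dma-fence.h>
#include <linux/spinlock.h>
#include <trace/events/dma_fence.h>

static void example_enable_signaling(struct dma_fence *fence)
{
	unsigned long irqflags;

	spin_lock_irqsave(fence->lock, irqflags);
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		trace_dma_fence_enable_signal(fence);
	spin_unlock_irqrestore(fence->lock, irqflags);
}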
diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h
index 4f4c44ea3a65..dad7360f42f9 100644
--- a/include/trace/events/erofs.h
+++ b/include/trace/events/erofs.h
@@ -19,12 +19,17 @@ struct erofs_map_blocks;
{ 1, "DIR" })
#define show_map_flags(flags) __print_flags(flags, "|", \
- { EROFS_GET_BLOCKS_RAW, "RAW" })
+ { EROFS_GET_BLOCKS_FIEMAP, "FIEMAP" }, \
+ { EROFS_GET_BLOCKS_READMORE, "READMORE" }, \
+ { EROFS_GET_BLOCKS_FINDTAIL, "FINDTAIL" })
#define show_mflags(flags) __print_flags(flags, "", \
- { EROFS_MAP_MAPPED, "M" }, \
- { EROFS_MAP_META, "I" }, \
- { EROFS_MAP_ENCODED, "E" })
+ { EROFS_MAP_MAPPED, "M" }, \
+ { EROFS_MAP_META, "I" }, \
+ { EROFS_MAP_ENCODED, "E" }, \
+ { EROFS_MAP_FULL_MAPPED, "F" }, \
+ { EROFS_MAP_FRAGMENT, "R" }, \
+ { EROFS_MAP_PARTIAL_REF, "P" })
TRACE_EVENT(erofs_lookup,
@@ -42,7 +47,7 @@ TRACE_EVENT(erofs_lookup,
TP_fast_assign(
__entry->dev = dir->i_sb->s_dev;
__entry->nid = EROFS_I(dir)->nid;
- __assign_str(name, dentry->d_name.name);
+ __assign_str(name);
__entry->flags = flags;
),
@@ -66,20 +71,20 @@ TRACE_EVENT(erofs_fill_inode,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->nid = EROFS_I(inode)->nid;
- __entry->blkaddr = erofs_blknr(iloc(EROFS_I_SB(inode), __entry->nid));
- __entry->ofs = erofs_blkoff(iloc(EROFS_I_SB(inode), __entry->nid));
+ __entry->blkaddr = erofs_blknr(inode->i_sb, erofs_iloc(inode));
+ __entry->ofs = erofs_blkoff(inode->i_sb, erofs_iloc(inode));
),
- TP_printk("dev = (%d,%d), nid = %llu, blkaddr %u ofs %u",
+ TP_printk("dev = (%d,%d), nid = %llu, blkaddr %llu ofs %u",
show_dev_nid(__entry),
__entry->blkaddr, __entry->ofs)
);
-TRACE_EVENT(erofs_readpage,
+TRACE_EVENT(erofs_read_folio,
- TP_PROTO(struct page *page, bool raw),
+ TP_PROTO(struct folio *folio, bool raw),
- TP_ARGS(page, raw),
+ TP_ARGS(folio, raw),
TP_STRUCT__entry(
__field(dev_t, dev )
@@ -91,11 +96,11 @@ TRACE_EVENT(erofs_readpage,
),
TP_fast_assign(
- __entry->dev = page->mapping->host->i_sb->s_dev;
- __entry->nid = EROFS_I(page->mapping->host)->nid;
- __entry->dir = S_ISDIR(page->mapping->host->i_mode);
- __entry->index = page->index;
- __entry->uptodate = PageUptodate(page);
+ __entry->dev = folio->mapping->host->i_sb->s_dev;
+ __entry->nid = EROFS_I(folio->mapping->host)->nid;
+ __entry->dir = S_ISDIR(folio->mapping->host->i_mode);
+ __entry->index = folio->index;
+ __entry->uptodate = folio_test_uptodate(folio);
__entry->raw = raw;
),
@@ -108,7 +113,7 @@ TRACE_EVENT(erofs_readpage,
__entry->raw)
);
-TRACE_EVENT(erofs_readpages,
+TRACE_EVENT(erofs_readahead,
TP_PROTO(struct inode *inode, pgoff_t start, unsigned int nrpage,
bool raw),
@@ -138,7 +143,8 @@ TRACE_EVENT(erofs_readpages,
__entry->raw)
);
-DECLARE_EVENT_CLASS(erofs__map_blocks_enter,
+TRACE_EVENT(erofs_map_blocks_enter,
+
TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
unsigned int flags),
@@ -166,21 +172,8 @@ DECLARE_EVENT_CLASS(erofs__map_blocks_enter,
__entry->flags ? show_map_flags(__entry->flags) : "NULL")
);
-DEFINE_EVENT(erofs__map_blocks_enter, erofs_map_blocks_enter,
- TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
- unsigned flags),
-
- TP_ARGS(inode, map, flags)
-);
-
-DEFINE_EVENT(erofs__map_blocks_enter, z_erofs_map_blocks_iter_enter,
- TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
- unsigned int flags),
-
- TP_ARGS(inode, map, flags)
-);
+TRACE_EVENT(erofs_map_blocks_exit,
-DECLARE_EVENT_CLASS(erofs__map_blocks_exit,
TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
unsigned int flags, int ret),
@@ -218,38 +211,6 @@ DECLARE_EVENT_CLASS(erofs__map_blocks_exit,
show_mflags(__entry->mflags), __entry->ret)
);
-DEFINE_EVENT(erofs__map_blocks_exit, erofs_map_blocks_exit,
- TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
- unsigned flags, int ret),
-
- TP_ARGS(inode, map, flags, ret)
-);
-
-DEFINE_EVENT(erofs__map_blocks_exit, z_erofs_map_blocks_iter_exit,
- TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
- unsigned int flags, int ret),
-
- TP_ARGS(inode, map, flags, ret)
-);
-
-TRACE_EVENT(erofs_destroy_inode,
- TP_PROTO(struct inode *inode),
-
- TP_ARGS(inode),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( erofs_nid_t, nid )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->nid = EROFS_I(inode)->nid;
- ),
-
- TP_printk("dev = (%d,%d), nid = %llu", show_dev_nid(__entry))
-);
-
#endif /* _TRACE_EROFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/exceptions.h b/include/trace/events/exceptions.h
new file mode 100644
index 000000000000..a631f8de8917
--- /dev/null
+++ b/include/trace/events/exceptions.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM exceptions
+
+#if !defined(_TRACE_PAGE_FAULT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PAGE_FAULT_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(exceptions,
+
+ TP_PROTO(unsigned long address, struct pt_regs *regs,
+ unsigned long error_code),
+
+ TP_ARGS(address, regs, error_code),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, address )
+ __field( unsigned long, ip )
+ __field( unsigned long, error_code )
+ ),
+
+ TP_fast_assign(
+ __entry->address = address;
+ __entry->ip = instruction_pointer(regs);
+ __entry->error_code = error_code;
+ ),
+
+ TP_printk("address=%ps ip=%ps error_code=0x%lx",
+ (void *)__entry->address, (void *)__entry->ip,
+ __entry->error_code));
+
+DEFINE_EVENT(exceptions, page_fault_user,
+ TP_PROTO(unsigned long address, struct pt_regs *regs, unsigned long error_code),
+ TP_ARGS(address, regs, error_code));
+DEFINE_EVENT(exceptions, page_fault_kernel,
+ TP_PROTO(unsigned long address, struct pt_regs *regs, unsigned long error_code),
+ TP_ARGS(address, regs, error_code));
+
+#endif /* _TRACE_PAGE_FAULT_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
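/*
 * Illustrative sketch, assumed call site: how an architecture fault handler
 * could emit the events declared above. Only the trace_page_fault_user/
 * _kernel signatures come from this header; the helper is hypothetical.
 */
#include <linux/ptrace.h>
#include <trace/events/exceptions.h>

static void example_trace_page_fault(struct pt_regs *regs, unsigned long address,
				     unsigned long error_code)
{
	if (user_mode(regs))
		trace_page_fault_user(address, regs, error_code);
	else
		trace_page_fault_kernel(address, regs, error_code);
}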
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 77b426ae0064..fd76d14c2776 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -23,10 +23,7 @@ struct partial_cluster;
#define show_mballoc_flags(flags) __print_flags(flags, "|", \
{ EXT4_MB_HINT_MERGE, "HINT_MERGE" }, \
- { EXT4_MB_HINT_RESERVED, "HINT_RESV" }, \
- { EXT4_MB_HINT_METADATA, "HINT_MDATA" }, \
{ EXT4_MB_HINT_FIRST, "HINT_FIRST" }, \
- { EXT4_MB_HINT_BEST, "HINT_BEST" }, \
{ EXT4_MB_HINT_DATA, "HINT_DATA" }, \
{ EXT4_MB_HINT_NOPREALLOC, "HINT_NOPREALLOC" }, \
{ EXT4_MB_HINT_GROUP_ALLOC, "HINT_GRP_ALLOC" }, \
@@ -42,7 +39,7 @@ struct partial_cluster;
{ EXT4_GET_BLOCKS_CREATE, "CREATE" }, \
{ EXT4_GET_BLOCKS_UNWRIT_EXT, "UNWRIT" }, \
{ EXT4_GET_BLOCKS_DELALLOC_RESERVE, "DELALLOC" }, \
- { EXT4_GET_BLOCKS_PRE_IO, "PRE_IO" }, \
+ { EXT4_GET_BLOCKS_SPLIT_NOMERGE, "SPLIT_NOMERGE" }, \
{ EXT4_GET_BLOCKS_CONVERT, "CONVERT" }, \
{ EXT4_GET_BLOCKS_METADATA_NOFAIL, "METADATA_NOFAIL" }, \
{ EXT4_GET_BLOCKS_NO_NORMALIZE, "NO_NORMALIZE" }, \
@@ -91,9 +88,9 @@ TRACE_DEFINE_ENUM(ES_REFERENCED_B);
#define show_falloc_mode(mode) __print_flags(mode, "|", \
{ FALLOC_FL_KEEP_SIZE, "KEEP_SIZE"}, \
{ FALLOC_FL_PUNCH_HOLE, "PUNCH_HOLE"}, \
- { FALLOC_FL_NO_HIDE_STALE, "NO_HIDE_STALE"}, \
{ FALLOC_FL_COLLAPSE_RANGE, "COLLAPSE_RANGE"}, \
- { FALLOC_FL_ZERO_RANGE, "ZERO_RANGE"})
+ { FALLOC_FL_ZERO_RANGE, "ZERO_RANGE"}, \
+ { FALLOC_FL_WRITE_ZEROES, "WRITE_ZEROES"})
TRACE_DEFINE_ENUM(EXT4_FC_REASON_XATTR);
TRACE_DEFINE_ENUM(EXT4_FC_REASON_CROSS_RENAME);
@@ -120,6 +117,20 @@ TRACE_DEFINE_ENUM(EXT4_FC_REASON_MAX);
{ EXT4_FC_REASON_INODE_JOURNAL_DATA, "INODE_JOURNAL_DATA"}, \
{ EXT4_FC_REASON_ENCRYPTED_FILENAME, "ENCRYPTED_FILENAME"})
+TRACE_DEFINE_ENUM(CR_POWER2_ALIGNED);
+TRACE_DEFINE_ENUM(CR_GOAL_LEN_FAST);
+TRACE_DEFINE_ENUM(CR_BEST_AVAIL_LEN);
+TRACE_DEFINE_ENUM(CR_GOAL_LEN_SLOW);
+TRACE_DEFINE_ENUM(CR_ANY_FREE);
+
+#define show_criteria(cr) \
+ __print_symbolic(cr, \
+ { CR_POWER2_ALIGNED, "CR_POWER2_ALIGNED" }, \
+ { CR_GOAL_LEN_FAST, "CR_GOAL_LEN_FAST" }, \
+ { CR_BEST_AVAIL_LEN, "CR_BEST_AVAIL_LEN" }, \
+ { CR_GOAL_LEN_SLOW, "CR_GOAL_LEN_SLOW" }, \
+ { CR_ANY_FREE, "CR_ANY_FREE" })
+
TRACE_EVENT(ext4_other_inode_update_time,
TP_PROTO(struct inode *inode, ino_t orig_ino),
@@ -469,16 +480,17 @@ TRACE_EVENT(ext4_writepages,
(unsigned long) __entry->writeback_index)
);
-TRACE_EVENT(ext4_da_write_pages,
- TP_PROTO(struct inode *inode, pgoff_t first_page,
+TRACE_EVENT(ext4_da_write_folios_start,
+ TP_PROTO(struct inode *inode, loff_t start_pos, loff_t next_pos,
struct writeback_control *wbc),
- TP_ARGS(inode, first_page, wbc),
+ TP_ARGS(inode, start_pos, next_pos, wbc),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
- __field( pgoff_t, first_page )
+ __field( loff_t, start_pos )
+ __field( loff_t, next_pos )
__field( long, nr_to_write )
__field( int, sync_mode )
),
@@ -486,18 +498,48 @@ TRACE_EVENT(ext4_da_write_pages,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->first_page = first_page;
+ __entry->start_pos = start_pos;
+ __entry->next_pos = next_pos;
__entry->nr_to_write = wbc->nr_to_write;
__entry->sync_mode = wbc->sync_mode;
),
- TP_printk("dev %d,%d ino %lu first_page %lu nr_to_write %ld "
- "sync_mode %d",
+ TP_printk("dev %d,%d ino %lu start_pos 0x%llx next_pos 0x%llx nr_to_write %ld sync_mode %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->first_page,
+ (unsigned long) __entry->ino, __entry->start_pos, __entry->next_pos,
__entry->nr_to_write, __entry->sync_mode)
);
+TRACE_EVENT(ext4_da_write_folios_end,
+ TP_PROTO(struct inode *inode, loff_t start_pos, loff_t next_pos,
+ struct writeback_control *wbc, int ret),
+
+ TP_ARGS(inode, start_pos, next_pos, wbc, ret),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( loff_t, start_pos )
+ __field( loff_t, next_pos )
+ __field( long, nr_to_write )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->start_pos = start_pos;
+ __entry->next_pos = next_pos;
+ __entry->nr_to_write = wbc->nr_to_write;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev %d,%d ino %lu start_pos 0x%llx next_pos 0x%llx nr_to_write %ld ret %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino, __entry->start_pos, __entry->next_pos,
+ __entry->nr_to_write, __entry->ret)
+);
+
TRACE_EVENT(ext4_da_write_pages_extent,
TP_PROTO(struct inode *inode, struct ext4_map_blocks *map),
@@ -560,10 +602,10 @@ TRACE_EVENT(ext4_writepages_result,
(unsigned long) __entry->writeback_index)
);
-DECLARE_EVENT_CLASS(ext4__page_op,
- TP_PROTO(struct page *page),
+DECLARE_EVENT_CLASS(ext4__folio_op,
+ TP_PROTO(struct inode *inode, struct folio *folio),
- TP_ARGS(page),
+ TP_ARGS(inode, folio),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -573,36 +615,29 @@ DECLARE_EVENT_CLASS(ext4__page_op,
),
TP_fast_assign(
- __entry->dev = page->mapping->host->i_sb->s_dev;
- __entry->ino = page->mapping->host->i_ino;
- __entry->index = page->index;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->index = folio->index;
),
- TP_printk("dev %d,%d ino %lu page_index %lu",
+ TP_printk("dev %d,%d ino %lu folio_index %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned long) __entry->index)
);
-DEFINE_EVENT(ext4__page_op, ext4_writepage,
+DEFINE_EVENT(ext4__folio_op, ext4_read_folio,
- TP_PROTO(struct page *page),
+ TP_PROTO(struct inode *inode, struct folio *folio),
- TP_ARGS(page)
+ TP_ARGS(inode, folio)
);
-DEFINE_EVENT(ext4__page_op, ext4_readpage,
+DEFINE_EVENT(ext4__folio_op, ext4_release_folio,
- TP_PROTO(struct page *page),
+ TP_PROTO(struct inode *inode, struct folio *folio),
- TP_ARGS(page)
-);
-
-DEFINE_EVENT(ext4__page_op, ext4_releasepage,
-
- TP_PROTO(struct page *page),
-
- TP_ARGS(page)
+ TP_ARGS(inode, folio)
);
DECLARE_EVENT_CLASS(ext4_invalidate_folio_op,
@@ -765,15 +800,14 @@ TRACE_EVENT(ext4_mb_release_group_pa,
);
TRACE_EVENT(ext4_discard_preallocations,
- TP_PROTO(struct inode *inode, unsigned int len, unsigned int needed),
+ TP_PROTO(struct inode *inode, unsigned int len),
- TP_ARGS(inode, len, needed),
+ TP_ARGS(inode, len),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( unsigned int, len )
- __field( unsigned int, needed )
),
@@ -781,13 +815,11 @@ TRACE_EVENT(ext4_discard_preallocations,
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->len = len;
- __entry->needed = needed;
),
- TP_printk("dev %d,%d ino %lu len: %u needed %u",
+ TP_printk("dev %d,%d ino %lu len: %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->len,
- __entry->needed)
+ (unsigned long) __entry->ino, __entry->len)
);
TRACE_EVENT(ext4_mb_discard_preallocations,
@@ -1070,7 +1102,7 @@ TRACE_EVENT(ext4_mballoc_alloc,
),
TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u "
- "result %u/%d/%u@%u blks %u grps %u cr %u flags %s "
+ "result %u/%d/%u@%u blks %u grps %u cr %s flags %s "
"tail %u broken %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
@@ -1080,7 +1112,7 @@ TRACE_EVENT(ext4_mballoc_alloc,
__entry->goal_len, __entry->goal_logical,
__entry->result_group, __entry->result_start,
__entry->result_len, __entry->result_logical,
- __entry->found, __entry->groups, __entry->cr,
+ __entry->found, __entry->groups, show_criteria(__entry->cr),
show_mballoc_flags(__entry->flags), __entry->tail,
__entry->buddy ? 1 << __entry->buddy : 0)
);
@@ -1242,14 +1274,15 @@ TRACE_EVENT(ext4_da_update_reserve_space,
);
TRACE_EVENT(ext4_da_reserve_space,
- TP_PROTO(struct inode *inode),
+ TP_PROTO(struct inode *inode, int nr_resv),
- TP_ARGS(inode),
+ TP_ARGS(inode, nr_resv),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( __u64, i_blocks )
+ __field( int, reserve_blocks )
__field( int, reserved_data_blocks )
__field( __u16, mode )
),
@@ -1258,16 +1291,17 @@ TRACE_EVENT(ext4_da_reserve_space,
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->i_blocks = inode->i_blocks;
+ __entry->reserve_blocks = nr_resv;
__entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
__entry->mode = inode->i_mode;
),
- TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu "
+ TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu reserve_blocks %d"
"reserved_data_blocks %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->mode, __entry->i_blocks,
- __entry->reserved_data_blocks)
+ __entry->reserve_blocks, __entry->reserved_data_blocks)
);
TRACE_EVENT(ext4_da_release_space,
@@ -2176,7 +2210,8 @@ DECLARE_EVENT_CLASS(ext4__es_extent,
__field( ext4_lblk_t, lblk )
__field( ext4_lblk_t, len )
__field( ext4_fsblk_t, pblk )
- __field( char, status )
+ __field( char, status )
+ __field( u64, seq )
),
TP_fast_assign(
@@ -2186,13 +2221,15 @@ DECLARE_EVENT_CLASS(ext4__es_extent,
__entry->len = es->es_len;
__entry->pblk = ext4_es_show_pblock(es);
__entry->status = ext4_es_status(es);
+ __entry->seq = EXT4_I(inode)->i_es_seq;
),
- TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s",
+ TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s seq %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->lblk, __entry->len,
- __entry->pblk, show_extent_status(__entry->status))
+ __entry->pblk, show_extent_status(__entry->status),
+ __entry->seq)
);
DEFINE_EVENT(ext4__es_extent, ext4_es_insert_extent,
@@ -2217,6 +2254,7 @@ TRACE_EVENT(ext4_es_remove_extent,
__field( ino_t, ino )
__field( loff_t, lblk )
__field( loff_t, len )
+ __field( u64, seq )
),
TP_fast_assign(
@@ -2224,12 +2262,13 @@ TRACE_EVENT(ext4_es_remove_extent,
__entry->ino = inode->i_ino;
__entry->lblk = lblk;
__entry->len = len;
+ __entry->seq = EXT4_I(inode)->i_es_seq;
),
- TP_printk("dev %d,%d ino %lu es [%lld/%lld)",
+ TP_printk("dev %d,%d ino %lu es [%lld/%lld) seq %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
- __entry->lblk, __entry->len)
+ __entry->lblk, __entry->len, __entry->seq)
);
TRACE_EVENT(ext4_es_find_extent_range_enter,
@@ -2474,11 +2513,11 @@ TRACE_EVENT(ext4_es_shrink,
__entry->scan_time, __entry->nr_skipped, __entry->retried)
);
-TRACE_EVENT(ext4_es_insert_delayed_block,
+TRACE_EVENT(ext4_es_insert_delayed_extent,
TP_PROTO(struct inode *inode, struct extent_status *es,
- bool allocated),
+ bool lclu_allocated, bool end_allocated),
- TP_ARGS(inode, es, allocated),
+ TP_ARGS(inode, es, lclu_allocated, end_allocated),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -2487,7 +2526,9 @@ TRACE_EVENT(ext4_es_insert_delayed_block,
__field( ext4_lblk_t, len )
__field( ext4_fsblk_t, pblk )
__field( char, status )
- __field( bool, allocated )
+ __field( bool, lclu_allocated )
+ __field( bool, end_allocated )
+ __field( u64, seq )
),
TP_fast_assign(
@@ -2497,16 +2538,18 @@ TRACE_EVENT(ext4_es_insert_delayed_block,
__entry->len = es->es_len;
__entry->pblk = ext4_es_show_pblock(es);
__entry->status = ext4_es_status(es);
- __entry->allocated = allocated;
+ __entry->lclu_allocated = lclu_allocated;
+ __entry->end_allocated = end_allocated;
+ __entry->seq = EXT4_I(inode)->i_es_seq;
),
- TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s "
- "allocated %d",
+ TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s allocated %d %d seq %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->lblk, __entry->len,
__entry->pblk, show_extent_status(__entry->status),
- __entry->allocated)
+ __entry->lclu_allocated, __entry->end_allocated,
+ __entry->seq)
);
/* fsmap traces */
@@ -2973,6 +3016,80 @@ TRACE_EVENT(ext4_update_sb,
__entry->fsblk, __entry->flags)
);
+TRACE_EVENT(ext4_move_extent_enter,
+ TP_PROTO(struct inode *orig_inode, struct ext4_map_blocks *orig_map,
+ struct inode *donor_inode, ext4_lblk_t donor_lblk),
+
+ TP_ARGS(orig_inode, orig_map, donor_inode, donor_lblk),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, orig_ino)
+ __field(ext4_lblk_t, orig_lblk)
+ __field(unsigned int, orig_flags)
+ __field(ino_t, donor_ino)
+ __field(ext4_lblk_t, donor_lblk)
+ __field(unsigned int, len)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = orig_inode->i_sb->s_dev;
+ __entry->orig_ino = orig_inode->i_ino;
+ __entry->orig_lblk = orig_map->m_lblk;
+ __entry->orig_flags = orig_map->m_flags;
+ __entry->donor_ino = donor_inode->i_ino;
+ __entry->donor_lblk = donor_lblk;
+ __entry->len = orig_map->m_len;
+ ),
+
+ TP_printk("dev %d,%d origin ino %lu lblk %u flags %s donor ino %lu lblk %u len %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->orig_ino, __entry->orig_lblk,
+ show_mflags(__entry->orig_flags),
+ (unsigned long) __entry->donor_ino, __entry->donor_lblk,
+ __entry->len)
+);
+
+TRACE_EVENT(ext4_move_extent_exit,
+ TP_PROTO(struct inode *orig_inode, ext4_lblk_t orig_lblk,
+ struct inode *donor_inode, ext4_lblk_t donor_lblk,
+ unsigned int m_len, u64 move_len, int move_type, int ret),
+
+ TP_ARGS(orig_inode, orig_lblk, donor_inode, donor_lblk, m_len,
+ move_len, move_type, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, orig_ino)
+ __field(ext4_lblk_t, orig_lblk)
+ __field(ino_t, donor_ino)
+ __field(ext4_lblk_t, donor_lblk)
+ __field(unsigned int, m_len)
+ __field(u64, move_len)
+ __field(int, move_type)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = orig_inode->i_sb->s_dev;
+ __entry->orig_ino = orig_inode->i_ino;
+ __entry->orig_lblk = orig_lblk;
+ __entry->donor_ino = donor_inode->i_ino;
+ __entry->donor_lblk = donor_lblk;
+ __entry->m_len = m_len;
+ __entry->move_len = move_len;
+ __entry->move_type = move_type;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev %d,%d origin ino %lu lblk %u donor ino %lu lblk %u m_len %u, move_len %llu type %d ret %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->orig_ino, __entry->orig_lblk,
+ (unsigned long) __entry->donor_ino, __entry->donor_lblk,
+ __entry->m_len, __entry->move_len, __entry->move_type,
+ __entry->ret)
+);
+
#endif /* _TRACE_EXT4_H */
/* This part must be outside protection */
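/*
 * Illustrative sketch, assumed structure inside fs/ext4 (where this trace
 * header is included): the new ext4_da_write_folios_start/_end pair is meant
 * to bracket one batch of delalloc writeback, reporting byte positions
 * instead of the old first_page index. The helper below is hypothetical;
 * only the trace_* signatures follow the events above.
 */
#include <linux/fs.h>
#include <linux/writeback.h>
#include <trace/events/ext4.h>

static int example_write_folio_batch(struct inode *inode, loff_t start_pos,
				     loff_t next_pos,
				     struct writeback_control *wbc)
{
	int ret = 0;

	trace_ext4_da_write_folios_start(inode, start_pos, next_pos, wbc);
	/* ... write back the folios covering [start_pos, next_pos) ... */
	trace_ext4_da_write_folios_end(inode, start_pos, next_pos, wbc, ret);
	return ret;
}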
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 31d994e6b4ca..df4017dcc701 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -50,6 +50,9 @@ TRACE_DEFINE_ENUM(CP_PAUSE);
TRACE_DEFINE_ENUM(CP_RESIZE);
TRACE_DEFINE_ENUM(EX_READ);
TRACE_DEFINE_ENUM(EX_BLOCK_AGE);
+TRACE_DEFINE_ENUM(CP_PHASE_START_BLOCK_OPS);
+TRACE_DEFINE_ENUM(CP_PHASE_FINISH_BLOCK_OPS);
+TRACE_DEFINE_ENUM(CP_PHASE_FINISH_CHECKPOINT);
#define show_block_type(type) \
__print_symbolic(type, \
@@ -139,7 +142,8 @@ TRACE_DEFINE_ENUM(EX_BLOCK_AGE);
{ CP_NODE_NEED_CP, "node needs cp" }, \
{ CP_FASTBOOT_MODE, "fastboot mode" }, \
{ CP_SPEC_LOG_NUM, "log type is 2" }, \
- { CP_RECOVER_DIR, "dir needs recovery" })
+ { CP_RECOVER_DIR, "dir needs recovery" }, \
+ { CP_XATTR_DIR, "dir's xattr updated" })
#define show_shutdown_mode(type) \
__print_symbolic(type, \
@@ -161,6 +165,25 @@ TRACE_DEFINE_ENUM(EX_BLOCK_AGE);
{ EX_READ, "Read" }, \
{ EX_BLOCK_AGE, "Block Age" })
+#define show_inode_type(x) \
+ __print_symbolic(x, \
+ { S_IFLNK, "symbolic" }, \
+ { S_IFREG, "regular" }, \
+ { S_IFDIR, "directory" }, \
+ { S_IFCHR, "character" }, \
+ { S_IFBLK, "block" }, \
+ { S_IFIFO, "fifo" }, \
+ { S_IFSOCK, "sock" })
+
+#define S_ALL_PERM (S_ISUID | S_ISGID | S_ISVTX | \
+ S_IRWXU | S_IRWXG | S_IRWXO)
+
+#define show_cp_phase(phase) \
+ __print_symbolic(phase, \
+ { CP_PHASE_START_BLOCK_OPS, "start block_ops" }, \
+ { CP_PHASE_FINISH_BLOCK_OPS, "finish block_ops" }, \
+ { CP_PHASE_FINISH_CHECKPOINT, "finish checkpoint" })
+
struct f2fs_sb_info;
struct f2fs_io_info;
struct extent_info;
@@ -190,7 +213,7 @@ DECLARE_EVENT_CLASS(f2fs__inode,
__entry->pino = F2FS_I(inode)->i_pino;
__entry->mode = inode->i_mode;
__entry->nlink = inode->i_nlink;
- __entry->size = inode->i_size;
+ __entry->size = i_size_read(inode);
__entry->blocks = inode->i_blocks;
__entry->advise = F2FS_I(inode)->i_advise;
),
@@ -215,17 +238,21 @@ DECLARE_EVENT_CLASS(f2fs__inode_exit,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
+ __field(umode_t, mode)
__field(int, ret)
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
+ __entry->mode = inode->i_mode;
__entry->ret = ret;
),
- TP_printk("dev = (%d,%d), ino = %lu, ret = %d",
+ TP_printk("dev = (%d,%d), ino = %lu, type: %s, mode = 0%o, ret = %d",
show_dev_ino(__entry),
+ show_inode_type(__entry->mode & S_IFMT),
+ __entry->mode & S_ALL_PERM,
__entry->ret)
);
@@ -335,9 +362,9 @@ TRACE_EVENT(f2fs_unlink_enter,
TP_fast_assign(
__entry->dev = dir->i_sb->s_dev;
__entry->ino = dir->i_ino;
- __entry->size = dir->i_size;
+ __entry->size = i_size_read(dir);
__entry->blocks = dir->i_blocks;
- __assign_str(name, dentry->d_name.name);
+ __assign_str(name);
),
TP_printk("dev = (%d,%d), dir ino = %lu, i_size = %lld, "
@@ -415,7 +442,7 @@ DECLARE_EVENT_CLASS(f2fs__truncate_op,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->size = inode->i_size;
+ __entry->size = i_size_read(inode);
__entry->blocks = inode->i_blocks;
__entry->from = from;
),
@@ -512,7 +539,7 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
- __field(nid_t, nid[3])
+ __array(nid_t, nid, 3)
__field(int, depth)
__field(int, err)
),
@@ -568,11 +595,43 @@ TRACE_EVENT(f2fs_file_write_iter,
__entry->ret)
);
+TRACE_EVENT(f2fs_fadvise,
+
+ TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int advice),
+
+ TP_ARGS(inode, offset, len, advice),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(loff_t, size)
+ __field(loff_t, offset)
+ __field(loff_t, len)
+ __field(int, advice)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->size = i_size_read(inode);
+ __entry->offset = offset;
+ __entry->len = len;
+ __entry->advice = advice;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, i_size = %lld offset:%llu, len:%llu, advise:%d",
+ show_dev_ino(__entry),
+ (unsigned long long)__entry->size,
+ __entry->offset,
+ __entry->len,
+ __entry->advice)
+);
+
TRACE_EVENT(f2fs_map_blocks,
- TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map,
- int create, int flag, int ret),
+ TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map, int flag,
+ int ret),
- TP_ARGS(inode, map, create, flag, ret),
+ TP_ARGS(inode, map, flag, ret),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -584,7 +643,6 @@ TRACE_EVENT(f2fs_map_blocks,
__field(int, m_seg_type)
__field(bool, m_may_create)
__field(bool, m_multidev_dio)
- __field(int, create)
__field(int, flag)
__field(int, ret)
),
@@ -599,7 +657,6 @@ TRACE_EVENT(f2fs_map_blocks,
__entry->m_seg_type = map->m_seg_type;
__entry->m_may_create = map->m_may_create;
__entry->m_multidev_dio = map->m_multidev_dio;
- __entry->create = create;
__entry->flag = flag;
__entry->ret = ret;
),
@@ -607,7 +664,7 @@ TRACE_EVENT(f2fs_map_blocks,
TP_printk("dev = (%d,%d), ino = %lu, file offset = %llu, "
"start blkaddr = 0x%llx, len = 0x%llx, flags = %u, "
"seg_type = %d, may_create = %d, multidevice = %d, "
- "create = %d, flag = %d, err = %d",
+ "flag = %d, err = %d",
show_dev_ino(__entry),
(unsigned long long)__entry->m_lblk,
(unsigned long long)__entry->m_pblk,
@@ -616,7 +673,6 @@ TRACE_EVENT(f2fs_map_blocks,
__entry->m_seg_type,
__entry->m_may_create,
__entry->m_multidev_dio,
- __entry->create,
__entry->flag,
__entry->ret)
);
@@ -829,7 +885,7 @@ TRACE_EVENT(f2fs_lookup_start,
TP_fast_assign(
__entry->dev = dir->i_sb->s_dev;
__entry->ino = dir->i_ino;
- __assign_str(name, dentry->d_name.name);
+ __assign_str(name);
__entry->flags = flags;
),
@@ -857,7 +913,7 @@ TRACE_EVENT(f2fs_lookup_end,
TP_fast_assign(
__entry->dev = dir->i_sb->s_dev;
__entry->ino = dir->i_ino;
- __assign_str(name, dentry->d_name.name);
+ __assign_str(name);
__entry->cino = ino;
__entry->err = err;
),
@@ -869,6 +925,75 @@ TRACE_EVENT(f2fs_lookup_end,
__entry->err)
);
+TRACE_EVENT(f2fs_rename_start,
+
+ TP_PROTO(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags),
+
+ TP_ARGS(old_dir, old_dentry, new_dir, new_dentry, flags),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __string(old_name, old_dentry->d_name.name)
+ __field(ino_t, new_pino)
+ __string(new_name, new_dentry->d_name.name)
+ __field(unsigned int, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = old_dir->i_sb->s_dev;
+ __entry->ino = old_dir->i_ino;
+ __assign_str(old_name);
+ __entry->new_pino = new_dir->i_ino;
+ __assign_str(new_name);
+ __entry->flags = flags;
+ ),
+
+ TP_printk("dev = (%d,%d), old_dir = %lu, old_name: %s, "
+ "new_dir = %lu, new_name: %s, flags = %u",
+ show_dev_ino(__entry),
+ __get_str(old_name),
+ __entry->new_pino,
+ __get_str(new_name),
+ __entry->flags)
+);
+
+TRACE_EVENT(f2fs_rename_end,
+
+ TP_PROTO(struct dentry *old_dentry, struct dentry *new_dentry,
+ unsigned int flags, int ret),
+
+ TP_ARGS(old_dentry, new_dentry, flags, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __string(old_name, old_dentry->d_name.name)
+ __string(new_name, new_dentry->d_name.name)
+ __field(unsigned int, flags)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = old_dentry->d_sb->s_dev;
+ __entry->ino = old_dentry->d_inode->i_ino;
+ __assign_str(old_name);
+ __assign_str(new_name);
+ __entry->flags = flags;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, old_name: %s, "
+ "new_name: %s, flags = %u, ret = %d",
+ show_dev_ino(__entry),
+ __get_str(old_name),
+ __get_str(new_name),
+ __entry->flags,
+ __entry->ret)
+);
+
TRACE_EVENT(f2fs_readdir,
TP_PROTO(struct inode *dir, loff_t start_pos, loff_t end_pos, int err),
@@ -922,7 +1047,7 @@ TRACE_EVENT(f2fs_fallocate,
__entry->mode = mode;
__entry->offset = offset;
__entry->len = len;
- __entry->size = inode->i_size;
+ __entry->size = i_size_read(inode);
__entry->blocks = inode->i_blocks;
__entry->ret = ret;
),
@@ -1035,11 +1160,11 @@ TRACE_EVENT(f2fs_reserve_new_blocks,
(unsigned long long)__entry->count)
);
-DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
+DECLARE_EVENT_CLASS(f2fs__submit_folio_bio,
- TP_PROTO(struct page *page, struct f2fs_io_info *fio),
+ TP_PROTO(struct folio *folio, struct f2fs_io_info *fio),
- TP_ARGS(page, fio),
+ TP_ARGS(folio, fio),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -1054,9 +1179,9 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
),
TP_fast_assign(
- __entry->dev = page_file_mapping(page)->host->i_sb->s_dev;
- __entry->ino = page_file_mapping(page)->host->i_ino;
- __entry->index = page->index;
+ __entry->dev = folio->mapping->host->i_sb->s_dev;
+ __entry->ino = folio->mapping->host->i_ino;
+ __entry->index = folio->index;
__entry->old_blkaddr = fio->old_blkaddr;
__entry->new_blkaddr = fio->new_blkaddr;
__entry->op = fio->op;
@@ -1065,7 +1190,7 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
__entry->type = fio->type;
),
- TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
+ TP_printk("dev = (%d,%d), ino = %lu, folio_index = 0x%lx, "
"oldaddr = 0x%llx, newaddr = 0x%llx, rw = %s(%s), type = %s_%s",
show_dev_ino(__entry),
(unsigned long)__entry->index,
@@ -1076,22 +1201,22 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
show_block_type(__entry->type))
);
-DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_bio,
+DEFINE_EVENT_CONDITION(f2fs__submit_folio_bio, f2fs_submit_folio_bio,
- TP_PROTO(struct page *page, struct f2fs_io_info *fio),
+ TP_PROTO(struct folio *folio, struct f2fs_io_info *fio),
- TP_ARGS(page, fio),
+ TP_ARGS(folio, fio),
- TP_CONDITION(page->mapping)
+ TP_CONDITION(folio->mapping)
);
-DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_write,
+DEFINE_EVENT_CONDITION(f2fs__submit_folio_bio, f2fs_submit_folio_write,
- TP_PROTO(struct page *page, struct f2fs_io_info *fio),
+ TP_PROTO(struct folio *folio, struct f2fs_io_info *fio),
- TP_ARGS(page, fio),
+ TP_ARGS(folio, fio),
- TP_CONDITION(page->mapping)
+ TP_CONDITION(folio->mapping)
);
DECLARE_EVENT_CLASS(f2fs__bio,
@@ -1221,11 +1346,11 @@ TRACE_EVENT(f2fs_write_end,
__entry->copied)
);
-DECLARE_EVENT_CLASS(f2fs__page,
+DECLARE_EVENT_CLASS(f2fs__folio,
- TP_PROTO(struct page *page, int type),
+ TP_PROTO(struct folio *folio, int type),
- TP_ARGS(page, type),
+ TP_ARGS(folio, type),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -1238,14 +1363,13 @@ DECLARE_EVENT_CLASS(f2fs__page,
),
TP_fast_assign(
- __entry->dev = page_file_mapping(page)->host->i_sb->s_dev;
- __entry->ino = page_file_mapping(page)->host->i_ino;
+ __entry->dev = folio->mapping->host->i_sb->s_dev;
+ __entry->ino = folio->mapping->host->i_ino;
__entry->type = type;
- __entry->dir =
- S_ISDIR(page_file_mapping(page)->host->i_mode);
- __entry->index = page->index;
- __entry->dirty = PageDirty(page);
- __entry->uptodate = PageUptodate(page);
+ __entry->dir = S_ISDIR(folio->mapping->host->i_mode);
+ __entry->index = folio->index;
+ __entry->dirty = folio_test_dirty(folio);
+ __entry->uptodate = folio_test_uptodate(folio);
),
TP_printk("dev = (%d,%d), ino = %lu, %s, %s, index = %lu, "
@@ -1258,65 +1382,115 @@ DECLARE_EVENT_CLASS(f2fs__page,
__entry->uptodate)
);
-DEFINE_EVENT(f2fs__page, f2fs_writepage,
+DEFINE_EVENT(f2fs__folio, f2fs_writepage,
- TP_PROTO(struct page *page, int type),
+ TP_PROTO(struct folio *folio, int type),
- TP_ARGS(page, type)
+ TP_ARGS(folio, type)
);
-DEFINE_EVENT(f2fs__page, f2fs_do_write_data_page,
+DEFINE_EVENT(f2fs__folio, f2fs_do_write_data_page,
- TP_PROTO(struct page *page, int type),
+ TP_PROTO(struct folio *folio, int type),
- TP_ARGS(page, type)
+ TP_ARGS(folio, type)
);
-DEFINE_EVENT(f2fs__page, f2fs_readpage,
+DEFINE_EVENT(f2fs__folio, f2fs_readpage,
- TP_PROTO(struct page *page, int type),
+ TP_PROTO(struct folio *folio, int type),
- TP_ARGS(page, type)
+ TP_ARGS(folio, type)
);
-DEFINE_EVENT(f2fs__page, f2fs_set_page_dirty,
+DEFINE_EVENT(f2fs__folio, f2fs_set_page_dirty,
- TP_PROTO(struct page *page, int type),
+ TP_PROTO(struct folio *folio, int type),
- TP_ARGS(page, type)
+ TP_ARGS(folio, type)
);
-DEFINE_EVENT(f2fs__page, f2fs_vm_page_mkwrite,
+TRACE_EVENT(f2fs_replace_atomic_write_block,
+
+ TP_PROTO(struct inode *inode, struct inode *cow_inode, pgoff_t index,
+ block_t old_addr, block_t new_addr, bool recovery),
+
+ TP_ARGS(inode, cow_inode, index, old_addr, new_addr, recovery),
- TP_PROTO(struct page *page, int type),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(ino_t, cow_ino)
+ __field(pgoff_t, index)
+ __field(block_t, old_addr)
+ __field(block_t, new_addr)
+ __field(bool, recovery)
+ ),
- TP_ARGS(page, type)
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->cow_ino = cow_inode->i_ino;
+ __entry->index = index;
+ __entry->old_addr = old_addr;
+ __entry->new_addr = new_addr;
+ __entry->recovery = recovery;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, cow_ino = %lu, index = %lu, "
+ "old_addr = 0x%llx, new_addr = 0x%llx, recovery = %d",
+ show_dev_ino(__entry),
+ __entry->cow_ino,
+ (unsigned long)__entry->index,
+ (unsigned long long)__entry->old_addr,
+ (unsigned long long)__entry->new_addr,
+ __entry->recovery)
);
-TRACE_EVENT(f2fs_filemap_fault,
+DECLARE_EVENT_CLASS(f2fs_mmap,
- TP_PROTO(struct inode *inode, pgoff_t index, unsigned long ret),
+ TP_PROTO(struct inode *inode, pgoff_t index,
+ vm_flags_t flags, vm_fault_t ret),
- TP_ARGS(inode, index, ret),
+ TP_ARGS(inode, index, flags, ret),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
__field(pgoff_t, index)
- __field(unsigned long, ret)
+ __field(vm_flags_t, flags)
+ __field(vm_fault_t, ret)
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->index = index;
+ __entry->flags = flags;
__entry->ret = ret;
),
- TP_printk("dev = (%d,%d), ino = %lu, index = %lu, ret = %lx",
+ TP_printk("dev = (%d,%d), ino = %lu, index = %lu, flags: %s, ret: %s",
show_dev_ino(__entry),
(unsigned long)__entry->index,
- __entry->ret)
+ __print_flags(__entry->flags, "|", FAULT_FLAG_TRACE),
+ __print_flags(__entry->ret, "|", VM_FAULT_RESULT_TRACE))
+);
+
+DEFINE_EVENT(f2fs_mmap, f2fs_filemap_fault,
+
+ TP_PROTO(struct inode *inode, pgoff_t index,
+ vm_flags_t flags, vm_fault_t ret),
+
+ TP_ARGS(inode, index, flags, ret)
+);
+
+DEFINE_EVENT(f2fs_mmap, f2fs_vm_page_mkwrite,
+
+ TP_PROTO(struct inode *inode, pgoff_t index,
+ vm_flags_t flags, vm_fault_t ret),
+
+ TP_ARGS(inode, index, flags, ret)
);
TRACE_EVENT(f2fs_writepages,
@@ -1339,7 +1513,6 @@ TRACE_EVENT(f2fs_writepages,
__field(char, for_kupdate)
__field(char, for_background)
__field(char, tagged_writepages)
- __field(char, for_reclaim)
__field(char, range_cyclic)
__field(char, for_sync)
),
@@ -1358,14 +1531,13 @@ TRACE_EVENT(f2fs_writepages,
__entry->for_kupdate = wbc->for_kupdate;
__entry->for_background = wbc->for_background;
__entry->tagged_writepages = wbc->tagged_writepages;
- __entry->for_reclaim = wbc->for_reclaim;
__entry->range_cyclic = wbc->range_cyclic;
__entry->for_sync = wbc->for_sync;
),
TP_printk("dev = (%d,%d), ino = %lu, %s, %s, nr_to_write %ld, "
"skipped %ld, start %lld, end %lld, wb_idx %lu, sync_mode %d, "
- "kupdate %u background %u tagged %u reclaim %u cyclic %u sync %u",
+ "kupdate %u background %u tagged %u cyclic %u sync %u",
show_dev_ino(__entry),
show_block_type(__entry->type),
show_file_type(__entry->dir),
@@ -1378,7 +1550,6 @@ TRACE_EVENT(f2fs_writepages,
__entry->for_kupdate,
__entry->for_background,
__entry->tagged_writepages,
- __entry->for_reclaim,
__entry->range_cyclic,
__entry->for_sync)
);
@@ -1411,26 +1582,26 @@ TRACE_EVENT(f2fs_readpages,
TRACE_EVENT(f2fs_write_checkpoint,
- TP_PROTO(struct super_block *sb, int reason, const char *msg),
+ TP_PROTO(struct super_block *sb, int reason, u16 phase),
- TP_ARGS(sb, reason, msg),
+ TP_ARGS(sb, reason, phase),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(int, reason)
- __string(dest_msg, msg)
+ __field(u16, phase)
),
TP_fast_assign(
__entry->dev = sb->s_dev;
__entry->reason = reason;
- __assign_str(dest_msg, msg);
+ __entry->phase = phase;
),
TP_printk("dev = (%d,%d), checkpoint for %s, state = %s",
show_dev(__entry->dev),
show_cpreason(__entry->reason),
- __get_str(dest_msg))
+ show_cp_phase(__entry->phase))
);
DECLARE_EVENT_CLASS(f2fs_discard,
@@ -1478,7 +1649,7 @@ DEFINE_EVENT(f2fs_discard, f2fs_remove_discard,
TP_ARGS(dev, blkstart, blklen)
);
-TRACE_EVENT(f2fs_issue_reset_zone,
+DECLARE_EVENT_CLASS(f2fs_reset_zone,
TP_PROTO(struct block_device *dev, block_t blkstart),
@@ -1494,11 +1665,25 @@ TRACE_EVENT(f2fs_issue_reset_zone,
__entry->blkstart = blkstart;
),
- TP_printk("dev = (%d,%d), reset zone at block = 0x%llx",
+ TP_printk("dev = (%d,%d), zone at block = 0x%llx",
show_dev(__entry->dev),
(unsigned long long)__entry->blkstart)
);
+DEFINE_EVENT(f2fs_reset_zone, f2fs_queue_reset_zone,
+
+ TP_PROTO(struct block_device *dev, block_t blkstart),
+
+ TP_ARGS(dev, blkstart)
+);
+
+DEFINE_EVENT(f2fs_reset_zone, f2fs_issue_reset_zone,
+
+ TP_PROTO(struct block_device *dev, block_t blkstart),
+
+ TP_ARGS(dev, blkstart)
+);
+
TRACE_EVENT(f2fs_issue_flush,
TP_PROTO(struct block_device *dev, unsigned int nobarrier,
@@ -1945,6 +2130,7 @@ TRACE_EVENT(f2fs_iostat,
__field(unsigned long long, fs_nrio)
__field(unsigned long long, fs_mrio)
__field(unsigned long long, fs_discard)
+ __field(unsigned long long, fs_reset_zone)
),
TP_fast_assign(
@@ -1975,13 +2161,15 @@ TRACE_EVENT(f2fs_iostat,
__entry->fs_cdrio = iostat[FS_CDATA_READ_IO];
__entry->fs_nrio = iostat[FS_NODE_READ_IO];
__entry->fs_mrio = iostat[FS_META_READ_IO];
- __entry->fs_discard = iostat[FS_DISCARD];
+ __entry->fs_discard = iostat[FS_DISCARD_IO];
+ __entry->fs_reset_zone = iostat[FS_ZONE_RESET_IO];
),
TP_printk("dev = (%d,%d), "
"app [write=%llu (direct=%llu, buffered=%llu), mapped=%llu, "
"compr(buffered=%llu, mapped=%llu)], "
- "fs [data=%llu, cdata=%llu, node=%llu, meta=%llu, discard=%llu], "
+ "fs [data=%llu, cdata=%llu, node=%llu, meta=%llu, discard=%llu, "
+ "reset_zone=%llu], "
"gc [data=%llu, node=%llu], "
"cp [data=%llu, node=%llu, meta=%llu], "
"app [read=%llu (direct=%llu, buffered=%llu), mapped=%llu], "
@@ -1992,6 +2180,7 @@ TRACE_EVENT(f2fs_iostat,
__entry->app_bio, __entry->app_mio, __entry->app_bcdio,
__entry->app_mcdio, __entry->fs_dio, __entry->fs_cdio,
__entry->fs_nio, __entry->fs_mio, __entry->fs_discard,
+ __entry->fs_reset_zone,
__entry->fs_gc_dio, __entry->fs_gc_nio, __entry->fs_cp_dio,
__entry->fs_cp_nio, __entry->fs_cp_mio,
__entry->app_rio, __entry->app_drio, __entry->app_brio,
@@ -2048,33 +2237,33 @@ TRACE_EVENT(f2fs_iostat_latency,
TP_fast_assign(
__entry->dev = sbi->sb->s_dev;
- __entry->d_rd_peak = iostat_lat[0][DATA].peak_lat;
- __entry->d_rd_avg = iostat_lat[0][DATA].avg_lat;
- __entry->d_rd_cnt = iostat_lat[0][DATA].cnt;
- __entry->n_rd_peak = iostat_lat[0][NODE].peak_lat;
- __entry->n_rd_avg = iostat_lat[0][NODE].avg_lat;
- __entry->n_rd_cnt = iostat_lat[0][NODE].cnt;
- __entry->m_rd_peak = iostat_lat[0][META].peak_lat;
- __entry->m_rd_avg = iostat_lat[0][META].avg_lat;
- __entry->m_rd_cnt = iostat_lat[0][META].cnt;
- __entry->d_wr_s_peak = iostat_lat[1][DATA].peak_lat;
- __entry->d_wr_s_avg = iostat_lat[1][DATA].avg_lat;
- __entry->d_wr_s_cnt = iostat_lat[1][DATA].cnt;
- __entry->n_wr_s_peak = iostat_lat[1][NODE].peak_lat;
- __entry->n_wr_s_avg = iostat_lat[1][NODE].avg_lat;
- __entry->n_wr_s_cnt = iostat_lat[1][NODE].cnt;
- __entry->m_wr_s_peak = iostat_lat[1][META].peak_lat;
- __entry->m_wr_s_avg = iostat_lat[1][META].avg_lat;
- __entry->m_wr_s_cnt = iostat_lat[1][META].cnt;
- __entry->d_wr_as_peak = iostat_lat[2][DATA].peak_lat;
- __entry->d_wr_as_avg = iostat_lat[2][DATA].avg_lat;
- __entry->d_wr_as_cnt = iostat_lat[2][DATA].cnt;
- __entry->n_wr_as_peak = iostat_lat[2][NODE].peak_lat;
- __entry->n_wr_as_avg = iostat_lat[2][NODE].avg_lat;
- __entry->n_wr_as_cnt = iostat_lat[2][NODE].cnt;
- __entry->m_wr_as_peak = iostat_lat[2][META].peak_lat;
- __entry->m_wr_as_avg = iostat_lat[2][META].avg_lat;
- __entry->m_wr_as_cnt = iostat_lat[2][META].cnt;
+ __entry->d_rd_peak = iostat_lat[READ_IO][DATA].peak_lat;
+ __entry->d_rd_avg = iostat_lat[READ_IO][DATA].avg_lat;
+ __entry->d_rd_cnt = iostat_lat[READ_IO][DATA].cnt;
+ __entry->n_rd_peak = iostat_lat[READ_IO][NODE].peak_lat;
+ __entry->n_rd_avg = iostat_lat[READ_IO][NODE].avg_lat;
+ __entry->n_rd_cnt = iostat_lat[READ_IO][NODE].cnt;
+ __entry->m_rd_peak = iostat_lat[READ_IO][META].peak_lat;
+ __entry->m_rd_avg = iostat_lat[READ_IO][META].avg_lat;
+ __entry->m_rd_cnt = iostat_lat[READ_IO][META].cnt;
+ __entry->d_wr_s_peak = iostat_lat[WRITE_SYNC_IO][DATA].peak_lat;
+ __entry->d_wr_s_avg = iostat_lat[WRITE_SYNC_IO][DATA].avg_lat;
+ __entry->d_wr_s_cnt = iostat_lat[WRITE_SYNC_IO][DATA].cnt;
+ __entry->n_wr_s_peak = iostat_lat[WRITE_SYNC_IO][NODE].peak_lat;
+ __entry->n_wr_s_avg = iostat_lat[WRITE_SYNC_IO][NODE].avg_lat;
+ __entry->n_wr_s_cnt = iostat_lat[WRITE_SYNC_IO][NODE].cnt;
+ __entry->m_wr_s_peak = iostat_lat[WRITE_SYNC_IO][META].peak_lat;
+ __entry->m_wr_s_avg = iostat_lat[WRITE_SYNC_IO][META].avg_lat;
+ __entry->m_wr_s_cnt = iostat_lat[WRITE_SYNC_IO][META].cnt;
+ __entry->d_wr_as_peak = iostat_lat[WRITE_ASYNC_IO][DATA].peak_lat;
+ __entry->d_wr_as_avg = iostat_lat[WRITE_ASYNC_IO][DATA].avg_lat;
+ __entry->d_wr_as_cnt = iostat_lat[WRITE_ASYNC_IO][DATA].cnt;
+ __entry->n_wr_as_peak = iostat_lat[WRITE_ASYNC_IO][NODE].peak_lat;
+ __entry->n_wr_as_avg = iostat_lat[WRITE_ASYNC_IO][NODE].avg_lat;
+ __entry->n_wr_as_cnt = iostat_lat[WRITE_ASYNC_IO][NODE].cnt;
+ __entry->m_wr_as_peak = iostat_lat[WRITE_ASYNC_IO][META].peak_lat;
+ __entry->m_wr_as_avg = iostat_lat[WRITE_ASYNC_IO][META].avg_lat;
+ __entry->m_wr_as_cnt = iostat_lat[WRITE_ASYNC_IO][META].cnt;
),
TP_printk("dev = (%d,%d), "
@@ -2182,12 +2371,12 @@ DECLARE_EVENT_CLASS(f2fs__rw_start,
* because this screws up the tooling that parses
* the traces.
*/
- __assign_str(pathbuf, pathname);
+ __assign_str(pathbuf);
(void)strreplace(__get_str(pathbuf), ' ', '_');
__entry->offset = offset;
__entry->bytes = bytes;
__entry->i_size = i_size_read(inode);
- __assign_str(cmdline, command);
+ __assign_str(cmdline);
(void)strreplace(__get_str(cmdline), ' ', '_');
__entry->pid = pid;
__entry->ino = inode->i_ino;
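/*
 * Illustrative sketch, assumed call site inside fs/f2fs: the reworked
 * f2fs_filemap_fault event now takes the VMA flags and the vm_fault_t result
 * rather than a bare unsigned long. The helper is hypothetical.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <trace/events/f2fs.h>

static vm_fault_t example_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	ret = filemap_fault(vmf);
	trace_f2fs_filemap_fault(inode, vmf->pgoff, vmf->vma->vm_flags, ret);
	return ret;
}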
diff --git a/include/trace/events/fib.h b/include/trace/events/fib.h
index c2300c407f58..feb28b359eff 100644
--- a/include/trace/events/fib.h
+++ b/include/trace/events/fib.h
@@ -7,6 +7,8 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
+#include <net/flow.h>
+#include <net/inet_dscp.h>
#include <net/ip_fib.h>
#include <linux/tracepoint.h>
@@ -36,7 +38,6 @@ TRACE_EVENT(fib_table_lookup,
),
TP_fast_assign(
- struct in6_addr in6_zero = {};
struct net_device *dev;
struct in6_addr *in6;
__be32 *p32;
@@ -45,7 +46,7 @@ TRACE_EVENT(fib_table_lookup,
__entry->err = err;
__entry->oif = flp->flowi4_oif;
__entry->iif = flp->flowi4_iif;
- __entry->tos = flp->flowi4_tos;
+ __entry->tos = inet_dscp_to_dsfield(flp->flowi4_dscp);
__entry->scope = flp->flowi4_scope;
__entry->flags = flp->flowi4_flags;
@@ -66,7 +67,7 @@ TRACE_EVENT(fib_table_lookup,
}
dev = nhc ? nhc->nhc_dev : NULL;
- strlcpy(__entry->name, dev ? dev->name : "-", IFNAMSIZ);
+ strscpy(__entry->name, dev ? dev->name : "-", IFNAMSIZ);
if (nhc) {
if (nhc->nhc_gw_family == AF_INET) {
@@ -74,7 +75,7 @@ TRACE_EVENT(fib_table_lookup,
*p32 = nhc->nhc_gw.ipv4;
in6 = (struct in6_addr *)__entry->gw6;
- *in6 = in6_zero;
+ *in6 = in6addr_any;
} else if (nhc->nhc_gw_family == AF_INET6) {
p32 = (__be32 *) __entry->gw4;
*p32 = 0;
@@ -87,7 +88,7 @@ TRACE_EVENT(fib_table_lookup,
*p32 = 0;
in6 = (struct in6_addr *)__entry->gw6;
- *in6 = in6_zero;
+ *in6 = in6addr_any;
}
),
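/*
 * Illustrative sketch, standalone example: the helper used above converts the
 * flow's dscp_t value back into the DS field byte, which is effectively what
 * the old flowi4_tos field carried, so the "tos" column keeps its meaning.
 * The flowi4_dscp member is assumed to exist as used by this patch.
 */
#include <net/flow.h>
#include <net/inet_dscp.h>

static __u8 example_tos_from_flow(const struct flowi4 *flp)
{
	return inet_dscp_to_dsfield(flp->flowi4_dscp);
}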
diff --git a/include/trace/events/fib6.h b/include/trace/events/fib6.h
index 6e821eb79450..8d22b2e98d48 100644
--- a/include/trace/events/fib6.h
+++ b/include/trace/events/fib6.h
@@ -22,6 +22,7 @@ TRACE_EVENT(fib6_table_lookup,
__field( int, err )
__field( int, oif )
__field( int, iif )
+ __field( u32, flowlabel )
__field( __u8, tos )
__field( __u8, scope )
__field( __u8, flags )
@@ -42,6 +43,7 @@ TRACE_EVENT(fib6_table_lookup,
__entry->err = ip6_rt_type_to_error(res->fib6_type);
__entry->oif = flp->flowi6_oif;
__entry->iif = flp->flowi6_iif;
+ __entry->flowlabel = ntohl(flowi6_get_flowlabel(flp));
__entry->tos = ip6_tclass(flp->flowlabel);
__entry->scope = flp->flowi6_scope;
__entry->flags = flp->flowi6_flags;
@@ -63,27 +65,24 @@ TRACE_EVENT(fib6_table_lookup,
}
if (res->nh && res->nh->fib_nh_dev) {
- strlcpy(__entry->name, res->nh->fib_nh_dev->name, IFNAMSIZ);
+ strscpy(__entry->name, res->nh->fib_nh_dev->name, IFNAMSIZ);
} else {
strcpy(__entry->name, "-");
}
if (res->f6i == net->ipv6.fib6_null_entry) {
- struct in6_addr in6_zero = {};
-
in6 = (struct in6_addr *)__entry->gw;
- *in6 = in6_zero;
-
+ *in6 = in6addr_any;
} else if (res->nh) {
in6 = (struct in6_addr *)__entry->gw;
*in6 = res->nh->fib_nh_gw6;
}
),
- TP_printk("table %3u oif %d iif %d proto %u %pI6c/%u -> %pI6c/%u tos %d scope %d flags %x ==> dev %s gw %pI6c err %d",
+ TP_printk("table %3u oif %d iif %d proto %u %pI6c/%u -> %pI6c/%u flowlabel %#x tos %d scope %d flags %x ==> dev %s gw %pI6c err %d",
__entry->tb_id, __entry->oif, __entry->iif, __entry->proto,
__entry->src, __entry->sport, __entry->dst, __entry->dport,
- __entry->tos, __entry->scope, __entry->flags,
- __entry->name, __entry->gw, __entry->err)
+ __entry->flowlabel, __entry->tos, __entry->scope,
+ __entry->flags, __entry->name, __entry->gw, __entry->err)
);
#endif /* _TRACE_FIB6_H */
diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h
index 1646dadd7f37..370016c38a5b 100644
--- a/include/trace/events/filelock.h
+++ b/include/trace/events/filelock.h
@@ -27,7 +27,8 @@
{ FL_SLEEP, "FL_SLEEP" }, \
{ FL_DOWNGRADE_PENDING, "FL_DOWNGRADE_PENDING" }, \
{ FL_UNLOCK_PENDING, "FL_UNLOCK_PENDING" }, \
- { FL_OFDLCK, "FL_OFDLCK" })
+ { FL_OFDLCK, "FL_OFDLCK" }, \
+ { FL_RECLAIM, "FL_RECLAIM"})
#define show_fl_type(val) \
__print_symbolic(val, \
@@ -68,11 +69,11 @@ DECLARE_EVENT_CLASS(filelock_lock,
__field(struct file_lock *, fl)
__field(unsigned long, i_ino)
__field(dev_t, s_dev)
- __field(struct file_lock *, fl_blocker)
- __field(fl_owner_t, fl_owner)
- __field(unsigned int, fl_pid)
- __field(unsigned int, fl_flags)
- __field(unsigned char, fl_type)
+ __field(struct file_lock_core *, blocker)
+ __field(fl_owner_t, owner)
+ __field(unsigned int, pid)
+ __field(unsigned int, flags)
+ __field(unsigned char, type)
__field(loff_t, fl_start)
__field(loff_t, fl_end)
__field(int, ret)
@@ -82,11 +83,11 @@ DECLARE_EVENT_CLASS(filelock_lock,
__entry->fl = fl ? fl : NULL;
__entry->s_dev = inode->i_sb->s_dev;
__entry->i_ino = inode->i_ino;
- __entry->fl_blocker = fl ? fl->fl_blocker : NULL;
- __entry->fl_owner = fl ? fl->fl_owner : NULL;
- __entry->fl_pid = fl ? fl->fl_pid : 0;
- __entry->fl_flags = fl ? fl->fl_flags : 0;
- __entry->fl_type = fl ? fl->fl_type : 0;
+ __entry->blocker = fl ? fl->c.flc_blocker : NULL;
+ __entry->owner = fl ? fl->c.flc_owner : NULL;
+ __entry->pid = fl ? fl->c.flc_pid : 0;
+ __entry->flags = fl ? fl->c.flc_flags : 0;
+ __entry->type = fl ? fl->c.flc_type : 0;
__entry->fl_start = fl ? fl->fl_start : 0;
__entry->fl_end = fl ? fl->fl_end : 0;
__entry->ret = ret;
@@ -94,9 +95,9 @@ DECLARE_EVENT_CLASS(filelock_lock,
TP_printk("fl=%p dev=0x%x:0x%x ino=0x%lx fl_blocker=%p fl_owner=%p fl_pid=%u fl_flags=%s fl_type=%s fl_start=%lld fl_end=%lld ret=%d",
__entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
- __entry->i_ino, __entry->fl_blocker, __entry->fl_owner,
- __entry->fl_pid, show_fl_flags(__entry->fl_flags),
- show_fl_type(__entry->fl_type),
+ __entry->i_ino, __entry->blocker, __entry->owner,
+ __entry->pid, show_fl_flags(__entry->flags),
+ show_fl_type(__entry->type),
__entry->fl_start, __entry->fl_end, __entry->ret)
);
@@ -117,59 +118,59 @@ DEFINE_EVENT(filelock_lock, flock_lock_inode,
TP_ARGS(inode, fl, ret));
DECLARE_EVENT_CLASS(filelock_lease,
- TP_PROTO(struct inode *inode, struct file_lock *fl),
+ TP_PROTO(struct inode *inode, struct file_lease *fl),
TP_ARGS(inode, fl),
TP_STRUCT__entry(
- __field(struct file_lock *, fl)
+ __field(struct file_lease *, fl)
__field(unsigned long, i_ino)
__field(dev_t, s_dev)
- __field(struct file_lock *, fl_blocker)
- __field(fl_owner_t, fl_owner)
- __field(unsigned int, fl_flags)
- __field(unsigned char, fl_type)
- __field(unsigned long, fl_break_time)
- __field(unsigned long, fl_downgrade_time)
+ __field(struct file_lock_core *, blocker)
+ __field(fl_owner_t, owner)
+ __field(unsigned int, flags)
+ __field(unsigned char, type)
+ __field(unsigned long, break_time)
+ __field(unsigned long, downgrade_time)
),
TP_fast_assign(
__entry->fl = fl ? fl : NULL;
__entry->s_dev = inode->i_sb->s_dev;
__entry->i_ino = inode->i_ino;
- __entry->fl_blocker = fl ? fl->fl_blocker : NULL;
- __entry->fl_owner = fl ? fl->fl_owner : NULL;
- __entry->fl_flags = fl ? fl->fl_flags : 0;
- __entry->fl_type = fl ? fl->fl_type : 0;
- __entry->fl_break_time = fl ? fl->fl_break_time : 0;
- __entry->fl_downgrade_time = fl ? fl->fl_downgrade_time : 0;
+ __entry->blocker = fl ? fl->c.flc_blocker : NULL;
+ __entry->owner = fl ? fl->c.flc_owner : NULL;
+ __entry->flags = fl ? fl->c.flc_flags : 0;
+ __entry->type = fl ? fl->c.flc_type : 0;
+ __entry->break_time = fl ? fl->fl_break_time : 0;
+ __entry->downgrade_time = fl ? fl->fl_downgrade_time : 0;
),
TP_printk("fl=%p dev=0x%x:0x%x ino=0x%lx fl_blocker=%p fl_owner=%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu",
__entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
- __entry->i_ino, __entry->fl_blocker, __entry->fl_owner,
- show_fl_flags(__entry->fl_flags),
- show_fl_type(__entry->fl_type),
- __entry->fl_break_time, __entry->fl_downgrade_time)
+ __entry->i_ino, __entry->blocker, __entry->owner,
+ show_fl_flags(__entry->flags),
+ show_fl_type(__entry->type),
+ __entry->break_time, __entry->downgrade_time)
);
-DEFINE_EVENT(filelock_lease, break_lease_noblock, TP_PROTO(struct inode *inode, struct file_lock *fl),
+DEFINE_EVENT(filelock_lease, break_lease_noblock, TP_PROTO(struct inode *inode, struct file_lease *fl),
TP_ARGS(inode, fl));
-DEFINE_EVENT(filelock_lease, break_lease_block, TP_PROTO(struct inode *inode, struct file_lock *fl),
+DEFINE_EVENT(filelock_lease, break_lease_block, TP_PROTO(struct inode *inode, struct file_lease *fl),
TP_ARGS(inode, fl));
-DEFINE_EVENT(filelock_lease, break_lease_unblock, TP_PROTO(struct inode *inode, struct file_lock *fl),
+DEFINE_EVENT(filelock_lease, break_lease_unblock, TP_PROTO(struct inode *inode, struct file_lease *fl),
TP_ARGS(inode, fl));
-DEFINE_EVENT(filelock_lease, generic_delete_lease, TP_PROTO(struct inode *inode, struct file_lock *fl),
+DEFINE_EVENT(filelock_lease, generic_delete_lease, TP_PROTO(struct inode *inode, struct file_lease *fl),
TP_ARGS(inode, fl));
-DEFINE_EVENT(filelock_lease, time_out_leases, TP_PROTO(struct inode *inode, struct file_lock *fl),
+DEFINE_EVENT(filelock_lease, time_out_leases, TP_PROTO(struct inode *inode, struct file_lease *fl),
TP_ARGS(inode, fl));
TRACE_EVENT(generic_add_lease,
- TP_PROTO(struct inode *inode, struct file_lock *fl),
+ TP_PROTO(struct inode *inode, struct file_lease *fl),
TP_ARGS(inode, fl),
@@ -179,9 +180,9 @@ TRACE_EVENT(generic_add_lease,
__field(int, rcount)
__field(int, icount)
__field(dev_t, s_dev)
- __field(fl_owner_t, fl_owner)
- __field(unsigned int, fl_flags)
- __field(unsigned char, fl_type)
+ __field(fl_owner_t, owner)
+ __field(unsigned int, flags)
+ __field(unsigned char, type)
),
TP_fast_assign(
@@ -189,22 +190,22 @@ TRACE_EVENT(generic_add_lease,
__entry->i_ino = inode->i_ino;
__entry->wcount = atomic_read(&inode->i_writecount);
__entry->rcount = atomic_read(&inode->i_readcount);
- __entry->icount = atomic_read(&inode->i_count);
- __entry->fl_owner = fl->fl_owner;
- __entry->fl_flags = fl->fl_flags;
- __entry->fl_type = fl->fl_type;
+ __entry->icount = icount_read(inode);
+ __entry->owner = fl->c.flc_owner;
+ __entry->flags = fl->c.flc_flags;
+ __entry->type = fl->c.flc_type;
),
TP_printk("dev=0x%x:0x%x ino=0x%lx wcount=%d rcount=%d icount=%d fl_owner=%p fl_flags=%s fl_type=%s",
MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
__entry->i_ino, __entry->wcount, __entry->rcount,
- __entry->icount, __entry->fl_owner,
- show_fl_flags(__entry->fl_flags),
- show_fl_type(__entry->fl_type))
+ __entry->icount, __entry->owner,
+ show_fl_flags(__entry->flags),
+ show_fl_type(__entry->type))
);
TRACE_EVENT(leases_conflict,
- TP_PROTO(bool conflict, struct file_lock *lease, struct file_lock *breaker),
+ TP_PROTO(bool conflict, struct file_lease *lease, struct file_lease *breaker),
TP_ARGS(conflict, lease, breaker),
@@ -220,11 +221,11 @@ TRACE_EVENT(leases_conflict,
TP_fast_assign(
__entry->lease = lease;
- __entry->l_fl_flags = lease->fl_flags;
- __entry->l_fl_type = lease->fl_type;
+ __entry->l_fl_flags = lease->c.flc_flags;
+ __entry->l_fl_type = lease->c.flc_type;
__entry->breaker = breaker;
- __entry->b_fl_flags = breaker->fl_flags;
- __entry->b_fl_type = breaker->fl_type;
+ __entry->b_fl_flags = breaker->c.flc_flags;
+ __entry->b_fl_type = breaker->c.flc_type;
__entry->conflict = conflict;
),
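/*
 * Illustrative sketch, hypothetical helper: after the file_lock_core split,
 * the generic state read by the updated tracepoints above lives in fl->c and
 * is accessed through the flc_* member names.
 */
#include <linux/fcntl.h>
#include <linux/filelock.h>

static bool example_is_write_lock(const struct file_lock *fl)
{
	return fl->c.flc_type == F_WRLCK;
}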
diff --git a/include/trace/events/filemap.h b/include/trace/events/filemap.h
index 46c89c1e460c..f48fe637bfd2 100644
--- a/include/trace/events/filemap.h
+++ b/include/trace/events/filemap.h
@@ -56,6 +56,90 @@ DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_add_to_page_cache,
TP_ARGS(folio)
);
+DECLARE_EVENT_CLASS(mm_filemap_op_page_cache_range,
+
+ TP_PROTO(
+ struct address_space *mapping,
+ pgoff_t index,
+ pgoff_t last_index
+ ),
+
+ TP_ARGS(mapping, index, last_index),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, i_ino)
+ __field(dev_t, s_dev)
+ __field(unsigned long, index)
+ __field(unsigned long, last_index)
+ ),
+
+ TP_fast_assign(
+ __entry->i_ino = mapping->host->i_ino;
+ if (mapping->host->i_sb)
+ __entry->s_dev =
+ mapping->host->i_sb->s_dev;
+ else
+ __entry->s_dev = mapping->host->i_rdev;
+ __entry->index = index;
+ __entry->last_index = last_index;
+ ),
+
+ TP_printk(
+ "dev=%d:%d ino=%lx ofs=%lld-%lld",
+ MAJOR(__entry->s_dev),
+ MINOR(__entry->s_dev), __entry->i_ino,
+ ((loff_t)__entry->index) << PAGE_SHIFT,
+ ((((loff_t)__entry->last_index + 1) << PAGE_SHIFT) - 1)
+ )
+);
+
+DEFINE_EVENT(mm_filemap_op_page_cache_range, mm_filemap_get_pages,
+ TP_PROTO(
+ struct address_space *mapping,
+ pgoff_t index,
+ pgoff_t last_index
+ ),
+ TP_ARGS(mapping, index, last_index)
+);
+
+DEFINE_EVENT(mm_filemap_op_page_cache_range, mm_filemap_map_pages,
+ TP_PROTO(
+ struct address_space *mapping,
+ pgoff_t index,
+ pgoff_t last_index
+ ),
+ TP_ARGS(mapping, index, last_index)
+);
+
+TRACE_EVENT(mm_filemap_fault,
+ TP_PROTO(struct address_space *mapping, pgoff_t index),
+
+ TP_ARGS(mapping, index),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, i_ino)
+ __field(dev_t, s_dev)
+ __field(unsigned long, index)
+ ),
+
+ TP_fast_assign(
+ __entry->i_ino = mapping->host->i_ino;
+ if (mapping->host->i_sb)
+ __entry->s_dev =
+ mapping->host->i_sb->s_dev;
+ else
+ __entry->s_dev = mapping->host->i_rdev;
+ __entry->index = index;
+ ),
+
+ TP_printk(
+ "dev=%d:%d ino=%lx ofs=%lld",
+ MAJOR(__entry->s_dev),
+ MINOR(__entry->s_dev), __entry->i_ino,
+ ((loff_t)__entry->index) << PAGE_SHIFT
+ )
+);
+
TRACE_EVENT(filemap_set_wb_err,
TP_PROTO(struct address_space *mapping, errseq_t eseq),
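For reference, a minimal sketch (not part of this diff) of a call site for the new range events; the helper name is hypothetical, and only the tracepoint signature follows the definitions added above (index and last_index are page-cache indices, printed as byte offsets):

#include <linux/pagemap.h>
#include <trace/events/filemap.h>

/* Illustrative only: one event covers the window [index, last_index]. */
static void note_read_window(struct address_space *mapping,
			     pgoff_t index, pgoff_t last_index)
{
	trace_mm_filemap_get_pages(mapping, index, last_index);
}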
diff --git a/include/trace/events/firewire.h b/include/trace/events/firewire.h
new file mode 100644
index 000000000000..ad0e0cf82b9c
--- /dev/null
+++ b/include/trace/events/firewire.h
@@ -0,0 +1,912 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2024 Takashi Sakamoto
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM firewire
+
+#if !defined(_FIREWIRE_TRACE_EVENT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _FIREWIRE_TRACE_EVENT_H
+
+#include <linux/tracepoint.h>
+#include <linux/firewire.h>
+
+#include <linux/firewire-constants.h>
+
+// Some macros are defined in 'drivers/firewire/packet-header-definitions.h'.
+
+// The content of the TP_printk field is preprocessed, then put into the module binary.
+#define ASYNC_HEADER_GET_DESTINATION(header) \
+ (((header)[0] & ASYNC_HEADER_Q0_DESTINATION_MASK) >> ASYNC_HEADER_Q0_DESTINATION_SHIFT)
+
+#define ASYNC_HEADER_GET_TLABEL(header) \
+ (((header)[0] & ASYNC_HEADER_Q0_TLABEL_MASK) >> ASYNC_HEADER_Q0_TLABEL_SHIFT)
+
+#define ASYNC_HEADER_GET_TCODE(header) \
+ (((header)[0] & ASYNC_HEADER_Q0_TCODE_MASK) >> ASYNC_HEADER_Q0_TCODE_SHIFT)
+
+#define ASYNC_HEADER_GET_SOURCE(header) \
+ (((header)[1] & ASYNC_HEADER_Q1_SOURCE_MASK) >> ASYNC_HEADER_Q1_SOURCE_SHIFT)
+
+#define ASYNC_HEADER_GET_OFFSET(header) \
+ ((((unsigned long long)((header)[1] & ASYNC_HEADER_Q1_OFFSET_HIGH_MASK)) >> ASYNC_HEADER_Q1_OFFSET_HIGH_SHIFT) << 32)| \
+ (header)[2]
+
+#define ASYNC_HEADER_GET_RCODE(header) \
+ (((header)[1] & ASYNC_HEADER_Q1_RCODE_MASK) >> ASYNC_HEADER_Q1_RCODE_SHIFT)
+
+#define QUADLET_SIZE 4
+
+DECLARE_EVENT_CLASS(async_outbound_initiate_template,
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count),
+ TP_ARGS(transaction, card_index, generation, scode, header, data, data_count),
+ TP_STRUCT__entry(
+ __field(u64, transaction)
+ __field(u8, card_index)
+ __field(u8, generation)
+ __field(u8, scode)
+ __array(u32, header, ASYNC_HEADER_QUADLET_COUNT)
+ __dynamic_array(u32, data, data_count)
+ ),
+ TP_fast_assign(
+ __entry->transaction = transaction;
+ __entry->card_index = card_index;
+ __entry->generation = generation;
+ __entry->scode = scode;
+ memcpy(__entry->header, header, QUADLET_SIZE * ASYNC_HEADER_QUADLET_COUNT);
+ memcpy(__get_dynamic_array(data), data, __get_dynamic_array_len(data));
+ ),
+ // This format is for the request subaction.
+ TP_printk(
+ "transaction=0x%llx card_index=%u generation=%u scode=%u dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x offset=0x%012llx header=%s data=%s",
+ __entry->transaction,
+ __entry->card_index,
+ __entry->generation,
+ __entry->scode,
+ ASYNC_HEADER_GET_DESTINATION(__entry->header),
+ ASYNC_HEADER_GET_TLABEL(__entry->header),
+ ASYNC_HEADER_GET_TCODE(__entry->header),
+ ASYNC_HEADER_GET_SOURCE(__entry->header),
+ ASYNC_HEADER_GET_OFFSET(__entry->header),
+ __print_array(__entry->header, ASYNC_HEADER_QUADLET_COUNT, QUADLET_SIZE),
+ __print_array(__get_dynamic_array(data),
+ __get_dynamic_array_len(data) / QUADLET_SIZE, QUADLET_SIZE)
+ )
+);
+
+// The value of status is one of the ack codes and rcodes specific to the Linux FireWire subsystem.
+DECLARE_EVENT_CLASS(async_outbound_complete_template,
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp),
+ TP_ARGS(transaction, card_index, generation, scode, status, timestamp),
+ TP_STRUCT__entry(
+ __field(u64, transaction)
+ __field(u8, card_index)
+ __field(u8, generation)
+ __field(u8, scode)
+ __field(u8, status)
+ __field(u16, timestamp)
+ ),
+ TP_fast_assign(
+ __entry->transaction = transaction;
+ __entry->card_index = card_index;
+ __entry->generation = generation;
+ __entry->scode = scode;
+ __entry->status = status;
+ __entry->timestamp = timestamp;
+ ),
+ TP_printk(
+ "transaction=0x%llx card_index=%u generation=%u scode=%u status=%u timestamp=0x%04x",
+ __entry->transaction,
+ __entry->card_index,
+ __entry->generation,
+ __entry->scode,
+ __entry->status,
+ __entry->timestamp
+ )
+);
+
+// The value of status is one of the ack codes and rcodes specific to the Linux FireWire subsystem.
+DECLARE_EVENT_CLASS(async_inbound_template,
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count),
+ TP_ARGS(transaction, card_index, generation, scode, status, timestamp, header, data, data_count),
+ TP_STRUCT__entry(
+ __field(u64, transaction)
+ __field(u8, card_index)
+ __field(u8, generation)
+ __field(u8, scode)
+ __field(u8, status)
+ __field(u16, timestamp)
+ __array(u32, header, ASYNC_HEADER_QUADLET_COUNT)
+ __dynamic_array(u32, data, data_count)
+ ),
+ TP_fast_assign(
+ __entry->transaction = transaction;
+ __entry->card_index = card_index;
+ __entry->generation = generation;
+ __entry->scode = scode;
+ __entry->status = status;
+ __entry->timestamp = timestamp;
+ memcpy(__entry->header, header, QUADLET_SIZE * ASYNC_HEADER_QUADLET_COUNT);
+ memcpy(__get_dynamic_array(data), data, __get_dynamic_array_len(data));
+ ),
+ // This format is for the response subaction.
+ TP_printk(
+ "transaction=0x%llx card_index=%u generation=%u scode=%u status=%u timestamp=0x%04x dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x rcode=%u header=%s data=%s",
+ __entry->transaction,
+ __entry->card_index,
+ __entry->generation,
+ __entry->scode,
+ __entry->status,
+ __entry->timestamp,
+ ASYNC_HEADER_GET_DESTINATION(__entry->header),
+ ASYNC_HEADER_GET_TLABEL(__entry->header),
+ ASYNC_HEADER_GET_TCODE(__entry->header),
+ ASYNC_HEADER_GET_SOURCE(__entry->header),
+ ASYNC_HEADER_GET_RCODE(__entry->header),
+ __print_array(__entry->header, ASYNC_HEADER_QUADLET_COUNT, QUADLET_SIZE),
+ __print_array(__get_dynamic_array(data),
+ __get_dynamic_array_len(data) / QUADLET_SIZE, QUADLET_SIZE)
+ )
+);
+
+DEFINE_EVENT(async_outbound_initiate_template, async_request_outbound_initiate,
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count),
+ TP_ARGS(transaction, card_index, generation, scode, header, data, data_count)
+);
+
+DEFINE_EVENT(async_outbound_complete_template, async_request_outbound_complete,
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp),
+ TP_ARGS(transaction, card_index, generation, scode, status, timestamp)
+);
+
+DEFINE_EVENT(async_inbound_template, async_response_inbound,
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count),
+ TP_ARGS(transaction, card_index, generation, scode, status, timestamp, header, data, data_count)
+);
+
+DEFINE_EVENT_PRINT(async_inbound_template, async_request_inbound,
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count),
+ TP_ARGS(transaction, card_index, generation, scode, status, timestamp, header, data, data_count),
+ TP_printk(
+ "transaction=0x%llx card_index=%u generation=%u scode=%u status=%u timestamp=0x%04x dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x offset=0x%012llx header=%s data=%s",
+ __entry->transaction,
+ __entry->card_index,
+ __entry->generation,
+ __entry->scode,
+ __entry->status,
+ __entry->timestamp,
+ ASYNC_HEADER_GET_DESTINATION(__entry->header),
+ ASYNC_HEADER_GET_TLABEL(__entry->header),
+ ASYNC_HEADER_GET_TCODE(__entry->header),
+ ASYNC_HEADER_GET_SOURCE(__entry->header),
+ ASYNC_HEADER_GET_OFFSET(__entry->header),
+ __print_array(__entry->header, ASYNC_HEADER_QUADLET_COUNT, QUADLET_SIZE),
+ __print_array(__get_dynamic_array(data),
+ __get_dynamic_array_len(data) / QUADLET_SIZE, QUADLET_SIZE)
+ )
+);
+
+DEFINE_EVENT_PRINT(async_outbound_initiate_template, async_response_outbound_initiate,
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count),
+ TP_ARGS(transaction, card_index, generation, scode, header, data, data_count),
+ TP_printk(
+ "transaction=0x%llx card_index=%u generation=%u scode=%u dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x rcode=%u header=%s data=%s",
+ __entry->transaction,
+ __entry->card_index,
+ __entry->generation,
+ __entry->scode,
+ ASYNC_HEADER_GET_DESTINATION(__entry->header),
+ ASYNC_HEADER_GET_TLABEL(__entry->header),
+ ASYNC_HEADER_GET_TCODE(__entry->header),
+ ASYNC_HEADER_GET_SOURCE(__entry->header),
+ ASYNC_HEADER_GET_RCODE(__entry->header),
+ __print_array(__entry->header, ASYNC_HEADER_QUADLET_COUNT, QUADLET_SIZE),
+ __print_array(__get_dynamic_array(data),
+ __get_dynamic_array_len(data) / QUADLET_SIZE, QUADLET_SIZE)
+ )
+);
+
+DEFINE_EVENT(async_outbound_complete_template, async_response_outbound_complete,
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp),
+ TP_ARGS(transaction, card_index, generation, scode, status, timestamp)
+);
+
+#undef ASYNC_HEADER_GET_DESTINATION
+#undef ASYNC_HEADER_GET_TLABEL
+#undef ASYNC_HEADER_GET_TCODE
+#undef ASYNC_HEADER_GET_SOURCE
+#undef ASYNC_HEADER_GET_OFFSET
+#undef ASYNC_HEADER_GET_RCODE
+
+TRACE_EVENT(async_phy_outbound_initiate,
+ TP_PROTO(u64 packet, unsigned int card_index, unsigned int generation, u32 first_quadlet, u32 second_quadlet),
+ TP_ARGS(packet, card_index, generation, first_quadlet, second_quadlet),
+ TP_STRUCT__entry(
+ __field(u64, packet)
+ __field(u8, card_index)
+ __field(u8, generation)
+ __field(u32, first_quadlet)
+ __field(u32, second_quadlet)
+ ),
+ TP_fast_assign(
+ __entry->packet = packet;
+ __entry->card_index = card_index;
+ __entry->generation = generation;
+ __entry->first_quadlet = first_quadlet;
+ __entry->second_quadlet = second_quadlet;
+ ),
+ TP_printk(
+ "packet=0x%llx card_index=%u generation=%u first_quadlet=0x%08x second_quadlet=0x%08x",
+ __entry->packet,
+ __entry->card_index,
+ __entry->generation,
+ __entry->first_quadlet,
+ __entry->second_quadlet
+ )
+);
+
+TRACE_EVENT(async_phy_outbound_complete,
+ TP_PROTO(u64 packet, unsigned int card_index, unsigned int generation, unsigned int status, unsigned int timestamp),
+ TP_ARGS(packet, card_index, generation, status, timestamp),
+ TP_STRUCT__entry(
+ __field(u64, packet)
+ __field(u8, card_index)
+ __field(u8, generation)
+ __field(u8, status)
+ __field(u16, timestamp)
+ ),
+ TP_fast_assign(
+ __entry->packet = packet;
+ __entry->card_index = card_index;
+ __entry->generation = generation;
+ __entry->status = status;
+ __entry->timestamp = timestamp;
+ ),
+ TP_printk(
+ "packet=0x%llx card_index=%u generation=%u status=%u timestamp=0x%04x",
+ __entry->packet,
+ __entry->card_index,
+ __entry->generation,
+ __entry->status,
+ __entry->timestamp
+ )
+);
+
+TRACE_EVENT(async_phy_inbound,
+ TP_PROTO(u64 packet, unsigned int card_index, unsigned int generation, unsigned int status, unsigned int timestamp, u32 first_quadlet, u32 second_quadlet),
+ TP_ARGS(packet, card_index, generation, status, timestamp, first_quadlet, second_quadlet),
+ TP_STRUCT__entry(
+ __field(u64, packet)
+ __field(u8, card_index)
+ __field(u8, generation)
+ __field(u8, status)
+ __field(u16, timestamp)
+ __field(u32, first_quadlet)
+ __field(u32, second_quadlet)
+ ),
+ TP_fast_assign(
+ __entry->packet = packet;
+ __entry->card_index = card_index;
+ __entry->generation = generation;
+ __entry->status = status;
+ __entry->timestamp = timestamp;
+ __entry->first_quadlet = first_quadlet;
+ __entry->second_quadlet = second_quadlet;
+ ),
+ TP_printk(
+ "packet=0x%llx card_index=%u generation=%u status=%u timestamp=0x%04x first_quadlet=0x%08x second_quadlet=0x%08x",
+ __entry->packet,
+ __entry->card_index,
+ __entry->generation,
+ __entry->status,
+ __entry->timestamp,
+ __entry->first_quadlet,
+ __entry->second_quadlet
+ )
+);
+
+DECLARE_EVENT_CLASS(bus_reset_arrange_template,
+ TP_PROTO(unsigned int card_index, unsigned int generation, bool short_reset),
+ TP_ARGS(card_index, generation, short_reset),
+ TP_STRUCT__entry(
+ __field(u8, card_index)
+ __field(u8, generation)
+ __field(bool, short_reset)
+ ),
+ TP_fast_assign(
+ __entry->card_index = card_index;
+ __entry->generation = generation;
+ __entry->short_reset = short_reset;
+ ),
+ TP_printk(
+ "card_index=%u generation=%u short_reset=%s",
+ __entry->card_index,
+ __entry->generation,
+ __entry->short_reset ? "true" : "false"
+ )
+);
+
+DEFINE_EVENT(bus_reset_arrange_template, bus_reset_initiate,
+ TP_PROTO(unsigned int card_index, unsigned int generation, bool short_reset),
+ TP_ARGS(card_index, generation, short_reset)
+);
+
+DEFINE_EVENT(bus_reset_arrange_template, bus_reset_schedule,
+ TP_PROTO(unsigned int card_index, unsigned int generation, bool short_reset),
+ TP_ARGS(card_index, generation, short_reset)
+);
+
+DEFINE_EVENT(bus_reset_arrange_template, bus_reset_postpone,
+ TP_PROTO(unsigned int card_index, unsigned int generation, bool short_reset),
+ TP_ARGS(card_index, generation, short_reset)
+);
+
+TRACE_EVENT(bus_reset_handle,
+ TP_PROTO(unsigned int card_index, unsigned int generation, unsigned int node_id, bool bm_abdicate, u32 *self_ids, unsigned int self_id_count),
+ TP_ARGS(card_index, generation, node_id, bm_abdicate, self_ids, self_id_count),
+ TP_STRUCT__entry(
+ __field(u8, card_index)
+ __field(u8, generation)
+ __field(u8, node_id)
+ __field(bool, bm_abdicate)
+ __dynamic_array(u32, self_ids, self_id_count)
+ ),
+ TP_fast_assign(
+ __entry->card_index = card_index;
+ __entry->generation = generation;
+ __entry->node_id = node_id;
+ __entry->bm_abdicate = bm_abdicate;
+ memcpy(__get_dynamic_array(self_ids), self_ids, __get_dynamic_array_len(self_ids));
+ ),
+ TP_printk(
+ "card_index=%u generation=%u node_id=0x%04x bm_abdicate=%s self_ids=%s",
+ __entry->card_index,
+ __entry->generation,
+ __entry->node_id,
+ __entry->bm_abdicate ? "true" : "false",
+ __print_array(__get_dynamic_array(self_ids),
+ __get_dynamic_array_len(self_ids) / QUADLET_SIZE, QUADLET_SIZE)
+ )
+);
+
+// Some macros are defined in 'drivers/firewire/phy-packet-definitions.h'.
+
+// The content of the TP_printk field is preprocessed, then put into the module binary.
+
+#define PHY_PACKET_SELF_ID_GET_PHY_ID(quads) \
+ ((((const u32 *)quads)[0] & SELF_ID_PHY_ID_MASK) >> SELF_ID_PHY_ID_SHIFT)
+
+#define PHY_PACKET_SELF_ID_GET_LINK_ACTIVE(quads) \
+ ((((const u32 *)quads)[0] & SELF_ID_ZERO_LINK_ACTIVE_MASK) >> SELF_ID_ZERO_LINK_ACTIVE_SHIFT)
+
+#define PHY_PACKET_SELF_ID_GET_GAP_COUNT(quads) \
+ ((((const u32 *)quads)[0] & SELF_ID_ZERO_GAP_COUNT_MASK) >> SELF_ID_ZERO_GAP_COUNT_SHIFT)
+
+#define PHY_PACKET_SELF_ID_GET_SCODE(quads) \
+ ((((const u32 *)quads)[0] & SELF_ID_ZERO_SCODE_MASK) >> SELF_ID_ZERO_SCODE_SHIFT)
+
+#define PHY_PACKET_SELF_ID_GET_CONTENDER(quads) \
+ ((((const u32 *)quads)[0] & SELF_ID_ZERO_CONTENDER_MASK) >> SELF_ID_ZERO_CONTENDER_SHIFT)
+
+#define PHY_PACKET_SELF_ID_GET_POWER_CLASS(quads) \
+ ((((const u32 *)quads)[0] & SELF_ID_ZERO_POWER_CLASS_MASK) >> SELF_ID_ZERO_POWER_CLASS_SHIFT)
+
+#define PHY_PACKET_SELF_ID_GET_INITIATED_RESET(quads) \
+ ((((const u32 *)quads)[0] & SELF_ID_ZERO_INITIATED_RESET_MASK) >> SELF_ID_ZERO_INITIATED_RESET_SHIFT)
+
+TRACE_EVENT(self_id_sequence,
+ TP_PROTO(unsigned int card_index, const u32 *self_id_sequence, unsigned int quadlet_count, unsigned int generation),
+ TP_ARGS(card_index, self_id_sequence, quadlet_count, generation),
+ TP_STRUCT__entry(
+ __field(u8, card_index)
+ __field(u8, generation)
+ __dynamic_array(u8, port_status, self_id_sequence_get_port_capacity(quadlet_count))
+ __dynamic_array(u32, self_id_sequence, quadlet_count)
+ ),
+ TP_fast_assign(
+ __entry->card_index = card_index;
+ __entry->generation = generation;
+ {
+ u8 *port_status = __get_dynamic_array(port_status);
+ unsigned int port_index;
+
+ for (port_index = 0; port_index < __get_dynamic_array_len(port_status); ++port_index) {
+ port_status[port_index] =
+ self_id_sequence_get_port_status(self_id_sequence,
+ quadlet_count, port_index);
+ }
+ }
+ memcpy(__get_dynamic_array(self_id_sequence), self_id_sequence,
+ __get_dynamic_array_len(self_id_sequence));
+ ),
+ TP_printk(
+ "card_index=%u generation=%u phy_id=0x%02x link_active=%s gap_count=%u scode=%u contender=%s power_class=%u initiated_reset=%s port_status=%s self_id_sequence=%s",
+ __entry->card_index,
+ __entry->generation,
+ PHY_PACKET_SELF_ID_GET_PHY_ID(__get_dynamic_array(self_id_sequence)),
+ PHY_PACKET_SELF_ID_GET_LINK_ACTIVE(__get_dynamic_array(self_id_sequence)) ? "true" : "false",
+ PHY_PACKET_SELF_ID_GET_GAP_COUNT(__get_dynamic_array(self_id_sequence)),
+ PHY_PACKET_SELF_ID_GET_SCODE(__get_dynamic_array(self_id_sequence)),
+ PHY_PACKET_SELF_ID_GET_CONTENDER(__get_dynamic_array(self_id_sequence)) ? "true" : "false",
+ PHY_PACKET_SELF_ID_GET_POWER_CLASS(__get_dynamic_array(self_id_sequence)),
+ PHY_PACKET_SELF_ID_GET_INITIATED_RESET(__get_dynamic_array(self_id_sequence)) ? "true" : "false",
+ __print_array(__get_dynamic_array(port_status), __get_dynamic_array_len(port_status), 1),
+ __print_array(__get_dynamic_array(self_id_sequence),
+ __get_dynamic_array_len(self_id_sequence) / QUADLET_SIZE, QUADLET_SIZE)
+ )
+);
+
+#undef PHY_PACKET_SELF_ID_GET_PHY_ID
+#undef PHY_PACKET_SELF_ID_GET_LINK_ACTIVE
+#undef PHY_PACKET_SELF_ID_GET_GAP_COUNT
+#undef PHY_PACKET_SELF_ID_GET_SCODE
+#undef PHY_PACKET_SELF_ID_GET_CONTENDER
+#undef PHY_PACKET_SELF_ID_GET_POWER_CLASS
+#undef PHY_PACKET_SELF_ID_GET_INITIATED_RESET
+
+TRACE_EVENT_CONDITION(isoc_outbound_allocate,
+ TP_PROTO(const struct fw_iso_context *ctx, unsigned int channel, unsigned int scode),
+ TP_ARGS(ctx, channel, scode),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_TRANSMIT),
+ TP_STRUCT__entry(
+ __field(u64, context)
+ __field(u8, card_index)
+ __field(u8, channel)
+ __field(u8, scode)
+ ),
+ TP_fast_assign(
+ __entry->context = (uintptr_t)ctx;
+ __entry->card_index = ctx->card->index;
+ __entry->channel = channel;
+ __entry->scode = scode;
+ ),
+ TP_printk(
+ "context=0x%llx card_index=%u channel=%u scode=%u",
+ __entry->context,
+ __entry->card_index,
+ __entry->channel,
+ __entry->scode
+ )
+);
+
+TRACE_EVENT_CONDITION(isoc_inbound_single_allocate,
+ TP_PROTO(const struct fw_iso_context *ctx, unsigned int channel, unsigned int header_size),
+ TP_ARGS(ctx, channel, header_size),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE),
+ TP_STRUCT__entry(
+ __field(u64, context)
+ __field(u8, card_index)
+ __field(u8, channel)
+ __field(u8, header_size)
+ ),
+ TP_fast_assign(
+ __entry->context = (uintptr_t)ctx;
+ __entry->card_index = ctx->card->index;
+ __entry->channel = channel;
+ __entry->header_size = header_size;
+ ),
+ TP_printk(
+ "context=0x%llx card_index=%u channel=%u header_size=%u",
+ __entry->context,
+ __entry->card_index,
+ __entry->channel,
+ __entry->header_size
+ )
+);
+
+TRACE_EVENT_CONDITION(isoc_inbound_multiple_allocate,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL),
+ TP_STRUCT__entry(
+ __field(u64, context)
+ __field(u8, card_index)
+ ),
+ TP_fast_assign(
+ __entry->context = (uintptr_t)ctx;
+ __entry->card_index = ctx->card->index;
+ ),
+ TP_printk(
+ "context=0x%llx card_index=%u",
+ __entry->context,
+ __entry->card_index
+ )
+);
+
+DECLARE_EVENT_CLASS(isoc_destroy_template,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_STRUCT__entry(
+ __field(u64, context)
+ __field(u8, card_index)
+ ),
+ TP_fast_assign(
+ __entry->context = (uintptr_t)ctx;
+ __entry->card_index = ctx->card->index;
+ ),
+ TP_printk(
+ "context=0x%llx card_index=%u",
+ __entry->context,
+ __entry->card_index
+ )
+)
+
+DEFINE_EVENT_CONDITION(isoc_destroy_template, isoc_outbound_destroy,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_TRANSMIT)
+);
+
+DEFINE_EVENT_CONDITION(isoc_destroy_template, isoc_inbound_single_destroy,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE)
+);
+
+DEFINE_EVENT_CONDITION(isoc_destroy_template, isoc_inbound_multiple_destroy,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
+);
+
+TRACE_EVENT(isoc_inbound_multiple_channels,
+ TP_PROTO(const struct fw_iso_context *ctx, u64 channels),
+ TP_ARGS(ctx, channels),
+ TP_STRUCT__entry(
+ __field(u64, context)
+ __field(u8, card_index)
+ __field(u64, channels)
+ ),
+ TP_fast_assign(
+ __entry->context = (uintptr_t)ctx;
+ __entry->card_index = ctx->card->index;
+ __entry->channels = channels;
+ ),
+ TP_printk(
+ "context=0x%llx card_index=%u channels=0x%016llx",
+ __entry->context,
+ __entry->card_index,
+ __entry->channels
+ )
+);
+
+TRACE_EVENT_CONDITION(isoc_outbound_start,
+ TP_PROTO(const struct fw_iso_context *ctx, int cycle_match),
+ TP_ARGS(ctx, cycle_match),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_TRANSMIT),
+ TP_STRUCT__entry(
+ __field(u64, context)
+ __field(u8, card_index)
+ __field(bool, cycle_match)
+ __field(u16, cycle)
+ ),
+ TP_fast_assign(
+ __entry->context = (uintptr_t)ctx;
+ __entry->card_index = ctx->card->index;
+ __entry->cycle_match = cycle_match < 0 ? false : true;
+ __entry->cycle = __entry->cycle_match ? (u16)cycle_match : 0;
+ ),
+ TP_printk(
+ "context=0x%llx card_index=%u cycle_match=%s cycle=0x%04x",
+ __entry->context,
+ __entry->card_index,
+ __entry->cycle_match ? "true" : "false",
+ __entry->cycle
+ )
+);
+
+DECLARE_EVENT_CLASS(isoc_inbound_start_template,
+ TP_PROTO(const struct fw_iso_context *ctx, int cycle_match, unsigned int sync, unsigned int tags),
+ TP_ARGS(ctx, cycle_match, sync, tags),
+ TP_STRUCT__entry(
+ __field(u64, context)
+ __field(u8, card_index)
+ __field(bool, cycle_match)
+ __field(u16, cycle)
+ __field(u8, sync)
+ __field(u8, tags)
+ ),
+ TP_fast_assign(
+ __entry->context = (uintptr_t)ctx;
+ __entry->card_index = ctx->card->index;
+ __entry->cycle_match = cycle_match < 0 ? false : true;
+ __entry->cycle = __entry->cycle_match ? (u16)cycle_match : 0;
+ __entry->sync = sync;
+ __entry->tags = tags;
+ ),
+ TP_printk(
+ "context=0x%llx card_index=%u cycle_match=%s cycle=0x%04x sync=%u tags=%s",
+ __entry->context,
+ __entry->card_index,
+ __entry->cycle_match ? "true" : "false",
+ __entry->cycle,
+ __entry->sync,
+ __print_flags(__entry->tags, "|",
+ { FW_ISO_CONTEXT_MATCH_TAG0, "0" },
+ { FW_ISO_CONTEXT_MATCH_TAG1, "1" },
+ { FW_ISO_CONTEXT_MATCH_TAG2, "2" },
+ { FW_ISO_CONTEXT_MATCH_TAG3, "3" }
+ )
+ )
+);
+
+DEFINE_EVENT_CONDITION(isoc_inbound_start_template, isoc_inbound_single_start,
+ TP_PROTO(const struct fw_iso_context *ctx, int cycle_match, unsigned int sync, unsigned int tags),
+ TP_ARGS(ctx, cycle_match, sync, tags),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE)
+);
+
+DEFINE_EVENT_CONDITION(isoc_inbound_start_template, isoc_inbound_multiple_start,
+ TP_PROTO(const struct fw_iso_context *ctx, int cycle_match, unsigned int sync, unsigned int tags),
+ TP_ARGS(ctx, cycle_match, sync, tags),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
+);
+
+DECLARE_EVENT_CLASS(isoc_stop_template,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_STRUCT__entry(
+ __field(u64, context)
+ __field(u8, card_index)
+ ),
+ TP_fast_assign(
+ __entry->context = (uintptr_t)ctx;
+ __entry->card_index = ctx->card->index;
+ ),
+ TP_printk(
+ "context=0x%llx card_index=%u",
+ __entry->context,
+ __entry->card_index
+ )
+)
+
+DEFINE_EVENT_CONDITION(isoc_stop_template, isoc_outbound_stop,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_TRANSMIT)
+);
+
+DEFINE_EVENT_CONDITION(isoc_stop_template, isoc_inbound_single_stop,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE)
+);
+
+DEFINE_EVENT_CONDITION(isoc_stop_template, isoc_inbound_multiple_stop,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
+);
+
+DECLARE_EVENT_CLASS(isoc_flush_template,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_STRUCT__entry(
+ __field(u64, context)
+ __field(u8, card_index)
+ ),
+ TP_fast_assign(
+ __entry->context = (uintptr_t)ctx;
+ __entry->card_index = ctx->card->index;
+ ),
+ TP_printk(
+ "context=0x%llx card_index=%u",
+ __entry->context,
+ __entry->card_index
+ )
+);
+
+DEFINE_EVENT_CONDITION(isoc_flush_template, isoc_outbound_flush,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_TRANSMIT)
+);
+
+DEFINE_EVENT_CONDITION(isoc_flush_template, isoc_inbound_single_flush,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE)
+);
+
+DEFINE_EVENT_CONDITION(isoc_flush_template, isoc_inbound_multiple_flush,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
+);
+
+DECLARE_EVENT_CLASS(isoc_flush_completions_template,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_STRUCT__entry(
+ __field(u64, context)
+ __field(u8, card_index)
+ ),
+ TP_fast_assign(
+ __entry->context = (uintptr_t)ctx;
+ __entry->card_index = ctx->card->index;
+ ),
+ TP_printk(
+ "context=0x%llx card_index=%u",
+ __entry->context,
+ __entry->card_index
+ )
+);
+
+DEFINE_EVENT_CONDITION(isoc_flush_completions_template, isoc_outbound_flush_completions,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_TRANSMIT)
+);
+
+DEFINE_EVENT_CONDITION(isoc_flush_completions_template, isoc_inbound_single_flush_completions,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE)
+);
+
+DEFINE_EVENT_CONDITION(isoc_flush_completions_template, isoc_inbound_multiple_flush_completions,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
+);
+
+#define TP_STRUCT__entry_iso_packet(ctx, buffer_offset, packet) \
+ TP_STRUCT__entry( \
+ __field(u64, context) \
+ __field(u8, card_index) \
+ __field(u32, buffer_offset) \
+ __field(bool, interrupt) \
+ __field(bool, skip) \
+ __field(u8, sy) \
+ __field(u8, tag) \
+ __dynamic_array(u32, header, packet->header_length / QUADLET_SIZE) \
+ )
+
+#define TP_fast_assign_iso_packet(ctx, buffer_offset, packet) \
+ TP_fast_assign( \
+ __entry->context = (uintptr_t)ctx; \
+ __entry->card_index = ctx->card->index; \
+ __entry->buffer_offset = buffer_offset; \
+ __entry->interrupt = packet->interrupt; \
+ __entry->skip = packet->skip; \
+ __entry->sy = packet->sy; \
+ __entry->tag = packet->tag; \
+ memcpy(__get_dynamic_array(header), packet->header, \
+ __get_dynamic_array_len(header)); \
+ )
+
+TRACE_EVENT_CONDITION(isoc_outbound_queue,
+ TP_PROTO(const struct fw_iso_context *ctx, unsigned long buffer_offset, const struct fw_iso_packet *packet),
+ TP_ARGS(ctx, buffer_offset, packet),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_TRANSMIT),
+ TP_STRUCT__entry_iso_packet(ctx, buffer_offset, packet),
+ TP_fast_assign_iso_packet(ctx, buffer_offset, packet),
+ TP_printk(
+ "context=0x%llx card_index=%u buffer_offset=0x%x interrupt=%s skip=%s sy=%d tag=%u header=%s",
+ __entry->context,
+ __entry->card_index,
+ __entry->buffer_offset,
+ __entry->interrupt ? "true" : "false",
+ __entry->skip ? "true" : "false",
+ __entry->sy,
+ __entry->tag,
+ __print_array(__get_dynamic_array(header),
+ __get_dynamic_array_len(header) / QUADLET_SIZE, QUADLET_SIZE)
+ )
+);
+
+TRACE_EVENT_CONDITION(isoc_inbound_single_queue,
+ TP_PROTO(const struct fw_iso_context *ctx, unsigned long buffer_offset, const struct fw_iso_packet *packet),
+ TP_ARGS(ctx, buffer_offset, packet),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE),
+ TP_STRUCT__entry_iso_packet(ctx, buffer_offset, packet),
+ TP_fast_assign_iso_packet(ctx, buffer_offset, packet),
+ TP_printk(
+ "context=0x%llx card_index=%u buffer_offset=0x%x interrupt=%s skip=%s",
+ __entry->context,
+ __entry->card_index,
+ __entry->buffer_offset,
+ __entry->interrupt ? "true" : "false",
+ __entry->skip ? "true" : "false"
+ )
+);
+
+TRACE_EVENT_CONDITION(isoc_inbound_multiple_queue,
+ TP_PROTO(const struct fw_iso_context *ctx, unsigned long buffer_offset, const struct fw_iso_packet *packet),
+ TP_ARGS(ctx, buffer_offset, packet),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL),
+ TP_STRUCT__entry_iso_packet(ctx, buffer_offset, packet),
+ TP_fast_assign_iso_packet(ctx, buffer_offset, packet),
+ TP_printk(
+ "context=0x%llx card_index=%u buffer_offset=0x%x interrupt=%s",
+ __entry->context,
+ __entry->card_index,
+ __entry->buffer_offset,
+ __entry->interrupt ? "true" : "false"
+ )
+);
+
+#undef TP_STRUCT__entry_iso_packet
+#undef TP_fast_assign_iso_packet
+
+#ifndef show_cause
+enum fw_iso_context_completions_cause {
+ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH = 0,
+ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT,
+ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW,
+};
+#define show_cause(cause) \
+ __print_symbolic(cause, \
+ { FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH, "FLUSH" }, \
+ { FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT, "INTERRUPT" }, \
+ { FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW, "HEADER_OVERFLOW" } \
+ )
+#endif
+
+DECLARE_EVENT_CLASS(isoc_single_completions_template,
+ TP_PROTO(const struct fw_iso_context *ctx, u16 timestamp, enum fw_iso_context_completions_cause cause, const u32 *header, unsigned int header_length),
+ TP_ARGS(ctx, timestamp, cause, header, header_length),
+ TP_STRUCT__entry(
+ __field(u64, context)
+ __field(u8, card_index)
+ __field(u16, timestamp)
+ __field(u8, cause)
+ __dynamic_array(u32, header, header_length / QUADLET_SIZE)
+ ),
+ TP_fast_assign(
+ __entry->context = (uintptr_t)ctx;
+ __entry->card_index = ctx->card->index;
+ __entry->timestamp = timestamp;
+ __entry->cause = cause;
+ memcpy(__get_dynamic_array(header), header, __get_dynamic_array_len(header));
+ ),
+ TP_printk(
+ "context=0x%llx card_index=%u timestamp=0x%04x cause=%s header=%s",
+ __entry->context,
+ __entry->card_index,
+ __entry->timestamp,
+ show_cause(__entry->cause),
+ __print_array(__get_dynamic_array(header),
+ __get_dynamic_array_len(header) / QUADLET_SIZE, QUADLET_SIZE)
+ )
+)
+
+DEFINE_EVENT_CONDITION(isoc_single_completions_template, isoc_outbound_completions,
+ TP_PROTO(const struct fw_iso_context *ctx, u16 timestamp, enum fw_iso_context_completions_cause cause, const u32 *header, unsigned int header_length),
+ TP_ARGS(ctx, timestamp, cause, header, header_length),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_TRANSMIT)
+);
+
+DEFINE_EVENT_CONDITION(isoc_single_completions_template, isoc_inbound_single_completions,
+ TP_PROTO(const struct fw_iso_context *ctx, u16 timestamp, enum fw_iso_context_completions_cause cause, const u32 *header, unsigned int header_length),
+ TP_ARGS(ctx, timestamp, cause, header, header_length),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE)
+);
+
+TRACE_EVENT(isoc_inbound_multiple_completions,
+ TP_PROTO(const struct fw_iso_context *ctx, unsigned int completed, enum fw_iso_context_completions_cause cause),
+ TP_ARGS(ctx, completed, cause),
+ TP_STRUCT__entry(
+ __field(u64, context)
+ __field(u8, card_index)
+ __field(u16, completed)
+ __field(u8, cause)
+ ),
+ TP_fast_assign(
+ __entry->context = (uintptr_t)ctx;
+ __entry->card_index = ctx->card->index;
+ __entry->completed = completed;
+ __entry->cause = cause;
+ ),
+ TP_printk(
+ "context=0x%llx card_index=%u completed=%u cause=%s",
+ __entry->context,
+ __entry->card_index,
+ __entry->completed,
+ show_cause(__entry->cause)
+ )
+);
+
+#undef QUADLET_SIZE
+
+#endif // _FIREWIRE_TRACE_EVENT_H
+
+#include <trace/define_trace.h>
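For reference, a minimal sketch (not part of this diff) of how a driver would use the events above: exactly one translation unit defines CREATE_TRACE_POINTS before including the header, after which the generated trace_*() helpers can be called. The wrapper and its argument names are hypothetical; data_count is in quadlets, matching the __dynamic_array(u32, data, data_count) declaration:

/* Illustrative only: instantiate the tracepoints in one .c file. */
#define CREATE_TRACE_POINTS
#include <trace/events/firewire.h>

static void note_request_initiated(u64 transaction, unsigned int card_index,
				   unsigned int generation, unsigned int scode,
				   const u32 *header, const u32 *data,
				   unsigned int data_count)
{
	trace_async_request_outbound_initiate(transaction, card_index, generation,
					      scode, header, data, data_count);
}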
diff --git a/include/trace/events/firewire_ohci.h b/include/trace/events/firewire_ohci.h
new file mode 100644
index 000000000000..4f9a7f2577f3
--- /dev/null
+++ b/include/trace/events/firewire_ohci.h
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2024 Takashi Sakamoto
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM firewire_ohci
+
+#if !defined(_FIREWIRE_OHCI_TRACE_EVENT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _FIREWIRE_OHCI_TRACE_EVENT_H
+
+#include <linux/tracepoint.h>
+
+// Some macros and helper functions are defined in 'drivers/firewire/ohci.c'.
+
+TRACE_EVENT(irqs,
+ TP_PROTO(unsigned int card_index, u32 events),
+ TP_ARGS(card_index, events),
+ TP_STRUCT__entry(
+ __field(u8, card_index)
+ __field(u32, events)
+ ),
+ TP_fast_assign(
+ __entry->card_index = card_index;
+ __entry->events = events;
+ ),
+ TP_printk(
+ "card_index=%u events=%s",
+ __entry->card_index,
+ __print_flags(__entry->events, "|",
+ { OHCI1394_selfIDComplete, "selfIDComplete" },
+ { OHCI1394_RQPkt, "RQPkt" },
+ { OHCI1394_RSPkt, "RSPkt" },
+ { OHCI1394_reqTxComplete, "reqTxComplete" },
+ { OHCI1394_respTxComplete, "respTxComplete" },
+ { OHCI1394_isochRx, "isochRx" },
+ { OHCI1394_isochTx, "isochTx" },
+ { OHCI1394_postedWriteErr, "postedWriteErr" },
+ { OHCI1394_cycleTooLong, "cycleTooLong" },
+ { OHCI1394_cycle64Seconds, "cycle64Seconds" },
+ { OHCI1394_cycleInconsistent, "cycleInconsistent" },
+ { OHCI1394_regAccessFail, "regAccessFail" },
+ { OHCI1394_unrecoverableError, "unrecoverableError" },
+ { OHCI1394_busReset, "busReset" }
+ )
+ )
+);
+
+#define QUADLET_SIZE 4
+
+#define SELF_ID_COUNT_IS_ERROR(reg) \
+ (!!(((reg) & OHCI1394_SelfIDCount_selfIDError_MASK) >> OHCI1394_SelfIDCount_selfIDError_SHIFT))
+
+#define SELF_ID_COUNT_GET_GENERATION(reg) \
+ (((reg) & OHCI1394_SelfIDCount_selfIDGeneration_MASK) >> OHCI1394_SelfIDCount_selfIDGeneration_SHIFT)
+
+#define SELF_ID_RECEIVE_Q0_GET_GENERATION(quadlet) \
+ (((quadlet) & OHCI1394_SELF_ID_RECEIVE_Q0_GENERATION_MASK) >> OHCI1394_SELF_ID_RECEIVE_Q0_GENERATION_SHIFT)
+
+#define SELF_ID_RECEIVE_Q0_GET_TIMESTAMP(quadlet) \
+ (((quadlet) & OHCI1394_SELF_ID_RECEIVE_Q0_TIMESTAMP_MASK) >> OHCI1394_SELF_ID_RECEIVE_Q0_TIMESTAMP_SHIFT)
+
+TRACE_EVENT(self_id_complete,
+ TP_PROTO(unsigned int card_index, u32 reg, const __le32 *self_id_receive, bool has_be_header_quirk),
+ TP_ARGS(card_index, reg, self_id_receive, has_be_header_quirk),
+ TP_STRUCT__entry(
+ __field(u8, card_index)
+ __field(u32, reg)
+ __dynamic_array(u32, self_id_receive, ohci1394_self_id_count_get_size(reg))
+ ),
+ TP_fast_assign(
+ __entry->card_index = card_index;
+ __entry->reg = reg;
+ {
+ u32 *ptr = __get_dynamic_array(self_id_receive);
+ int i;
+
+ for (i = 0; i < __get_dynamic_array_len(self_id_receive) / QUADLET_SIZE; ++i)
+ ptr[i] = cond_le32_to_cpu(self_id_receive[i], has_be_header_quirk);
+ }
+ ),
+ TP_printk(
+ "card_index=%u is_error=%s generation_at_bus_reset=%u generation_at_completion=%u timestamp=0x%04x packet_data=%s",
+ __entry->card_index,
+ SELF_ID_COUNT_IS_ERROR(__entry->reg) ? "true" : "false",
+ SELF_ID_COUNT_GET_GENERATION(__entry->reg),
+ SELF_ID_RECEIVE_Q0_GET_GENERATION(((const u32 *)__get_dynamic_array(self_id_receive))[0]),
+ SELF_ID_RECEIVE_Q0_GET_TIMESTAMP(((const u32 *)__get_dynamic_array(self_id_receive))[0]),
+ __print_array(((const u32 *)__get_dynamic_array(self_id_receive)) + 1,
+ (__get_dynamic_array_len(self_id_receive) / QUADLET_SIZE) - 1, QUADLET_SIZE)
+ )
+);
+
+#undef SELF_ID_COUNT_IS_ERROR
+#undef SELF_ID_COUNT_GET_GENERATION
+#undef SELF_ID_RECEIVE_Q0_GET_GENERATION
+#undef SELF_ID_RECEIVE_Q0_GET_TIMESTAMP
+
+#undef QUADLET_SIZE
+
+#endif // _FIREWIRE_OHCI_TRACE_EVENT_H
+
+#include <trace/define_trace.h>
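For reference, a minimal sketch (not part of this diff, helper hypothetical) of how the OHCI interrupt path would feed the raw event mask to the irqs event, letting __print_flags() decode the OHCI1394_* bits symbolically:

#include <trace/events/firewire_ohci.h>

/* Illustrative only: events is the raw interrupt event register value. */
static void note_ohci_irq(unsigned int card_index, u32 events)
{
	trace_irqs(card_index, events);
}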
diff --git a/include/trace/events/fs_dax.h b/include/trace/events/fs_dax.h
index 97b09fcf7e52..50ebc1290ab0 100644
--- a/include/trace/events/fs_dax.h
+++ b/include/trace/events/fs_dax.h
@@ -15,7 +15,7 @@ DECLARE_EVENT_CLASS(dax_pmd_fault_class,
__field(unsigned long, ino)
__field(unsigned long, vm_start)
__field(unsigned long, vm_end)
- __field(unsigned long, vm_flags)
+ __field(vm_flags_t, vm_flags)
__field(unsigned long, address)
__field(pgoff_t, pgoff)
__field(pgoff_t, max_pgoff)
@@ -62,14 +62,14 @@ DEFINE_PMD_FAULT_EVENT(dax_pmd_fault_done);
DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
TP_PROTO(struct inode *inode, struct vm_fault *vmf,
- struct page *zero_page,
+ struct folio *zero_folio,
void *radix_entry),
- TP_ARGS(inode, vmf, zero_page, radix_entry),
+ TP_ARGS(inode, vmf, zero_folio, radix_entry),
TP_STRUCT__entry(
__field(unsigned long, ino)
- __field(unsigned long, vm_flags)
+ __field(vm_flags_t, vm_flags)
__field(unsigned long, address)
- __field(struct page *, zero_page)
+ __field(struct folio *, zero_folio)
__field(void *, radix_entry)
__field(dev_t, dev)
),
@@ -78,17 +78,17 @@ DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
__entry->ino = inode->i_ino;
__entry->vm_flags = vmf->vma->vm_flags;
__entry->address = vmf->address;
- __entry->zero_page = zero_page;
+ __entry->zero_folio = zero_folio;
__entry->radix_entry = radix_entry;
),
- TP_printk("dev %d:%d ino %#lx %s address %#lx zero_page %p "
+ TP_printk("dev %d:%d ino %#lx %s address %#lx zero_folio %p "
"radix_entry %#lx",
MAJOR(__entry->dev),
MINOR(__entry->dev),
__entry->ino,
__entry->vm_flags & VM_SHARED ? "shared" : "private",
__entry->address,
- __entry->zero_page,
+ __entry->zero_folio,
(unsigned long)__entry->radix_entry
)
)
@@ -96,66 +96,18 @@ DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
#define DEFINE_PMD_LOAD_HOLE_EVENT(name) \
DEFINE_EVENT(dax_pmd_load_hole_class, name, \
TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
- struct page *zero_page, void *radix_entry), \
- TP_ARGS(inode, vmf, zero_page, radix_entry))
+ struct folio *zero_folio, void *radix_entry), \
+ TP_ARGS(inode, vmf, zero_folio, radix_entry))
DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole);
DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole_fallback);
-DECLARE_EVENT_CLASS(dax_pmd_insert_mapping_class,
- TP_PROTO(struct inode *inode, struct vm_fault *vmf,
- long length, pfn_t pfn, void *radix_entry),
- TP_ARGS(inode, vmf, length, pfn, radix_entry),
- TP_STRUCT__entry(
- __field(unsigned long, ino)
- __field(unsigned long, vm_flags)
- __field(unsigned long, address)
- __field(long, length)
- __field(u64, pfn_val)
- __field(void *, radix_entry)
- __field(dev_t, dev)
- __field(int, write)
- ),
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->vm_flags = vmf->vma->vm_flags;
- __entry->address = vmf->address;
- __entry->write = vmf->flags & FAULT_FLAG_WRITE;
- __entry->length = length;
- __entry->pfn_val = pfn.val;
- __entry->radix_entry = radix_entry;
- ),
- TP_printk("dev %d:%d ino %#lx %s %s address %#lx length %#lx "
- "pfn %#llx %s radix_entry %#lx",
- MAJOR(__entry->dev),
- MINOR(__entry->dev),
- __entry->ino,
- __entry->vm_flags & VM_SHARED ? "shared" : "private",
- __entry->write ? "write" : "read",
- __entry->address,
- __entry->length,
- __entry->pfn_val & ~PFN_FLAGS_MASK,
- __print_flags_u64(__entry->pfn_val & PFN_FLAGS_MASK, "|",
- PFN_FLAGS_TRACE),
- (unsigned long)__entry->radix_entry
- )
-)
-
-#define DEFINE_PMD_INSERT_MAPPING_EVENT(name) \
-DEFINE_EVENT(dax_pmd_insert_mapping_class, name, \
- TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
- long length, pfn_t pfn, void *radix_entry), \
- TP_ARGS(inode, vmf, length, pfn, radix_entry))
-
-DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping);
-
DECLARE_EVENT_CLASS(dax_pte_fault_class,
TP_PROTO(struct inode *inode, struct vm_fault *vmf, int result),
TP_ARGS(inode, vmf, result),
TP_STRUCT__entry(
__field(unsigned long, ino)
- __field(unsigned long, vm_flags)
+ __field(vm_flags_t, vm_flags)
__field(unsigned long, address)
__field(pgoff_t, pgoff)
__field(dev_t, dev)
@@ -194,36 +146,6 @@ DEFINE_PTE_FAULT_EVENT(dax_load_hole);
DEFINE_PTE_FAULT_EVENT(dax_insert_pfn_mkwrite_no_entry);
DEFINE_PTE_FAULT_EVENT(dax_insert_pfn_mkwrite);
-TRACE_EVENT(dax_insert_mapping,
- TP_PROTO(struct inode *inode, struct vm_fault *vmf, void *radix_entry),
- TP_ARGS(inode, vmf, radix_entry),
- TP_STRUCT__entry(
- __field(unsigned long, ino)
- __field(unsigned long, vm_flags)
- __field(unsigned long, address)
- __field(void *, radix_entry)
- __field(dev_t, dev)
- __field(int, write)
- ),
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->vm_flags = vmf->vma->vm_flags;
- __entry->address = vmf->address;
- __entry->write = vmf->flags & FAULT_FLAG_WRITE;
- __entry->radix_entry = radix_entry;
- ),
- TP_printk("dev %d:%d ino %#lx %s %s address %#lx radix_entry %#lx",
- MAJOR(__entry->dev),
- MINOR(__entry->dev),
- __entry->ino,
- __entry->vm_flags & VM_SHARED ? "shared" : "private",
- __entry->write ? "write" : "read",
- __entry->address,
- (unsigned long)__entry->radix_entry
- )
-)
-
DECLARE_EVENT_CLASS(dax_writeback_range_class,
TP_PROTO(struct inode *inode, pgoff_t start_index, pgoff_t end_index),
TP_ARGS(inode, start_index, end_index),
diff --git a/include/trace/events/fscache.h b/include/trace/events/fscache.h
index a6190aa1b406..f1a73aa83fbb 100644
--- a/include/trace/events/fscache.h
+++ b/include/trace/events/fscache.h
@@ -35,12 +35,14 @@ enum fscache_volume_trace {
fscache_volume_get_cookie,
fscache_volume_get_create_work,
fscache_volume_get_hash_collision,
+ fscache_volume_get_withdraw,
fscache_volume_free,
fscache_volume_new_acquire,
fscache_volume_put_cookie,
fscache_volume_put_create_work,
fscache_volume_put_hash_collision,
fscache_volume_put_relinquish,
+ fscache_volume_put_withdraw,
fscache_volume_see_create_work,
fscache_volume_see_hash_wake,
fscache_volume_wait_create_work,
@@ -120,12 +122,14 @@ enum fscache_access_trace {
EM(fscache_volume_get_cookie, "GET cook ") \
EM(fscache_volume_get_create_work, "GET creat") \
EM(fscache_volume_get_hash_collision, "GET hcoll") \
+ EM(fscache_volume_get_withdraw, "GET withd") \
EM(fscache_volume_free, "FREE ") \
EM(fscache_volume_new_acquire, "NEW acq ") \
EM(fscache_volume_put_cookie, "PUT cook ") \
EM(fscache_volume_put_create_work, "PUT creat") \
EM(fscache_volume_put_hash_collision, "PUT hcoll") \
EM(fscache_volume_put_relinquish, "PUT relnq") \
+ EM(fscache_volume_put_withdraw, "PUT withd") \
EM(fscache_volume_see_create_work, "SEE creat") \
EM(fscache_volume_see_hash_wake, "SEE hwake") \
E_(fscache_volume_wait_create_work, "WAIT crea")
diff --git a/include/trace/events/fsi.h b/include/trace/events/fsi.h
index c9a72e8432b8..5ff15126ad9d 100644
--- a/include/trace/events/fsi.h
+++ b/include/trace/events/fsi.h
@@ -122,6 +122,37 @@ TRACE_EVENT(fsi_master_break,
)
);
+TRACE_EVENT(fsi_master_scan,
+ TP_PROTO(const struct fsi_master *master, bool scan),
+ TP_ARGS(master, scan),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, n_links)
+ __field(bool, scan)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->idx;
+ __entry->n_links = master->n_links;
+ __entry->scan = scan;
+ ),
+ TP_printk("fsi%d (%d links) %s", __entry->master_idx, __entry->n_links,
+ __entry->scan ? "scan" : "unscan")
+);
+
+TRACE_EVENT(fsi_master_unregister,
+ TP_PROTO(const struct fsi_master *master),
+ TP_ARGS(master),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, n_links)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->idx;
+ __entry->n_links = master->n_links;
+ ),
+ TP_printk("fsi%d (%d links)", __entry->master_idx, __entry->n_links)
+);
+
TRACE_EVENT(fsi_slave_init,
TP_PROTO(const struct fsi_slave *slave),
TP_ARGS(slave),
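For reference, a minimal sketch (not part of this diff) of call sites for the two new FSI master events; the helpers are hypothetical, and the real callers presumably sit in the FSI core's scan and unregister paths:

#include <trace/events/fsi.h>

/* Illustrative only. */
static void note_master_scan(const struct fsi_master *master, bool scan)
{
	trace_fsi_master_scan(master, scan);		/* "fsi%d (%d links) scan|unscan" */
}

static void note_master_unregister(const struct fsi_master *master)
{
	trace_fsi_master_unregister(master);		/* "fsi%d (%d links)" */
}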
diff --git a/include/trace/events/fsi_master_i2cr.h b/include/trace/events/fsi_master_i2cr.h
new file mode 100644
index 000000000000..c33eba130049
--- /dev/null
+++ b/include/trace/events/fsi_master_i2cr.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fsi_master_i2cr
+
+#if !defined(_TRACE_FSI_MASTER_I2CR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FSI_MASTER_I2CR_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(i2cr_i2c_error,
+ TP_PROTO(const struct i2c_client *client, uint32_t command, int rc),
+ TP_ARGS(client, command, rc),
+ TP_STRUCT__entry(
+ __field(int, bus)
+ __field(int, rc)
+ __array(unsigned char, command, sizeof(uint32_t))
+ __field(unsigned short, addr)
+ ),
+ TP_fast_assign(
+ __entry->bus = client->adapter->nr;
+ __entry->rc = rc;
+ memcpy(__entry->command, &command, sizeof(uint32_t));
+ __entry->addr = client->addr;
+ ),
+ TP_printk("%d-%02x command:{ %*ph } rc:%d", __entry->bus, __entry->addr,
+ (int)sizeof(uint32_t), __entry->command, __entry->rc)
+);
+
+TRACE_EVENT(i2cr_read,
+ TP_PROTO(const struct i2c_client *client, uint32_t command, uint64_t *data),
+ TP_ARGS(client, command, data),
+ TP_STRUCT__entry(
+ __field(int, bus)
+ __array(unsigned char, data, sizeof(uint64_t))
+ __array(unsigned char, command, sizeof(uint32_t))
+ __field(unsigned short, addr)
+ ),
+ TP_fast_assign(
+ __entry->bus = client->adapter->nr;
+ memcpy(__entry->data, data, sizeof(uint64_t));
+ memcpy(__entry->command, &command, sizeof(uint32_t));
+ __entry->addr = client->addr;
+ ),
+ TP_printk("%d-%02x command:{ %*ph } { %*ph }", __entry->bus, __entry->addr,
+ (int)sizeof(uint32_t), __entry->command, (int)sizeof(uint64_t), __entry->data)
+);
+
+TRACE_EVENT(i2cr_status,
+ TP_PROTO(const struct i2c_client *client, uint64_t status),
+ TP_ARGS(client, status),
+ TP_STRUCT__entry(
+ __field(uint64_t, status)
+ __field(int, bus)
+ __field(unsigned short, addr)
+ ),
+ TP_fast_assign(
+ __entry->status = status;
+ __entry->bus = client->adapter->nr;
+ __entry->addr = client->addr;
+ ),
+ TP_printk("%d-%02x %016llx", __entry->bus, __entry->addr, __entry->status)
+);
+
+TRACE_EVENT(i2cr_status_error,
+ TP_PROTO(const struct i2c_client *client, uint64_t status, uint64_t error, uint64_t log),
+ TP_ARGS(client, status, error, log),
+ TP_STRUCT__entry(
+ __field(uint64_t, error)
+ __field(uint64_t, log)
+ __field(uint64_t, status)
+ __field(int, bus)
+ __field(unsigned short, addr)
+ ),
+ TP_fast_assign(
+ __entry->error = error;
+ __entry->log = log;
+ __entry->status = status;
+ __entry->bus = client->adapter->nr;
+ __entry->addr = client->addr;
+ ),
+ TP_printk("%d-%02x status:%016llx error:%016llx log:%016llx", __entry->bus, __entry->addr,
+ __entry->status, __entry->error, __entry->log)
+);
+
+TRACE_EVENT(i2cr_write,
+ TP_PROTO(const struct i2c_client *client, uint32_t command, uint64_t data),
+ TP_ARGS(client, command, data),
+ TP_STRUCT__entry(
+ __field(int, bus)
+ __array(unsigned char, data, sizeof(uint64_t))
+ __array(unsigned char, command, sizeof(uint32_t))
+ __field(unsigned short, addr)
+ ),
+ TP_fast_assign(
+ __entry->bus = client->adapter->nr;
+ memcpy(__entry->data, &data, sizeof(uint64_t));
+ memcpy(__entry->command, &command, sizeof(uint32_t));
+ __entry->addr = client->addr;
+ ),
+ TP_printk("%d-%02x command:{ %*ph } { %*ph }", __entry->bus, __entry->addr,
+ (int)sizeof(uint32_t), __entry->command, (int)sizeof(uint64_t), __entry->data)
+);
+
+#endif
+
+#include <trace/define_trace.h>
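For reference, a minimal sketch (not part of this diff) of a hypothetical write path in the I2CR master driver; the event snapshots the raw command and data bytes into fixed __array()s and prints them with the %*ph hex-dump specifier:

#include <linux/i2c.h>
#include <trace/events/fsi_master_i2cr.h>

/* Illustrative only. */
static void note_i2cr_write(const struct i2c_client *client, uint32_t command,
			    uint64_t data)
{
	trace_i2cr_write(client, command, data);
}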
diff --git a/include/trace/events/habanalabs.h b/include/trace/events/habanalabs.h
index 951643e6a7a9..fa0d2c6bace4 100644
--- a/include/trace/events/habanalabs.h
+++ b/include/trace/events/habanalabs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0
*
- * Copyright 2016-2021 HabanaLabs, Ltd.
+ * Copyright 2022-2023 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -27,7 +27,7 @@ DECLARE_EVENT_CLASS(habanalabs_mmu_template,
),
TP_fast_assign(
- __assign_str(dname, dev_name(dev));
+ __assign_str(dname);
__entry->virt_addr = virt_addr;
__entry->phys_addr = phys_addr;
__entry->page_size = page_size;
@@ -64,7 +64,7 @@ DECLARE_EVENT_CLASS(habanalabs_dma_alloc_template,
),
TP_fast_assign(
- __assign_str(dname, dev_name(dev));
+ __assign_str(dname);
__entry->cpu_addr = cpu_addr;
__entry->dma_addr = dma_addr;
__entry->size = size;
@@ -87,6 +87,49 @@ DEFINE_EVENT(habanalabs_dma_alloc_template, habanalabs_dma_free,
TP_PROTO(struct device *dev, u64 cpu_addr, u64 dma_addr, size_t size, const char *caller),
TP_ARGS(dev, cpu_addr, dma_addr, size, caller));
+DECLARE_EVENT_CLASS(habanalabs_dma_map_template,
+ TP_PROTO(struct device *dev, u64 phys_addr, u64 dma_addr, size_t len,
+ enum dma_data_direction dir, const char *caller),
+
+ TP_ARGS(dev, phys_addr, dma_addr, len, dir, caller),
+
+ TP_STRUCT__entry(
+ __string(dname, dev_name(dev))
+ __field(u64, phys_addr)
+ __field(u64, dma_addr)
+ __field(u32, len)
+ __field(int, dir)
+ __field(const char *, caller)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dname);
+ __entry->phys_addr = phys_addr;
+ __entry->dma_addr = dma_addr;
+ __entry->len = len;
+ __entry->dir = dir;
+ __entry->caller = caller;
+ ),
+
+ TP_printk("%s: phys_addr: %#llx, dma_addr: %#llx, len: %#x, dir: %d, caller: %s",
+ __get_str(dname),
+ __entry->phys_addr,
+ __entry->dma_addr,
+ __entry->len,
+ __entry->dir,
+ __entry->caller)
+);
+
+DEFINE_EVENT(habanalabs_dma_map_template, habanalabs_dma_map_page,
+ TP_PROTO(struct device *dev, u64 phys_addr, u64 dma_addr, size_t len,
+ enum dma_data_direction dir, const char *caller),
+ TP_ARGS(dev, phys_addr, dma_addr, len, dir, caller));
+
+DEFINE_EVENT(habanalabs_dma_map_template, habanalabs_dma_unmap_page,
+ TP_PROTO(struct device *dev, u64 phys_addr, u64 dma_addr, size_t len,
+ enum dma_data_direction dir, const char *caller),
+ TP_ARGS(dev, phys_addr, dma_addr, len, dir, caller));
+
DECLARE_EVENT_CLASS(habanalabs_comms_template,
TP_PROTO(struct device *dev, char *op_str),
@@ -98,11 +141,11 @@ DECLARE_EVENT_CLASS(habanalabs_comms_template,
),
TP_fast_assign(
- __assign_str(dname, dev_name(dev));
+ __assign_str(dname);
__entry->op_str = op_str;
),
- TP_printk("%s: cms: %s",
+ TP_printk("%s: cmd: %s",
__get_str(dname),
__entry->op_str)
);
@@ -135,7 +178,7 @@ DECLARE_EVENT_CLASS(habanalabs_reg_access_template,
),
TP_fast_assign(
- __assign_str(dname, dev_name(dev));
+ __assign_str(dname);
__entry->addr = addr;
__entry->val = val;
),
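For reference, a minimal sketch (not part of this diff, event name hypothetical) of the single-argument __assign_str() pattern these hunks switch to: the string source is named once in __string(), and __assign_str() copies from that same declaration:

TRACE_EVENT(example_dev_event,
	TP_PROTO(struct device *dev, u32 val),
	TP_ARGS(dev, val),
	TP_STRUCT__entry(
		__string(dname, dev_name(dev))
		__field(u32, val)
	),
	TP_fast_assign(
		__assign_str(dname);	/* source taken from __string() above */
		__entry->val = val;
	),
	TP_printk("%s: val: %u", __get_str(dname), __entry->val)
);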
diff --git a/include/trace/events/handshake.h b/include/trace/events/handshake.h
new file mode 100644
index 000000000000..bdd8a03cf5ba
--- /dev/null
+++ b/include/trace/events/handshake.h
@@ -0,0 +1,319 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM handshake
+
+#if !defined(_TRACE_HANDSHAKE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HANDSHAKE_H
+
+#include <linux/net.h>
+#include <net/tls_prot.h>
+#include <linux/tracepoint.h>
+#include <trace/events/net_probe_common.h>
+
+#define TLS_RECORD_TYPE_LIST \
+ record_type(CHANGE_CIPHER_SPEC) \
+ record_type(ALERT) \
+ record_type(HANDSHAKE) \
+ record_type(DATA) \
+ record_type(HEARTBEAT) \
+ record_type(TLS12_CID) \
+ record_type_end(ACK)
+
+#undef record_type
+#undef record_type_end
+#define record_type(x) TRACE_DEFINE_ENUM(TLS_RECORD_TYPE_##x);
+#define record_type_end(x) TRACE_DEFINE_ENUM(TLS_RECORD_TYPE_##x);
+
+TLS_RECORD_TYPE_LIST
+
+#undef record_type
+#undef record_type_end
+#define record_type(x) { TLS_RECORD_TYPE_##x, #x },
+#define record_type_end(x) { TLS_RECORD_TYPE_##x, #x }
+
+#define show_tls_content_type(type) \
+ __print_symbolic(type, TLS_RECORD_TYPE_LIST)
+
+TRACE_DEFINE_ENUM(TLS_ALERT_LEVEL_WARNING);
+TRACE_DEFINE_ENUM(TLS_ALERT_LEVEL_FATAL);
+
+#define show_tls_alert_level(level) \
+ __print_symbolic(level, \
+ { TLS_ALERT_LEVEL_WARNING, "Warning" }, \
+ { TLS_ALERT_LEVEL_FATAL, "Fatal" })
+
+#define TLS_ALERT_DESCRIPTION_LIST \
+ alert_description(CLOSE_NOTIFY) \
+ alert_description(UNEXPECTED_MESSAGE) \
+ alert_description(BAD_RECORD_MAC) \
+ alert_description(RECORD_OVERFLOW) \
+ alert_description(HANDSHAKE_FAILURE) \
+ alert_description(BAD_CERTIFICATE) \
+ alert_description(UNSUPPORTED_CERTIFICATE) \
+ alert_description(CERTIFICATE_REVOKED) \
+ alert_description(CERTIFICATE_EXPIRED) \
+ alert_description(CERTIFICATE_UNKNOWN) \
+ alert_description(ILLEGAL_PARAMETER) \
+ alert_description(UNKNOWN_CA) \
+ alert_description(ACCESS_DENIED) \
+ alert_description(DECODE_ERROR) \
+ alert_description(DECRYPT_ERROR) \
+ alert_description(TOO_MANY_CIDS_REQUESTED) \
+ alert_description(PROTOCOL_VERSION) \
+ alert_description(INSUFFICIENT_SECURITY) \
+ alert_description(INTERNAL_ERROR) \
+ alert_description(INAPPROPRIATE_FALLBACK) \
+ alert_description(USER_CANCELED) \
+ alert_description(MISSING_EXTENSION) \
+ alert_description(UNSUPPORTED_EXTENSION) \
+ alert_description(UNRECOGNIZED_NAME) \
+ alert_description(BAD_CERTIFICATE_STATUS_RESPONSE) \
+ alert_description(UNKNOWN_PSK_IDENTITY) \
+ alert_description(CERTIFICATE_REQUIRED) \
+ alert_description_end(NO_APPLICATION_PROTOCOL)
+
+#undef alert_description
+#undef alert_description_end
+#define alert_description(x) TRACE_DEFINE_ENUM(TLS_ALERT_DESC_##x);
+#define alert_description_end(x) TRACE_DEFINE_ENUM(TLS_ALERT_DESC_##x);
+
+TLS_ALERT_DESCRIPTION_LIST
+
+#undef alert_description
+#undef alert_description_end
+#define alert_description(x) { TLS_ALERT_DESC_##x, #x },
+#define alert_description_end(x) { TLS_ALERT_DESC_##x, #x }
+
+#define show_tls_alert_description(desc) \
+ __print_symbolic(desc, TLS_ALERT_DESCRIPTION_LIST)
+
+DECLARE_EVENT_CLASS(handshake_event_class,
+ TP_PROTO(
+ const struct net *net,
+ const struct handshake_req *req,
+ const struct sock *sk
+ ),
+ TP_ARGS(net, req, sk),
+ TP_STRUCT__entry(
+ __field(const void *, req)
+ __field(const void *, sk)
+ __field(unsigned int, netns_ino)
+ ),
+ TP_fast_assign(
+ __entry->req = req;
+ __entry->sk = sk;
+ __entry->netns_ino = net->ns.inum;
+ ),
+ TP_printk("req=%p sk=%p",
+ __entry->req, __entry->sk
+ )
+);
+#define DEFINE_HANDSHAKE_EVENT(name) \
+ DEFINE_EVENT(handshake_event_class, name, \
+ TP_PROTO( \
+ const struct net *net, \
+ const struct handshake_req *req, \
+ const struct sock *sk \
+ ), \
+ TP_ARGS(net, req, sk))
+
+DECLARE_EVENT_CLASS(handshake_fd_class,
+ TP_PROTO(
+ const struct net *net,
+ const struct handshake_req *req,
+ const struct sock *sk,
+ int fd
+ ),
+ TP_ARGS(net, req, sk, fd),
+ TP_STRUCT__entry(
+ __field(const void *, req)
+ __field(const void *, sk)
+ __field(int, fd)
+ __field(unsigned int, netns_ino)
+ ),
+ TP_fast_assign(
+ __entry->req = req;
+ __entry->sk = req->hr_sk;
+ __entry->fd = fd;
+ __entry->netns_ino = net->ns.inum;
+ ),
+ TP_printk("req=%p sk=%p fd=%d",
+ __entry->req, __entry->sk, __entry->fd
+ )
+);
+#define DEFINE_HANDSHAKE_FD_EVENT(name) \
+ DEFINE_EVENT(handshake_fd_class, name, \
+ TP_PROTO( \
+ const struct net *net, \
+ const struct handshake_req *req, \
+ const struct sock *sk, \
+ int fd \
+ ), \
+ TP_ARGS(net, req, sk, fd))
+
+DECLARE_EVENT_CLASS(handshake_error_class,
+ TP_PROTO(
+ const struct net *net,
+ const struct handshake_req *req,
+ const struct sock *sk,
+ int err
+ ),
+ TP_ARGS(net, req, sk, err),
+ TP_STRUCT__entry(
+ __field(const void *, req)
+ __field(const void *, sk)
+ __field(int, err)
+ __field(unsigned int, netns_ino)
+ ),
+ TP_fast_assign(
+ __entry->req = req;
+ __entry->sk = sk;
+ __entry->err = err;
+ __entry->netns_ino = net->ns.inum;
+ ),
+ TP_printk("req=%p sk=%p err=%d",
+ __entry->req, __entry->sk, __entry->err
+ )
+);
+#define DEFINE_HANDSHAKE_ERROR(name) \
+ DEFINE_EVENT(handshake_error_class, name, \
+ TP_PROTO( \
+ const struct net *net, \
+ const struct handshake_req *req, \
+ const struct sock *sk, \
+ int err \
+ ), \
+ TP_ARGS(net, req, sk, err))
+
+DECLARE_EVENT_CLASS(handshake_alert_class,
+ TP_PROTO(
+ const struct sock *sk,
+ unsigned char level,
+ unsigned char description
+ ),
+ TP_ARGS(sk, level, description),
+ TP_STRUCT__entry(
+ /* sockaddr_in6 is always bigger than sockaddr_in */
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
+ __field(unsigned int, netns_ino)
+ __field(unsigned long, level)
+ __field(unsigned long, description)
+ ),
+ TP_fast_assign(
+ const struct inet_sock *inet = inet_sk(sk);
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+ TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
+ __entry->netns_ino = sock_net(sk)->ns.inum;
+ __entry->level = level;
+ __entry->description = description;
+ ),
+ TP_printk("src=%pISpc dest=%pISpc %s: %s",
+ __entry->saddr, __entry->daddr,
+ show_tls_alert_level(__entry->level),
+ show_tls_alert_description(__entry->description)
+ )
+);
+#define DEFINE_HANDSHAKE_ALERT(name) \
+ DEFINE_EVENT(handshake_alert_class, name, \
+ TP_PROTO( \
+ const struct sock *sk, \
+ unsigned char level, \
+ unsigned char description \
+ ), \
+ TP_ARGS(sk, level, description))
+
+
+/*
+ * Request lifetime events
+ */
+
+DEFINE_HANDSHAKE_EVENT(handshake_submit);
+DEFINE_HANDSHAKE_ERROR(handshake_submit_err);
+DEFINE_HANDSHAKE_EVENT(handshake_cancel);
+DEFINE_HANDSHAKE_EVENT(handshake_cancel_none);
+DEFINE_HANDSHAKE_EVENT(handshake_cancel_busy);
+DEFINE_HANDSHAKE_EVENT(handshake_destruct);
+
+
+TRACE_EVENT(handshake_complete,
+ TP_PROTO(
+ const struct net *net,
+ const struct handshake_req *req,
+ const struct sock *sk,
+ int status
+ ),
+ TP_ARGS(net, req, sk, status),
+ TP_STRUCT__entry(
+ __field(const void *, req)
+ __field(const void *, sk)
+ __field(int, status)
+ __field(unsigned int, netns_ino)
+ ),
+ TP_fast_assign(
+ __entry->req = req;
+ __entry->sk = sk;
+ __entry->status = status;
+ __entry->netns_ino = net->ns.inum;
+ ),
+ TP_printk("req=%p sk=%p status=%d",
+ __entry->req, __entry->sk, __entry->status
+ )
+);
+
+/*
+ * Netlink events
+ */
+
+DEFINE_HANDSHAKE_ERROR(handshake_notify_err);
+DEFINE_HANDSHAKE_FD_EVENT(handshake_cmd_accept);
+DEFINE_HANDSHAKE_ERROR(handshake_cmd_accept_err);
+DEFINE_HANDSHAKE_FD_EVENT(handshake_cmd_done);
+DEFINE_HANDSHAKE_ERROR(handshake_cmd_done_err);
+
+/*
+ * TLS Record events
+ */
+
+TRACE_EVENT(tls_contenttype,
+ TP_PROTO(
+ const struct sock *sk,
+ unsigned char type
+ ),
+ TP_ARGS(sk, type),
+ TP_STRUCT__entry(
+ /* sockaddr_in6 is always bigger than sockaddr_in */
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
+ __field(unsigned int, netns_ino)
+ __field(unsigned long, type)
+ ),
+ TP_fast_assign(
+ const struct inet_sock *inet = inet_sk(sk);
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+ TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
+ __entry->netns_ino = sock_net(sk)->ns.inum;
+ __entry->type = type;
+ ),
+ TP_printk("src=%pISpc dest=%pISpc %s",
+ __entry->saddr, __entry->daddr,
+ show_tls_content_type(__entry->type)
+ )
+);
+
+/*
+ * TLS Alert events
+ */
+
+DEFINE_HANDSHAKE_ALERT(tls_alert_send);
+DEFINE_HANDSHAKE_ALERT(tls_alert_recv);
+
+#endif /* _TRACE_HANDSHAKE_H */
+
+#include <trace/define_trace.h>
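For context: each DEFINE_HANDSHAKE_EVENT()/DEFINE_HANDSHAKE_ERROR() above generates a trace_<event>() helper with its class's TP_PROTO() signature, which the handshake code calls at the matching point in the request lifetime. A minimal call-site sketch (not part of this diff; net, req and err are assumed to be in scope):

	/* request submission, with the socket taken from the request */
	trace_handshake_submit(net, req, req->hr_sk);
	/* on failure, the error variant carries the errno as well */
	trace_handshake_submit_err(net, req, req->hr_sk, err);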
diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index 3e6fb05852f9..4cde53b45a85 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -10,8 +10,7 @@
#define SCAN_STATUS \
EM( SCAN_FAIL, "failed") \
EM( SCAN_SUCCEED, "succeeded") \
- EM( SCAN_PMD_NULL, "pmd_null") \
- EM( SCAN_PMD_NONE, "pmd_none") \
+ EM( SCAN_NO_PTE_TABLE, "no_pte_table") \
EM( SCAN_PMD_MAPPED, "page_pmd_mapped") \
EM( SCAN_EXCEED_NONE_PTE, "exceed_none_pte") \
EM( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") \
@@ -19,7 +18,6 @@
EM( SCAN_PTE_NON_PRESENT, "pte_non_present") \
EM( SCAN_PTE_UFFD_WP, "pte_uffd_wp") \
EM( SCAN_PTE_MAPPED_HUGEPAGE, "pte_mapped_hugepage") \
- EM( SCAN_PAGE_RO, "no_writable_page") \
EM( SCAN_LACK_REFERENCED_PAGE, "lack_referenced_page") \
EM( SCAN_PAGE_NULL, "page_null") \
EM( SCAN_SCAN_ABORT, "scan_aborted") \
@@ -36,7 +34,10 @@
EM( SCAN_ALLOC_HUGE_PAGE_FAIL, "alloc_huge_page_failed") \
EM( SCAN_CGROUP_CHARGE_FAIL, "ccgroup_charge_failed") \
EM( SCAN_TRUNCATED, "truncated") \
- EMe(SCAN_PAGE_HAS_PRIVATE, "page_has_private") \
+ EM( SCAN_PAGE_HAS_PRIVATE, "page_has_private") \
+ EM( SCAN_STORE_FAILED, "store_failed") \
+ EM( SCAN_COPY_MC, "copy_poisoned_page") \
+ EMe(SCAN_PAGE_FILLED, "page_filled")
#undef EM
#undef EMe
@@ -52,15 +53,14 @@ SCAN_STATUS
TRACE_EVENT(mm_khugepaged_scan_pmd,
- TP_PROTO(struct mm_struct *mm, struct page *page, bool writable,
+ TP_PROTO(struct mm_struct *mm, struct folio *folio,
int referenced, int none_or_zero, int status, int unmapped),
- TP_ARGS(mm, page, writable, referenced, none_or_zero, status, unmapped),
+ TP_ARGS(mm, folio, referenced, none_or_zero, status, unmapped),
TP_STRUCT__entry(
__field(struct mm_struct *, mm)
__field(unsigned long, pfn)
- __field(bool, writable)
__field(int, referenced)
__field(int, none_or_zero)
__field(int, status)
@@ -69,18 +69,16 @@ TRACE_EVENT(mm_khugepaged_scan_pmd,
TP_fast_assign(
__entry->mm = mm;
- __entry->pfn = page ? page_to_pfn(page) : -1;
- __entry->writable = writable;
+ __entry->pfn = folio ? folio_pfn(folio) : -1;
__entry->referenced = referenced;
__entry->none_or_zero = none_or_zero;
__entry->status = status;
__entry->unmapped = unmapped;
),
- TP_printk("mm=%p, scan_pfn=0x%lx, writable=%d, referenced=%d, none_or_zero=%d, status=%s, unmapped=%d",
+ TP_printk("mm=%p, scan_pfn=0x%lx, referenced=%d, none_or_zero=%d, status=%s, unmapped=%d",
__entry->mm,
__entry->pfn,
- __entry->writable,
__entry->referenced,
__entry->none_or_zero,
__print_symbolic(__entry->status, SCAN_STATUS),
@@ -113,32 +111,29 @@ TRACE_EVENT(mm_collapse_huge_page,
TRACE_EVENT(mm_collapse_huge_page_isolate,
- TP_PROTO(struct page *page, int none_or_zero,
- int referenced, bool writable, int status),
+ TP_PROTO(struct folio *folio, int none_or_zero,
+ int referenced, int status),
- TP_ARGS(page, none_or_zero, referenced, writable, status),
+ TP_ARGS(folio, none_or_zero, referenced, status),
TP_STRUCT__entry(
__field(unsigned long, pfn)
__field(int, none_or_zero)
__field(int, referenced)
- __field(bool, writable)
__field(int, status)
),
TP_fast_assign(
- __entry->pfn = page ? page_to_pfn(page) : -1;
+ __entry->pfn = folio ? folio_pfn(folio) : -1;
__entry->none_or_zero = none_or_zero;
__entry->referenced = referenced;
- __entry->writable = writable;
__entry->status = status;
),
- TP_printk("scan_pfn=0x%lx, none_or_zero=%d, referenced=%d, writable=%d, status=%s",
+ TP_printk("scan_pfn=0x%lx, none_or_zero=%d, referenced=%d, status=%s",
__entry->pfn,
__entry->none_or_zero,
__entry->referenced,
- __entry->writable,
__print_symbolic(__entry->status, SCAN_STATUS))
);
@@ -171,10 +166,10 @@ TRACE_EVENT(mm_collapse_huge_page_swapin,
TRACE_EVENT(mm_khugepaged_scan_file,
- TP_PROTO(struct mm_struct *mm, struct page *page, struct file *file,
+ TP_PROTO(struct mm_struct *mm, struct folio *folio, struct file *file,
int present, int swap, int result),
- TP_ARGS(mm, page, file, present, swap, result),
+ TP_ARGS(mm, folio, file, present, swap, result),
TP_STRUCT__entry(
__field(struct mm_struct *, mm)
@@ -187,8 +182,8 @@ TRACE_EVENT(mm_khugepaged_scan_file,
TP_fast_assign(
__entry->mm = mm;
- __entry->pfn = page ? page_to_pfn(page) : -1;
- __assign_str(filename, file->f_path.dentry->d_iname);
+ __entry->pfn = folio ? folio_pfn(folio) : -1;
+ __assign_str(filename);
__entry->present = present;
__entry->swap = swap;
__entry->result = result;
@@ -204,10 +199,10 @@ TRACE_EVENT(mm_khugepaged_scan_file,
);
TRACE_EVENT(mm_khugepaged_collapse_file,
- TP_PROTO(struct mm_struct *mm, struct page *hpage, pgoff_t index,
- bool is_shmem, unsigned long addr, struct file *file,
+ TP_PROTO(struct mm_struct *mm, struct folio *new_folio, pgoff_t index,
+ unsigned long addr, bool is_shmem, struct file *file,
int nr, int result),
- TP_ARGS(mm, hpage, index, addr, is_shmem, file, nr, result),
+ TP_ARGS(mm, new_folio, index, addr, is_shmem, file, nr, result),
TP_STRUCT__entry(
__field(struct mm_struct *, mm)
__field(unsigned long, hpfn)
@@ -221,16 +216,16 @@ TRACE_EVENT(mm_khugepaged_collapse_file,
TP_fast_assign(
__entry->mm = mm;
- __entry->hpfn = hpage ? page_to_pfn(hpage) : -1;
+ __entry->hpfn = new_folio ? folio_pfn(new_folio) : -1;
__entry->index = index;
__entry->addr = addr;
__entry->is_shmem = is_shmem;
- __assign_str(filename, file->f_path.dentry->d_iname);
+ __assign_str(filename);
__entry->nr = nr;
__entry->result = result;
),
- TP_printk("mm=%p, hpage_pfn=0x%lx, index=%ld, addr=%ld, is_shmem=%d, filename=%s, nr=%d, result=%s",
+ TP_printk("mm=%p, hpage_pfn=0x%lx, index=%ld, addr=%lx, is_shmem=%d, filename=%s, nr=%d, result=%s",
__entry->mm,
__entry->hpfn,
__entry->index,
diff --git a/include/trace/events/hugetlbfs.h b/include/trace/events/hugetlbfs.h
new file mode 100644
index 000000000000..59605dfaeeb4
--- /dev/null
+++ b/include/trace/events/hugetlbfs.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hugetlbfs
+
+#if !defined(_TRACE_HUGETLBFS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HUGETLBFS_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(hugetlbfs_alloc_inode,
+
+ TP_PROTO(struct inode *inode, struct inode *dir, int mode),
+
+ TP_ARGS(inode, dir, mode),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(ino_t, dir)
+ __field(__u16, mode)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->dir = dir ? dir->i_ino : 0;
+ __entry->mode = mode;
+ ),
+
+ TP_printk("dev %d,%d ino %lu dir %lu mode 0%o",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ (unsigned long) __entry->dir, __entry->mode)
+);
+
+DECLARE_EVENT_CLASS(hugetlbfs__inode,
+
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(__u16, mode)
+ __field(loff_t, size)
+ __field(unsigned int, nlink)
+ __field(unsigned int, seals)
+ __field(blkcnt_t, blocks)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->mode = inode->i_mode;
+ __entry->size = inode->i_size;
+ __entry->nlink = inode->i_nlink;
+ __entry->seals = HUGETLBFS_I(inode)->seals;
+ __entry->blocks = inode->i_blocks;
+ ),
+
+ TP_printk("dev %d,%d ino %lu mode 0%o size %lld nlink %u seals %u blocks %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino,
+ __entry->mode, __entry->size, __entry->nlink, __entry->seals,
+ (unsigned long long)__entry->blocks)
+);
+
+DEFINE_EVENT(hugetlbfs__inode, hugetlbfs_evict_inode,
+
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode)
+);
+
+DEFINE_EVENT(hugetlbfs__inode, hugetlbfs_free_inode,
+
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode)
+);
+
+TRACE_EVENT(hugetlbfs_setattr,
+
+ TP_PROTO(struct inode *inode, struct dentry *dentry,
+ struct iattr *attr),
+
+ TP_ARGS(inode, dentry, attr),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(unsigned int, d_len)
+ __string(d_name, dentry->d_name.name)
+ __field(unsigned int, ia_valid)
+ __field(unsigned int, ia_mode)
+ __field(loff_t, old_size)
+ __field(loff_t, ia_size)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->d_len = dentry->d_name.len;
+ __assign_str(d_name);
+ __entry->ia_valid = attr->ia_valid;
+ __entry->ia_mode = attr->ia_mode;
+ __entry->old_size = inode->i_size;
+ __entry->ia_size = attr->ia_size;
+ ),
+
+ TP_printk("dev %d,%d ino %lu name %.*s valid %#x mode 0%o old_size %lld size %lld",
+ MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long)__entry->ino,
+ __entry->d_len, __get_str(d_name), __entry->ia_valid, __entry->ia_mode,
+ __entry->old_size, __entry->ia_size)
+);
+
+TRACE_EVENT(hugetlbfs_fallocate,
+
+ TP_PROTO(struct inode *inode, int mode,
+ loff_t offset, loff_t len, int ret),
+
+ TP_ARGS(inode, mode, offset, len, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(int, mode)
+ __field(loff_t, offset)
+ __field(loff_t, len)
+ __field(loff_t, size)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->mode = mode;
+ __entry->offset = offset;
+ __entry->len = len;
+ __entry->size = inode->i_size;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev %d,%d ino %lu mode 0%o offset %lld len %lld size %lld ret %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long)__entry->ino, __entry->mode,
+ (unsigned long long)__entry->offset,
+ (unsigned long long)__entry->len,
+ (unsigned long long)__entry->size,
+ __entry->ret)
+);
+
+#endif /* _TRACE_HUGETLBFS_H */
+
+ /* This part must be outside protection */
+#include <trace/define_trace.h>
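The hugetlbfs events above are emitted from the hugetlbfs inode code through the generated trace_*() helpers; a sketch of representative call sites (placement and locals assumed, arguments ordered per the TP_PROTO() declarations):

	trace_hugetlbfs_alloc_inode(inode, dir, mode);		/* new inode created */
	trace_hugetlbfs_setattr(inode, dentry, attr);		/* attribute change requested */
	trace_hugetlbfs_fallocate(inode, mode, offset, len, ret);	/* includes the final return value */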
diff --git a/include/trace/events/thermal_pressure.h b/include/trace/events/hw_pressure.h
index b68680201360..b9cd68854128 100644
--- a/include/trace/events/thermal_pressure.h
+++ b/include/trace/events/hw_pressure.h
@@ -1,27 +1,27 @@
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
-#define TRACE_SYSTEM thermal_pressure
+#define TRACE_SYSTEM hw_pressure
#if !defined(_TRACE_THERMAL_PRESSURE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_THERMAL_PRESSURE_H
#include <linux/tracepoint.h>
-TRACE_EVENT(thermal_pressure_update,
- TP_PROTO(int cpu, unsigned long thermal_pressure),
- TP_ARGS(cpu, thermal_pressure),
+TRACE_EVENT(hw_pressure_update,
+ TP_PROTO(int cpu, unsigned long hw_pressure),
+ TP_ARGS(cpu, hw_pressure),
TP_STRUCT__entry(
- __field(unsigned long, thermal_pressure)
+ __field(unsigned long, hw_pressure)
__field(int, cpu)
),
TP_fast_assign(
- __entry->thermal_pressure = thermal_pressure;
+ __entry->hw_pressure = hw_pressure;
__entry->cpu = cpu;
),
- TP_printk("cpu=%d thermal_pressure=%lu", __entry->cpu, __entry->thermal_pressure)
+ TP_printk("cpu=%d hw_pressure=%lu", __entry->cpu, __entry->hw_pressure)
);
#endif /* _TRACE_THERMAL_PRESSURE_H */
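Callers follow the rename: trace_thermal_pressure_update() becomes trace_hw_pressure_update(), with the same argument shape. Sketch (cpu and the new pressure value assumed in scope):

	trace_hw_pressure_update(cpu, hw_pressure);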
diff --git a/include/trace/events/hwmon.h b/include/trace/events/hwmon.h
index d7a1d0ffb679..3865098f21f1 100644
--- a/include/trace/events/hwmon.h
+++ b/include/trace/events/hwmon.h
@@ -9,36 +9,36 @@
DECLARE_EVENT_CLASS(hwmon_attr_class,
- TP_PROTO(int index, const char *attr_name, long val),
+ TP_PROTO(int index, const char *attr_name, long long val),
TP_ARGS(index, attr_name, val),
TP_STRUCT__entry(
__field(int, index)
__string(attr_name, attr_name)
- __field(long, val)
+ __field(long long, val)
),
TP_fast_assign(
__entry->index = index;
- __assign_str(attr_name, attr_name);
+ __assign_str(attr_name);
__entry->val = val;
),
- TP_printk("index=%d, attr_name=%s, val=%ld",
+ TP_printk("index=%d, attr_name=%s, val=%lld",
__entry->index, __get_str(attr_name), __entry->val)
);
DEFINE_EVENT(hwmon_attr_class, hwmon_attr_show,
- TP_PROTO(int index, const char *attr_name, long val),
+ TP_PROTO(int index, const char *attr_name, long long val),
TP_ARGS(index, attr_name, val)
);
DEFINE_EVENT(hwmon_attr_class, hwmon_attr_store,
- TP_PROTO(int index, const char *attr_name, long val),
+ TP_PROTO(int index, const char *attr_name, long long val),
TP_ARGS(index, attr_name, val)
);
@@ -57,8 +57,8 @@ TRACE_EVENT(hwmon_attr_show_string,
TP_fast_assign(
__entry->index = index;
- __assign_str(attr_name, attr_name);
- __assign_str(label, s);
+ __assign_str(attr_name);
+ __assign_str(label);
),
TP_printk("index=%d, attr_name=%s, val=%s",
diff --git a/include/trace/events/icmp.h b/include/trace/events/icmp.h
new file mode 100644
index 000000000000..31559796949a
--- /dev/null
+++ b/include/trace/events/icmp.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM icmp
+
+#if !defined(_TRACE_ICMP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ICMP_H
+
+#include <linux/icmp.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(icmp_send,
+
+ TP_PROTO(const struct sk_buff *skb, int type, int code),
+
+ TP_ARGS(skb, type, code),
+
+ TP_STRUCT__entry(
+ __field(const void *, skbaddr)
+ __field(int, type)
+ __field(int, code)
+ __array(__u8, saddr, 4)
+ __array(__u8, daddr, 4)
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(unsigned short, ulen)
+ ),
+
+ TP_fast_assign(
+ struct iphdr *iph = ip_hdr(skb);
+ struct udphdr *uh = udp_hdr(skb);
+ int proto_4 = iph->protocol;
+ __be32 *p32;
+
+ __entry->skbaddr = skb;
+ __entry->type = type;
+ __entry->code = code;
+
+ if (proto_4 != IPPROTO_UDP || (u8 *)uh < skb->head ||
+ (u8 *)uh + sizeof(struct udphdr)
+ > skb_tail_pointer(skb)) {
+ __entry->sport = 0;
+ __entry->dport = 0;
+ __entry->ulen = 0;
+ } else {
+ __entry->sport = ntohs(uh->source);
+ __entry->dport = ntohs(uh->dest);
+ __entry->ulen = ntohs(uh->len);
+ }
+
+ p32 = (__be32 *) __entry->saddr;
+ *p32 = iph->saddr;
+
+ p32 = (__be32 *) __entry->daddr;
+ *p32 = iph->daddr;
+ ),
+
+ TP_printk("icmp_send: type=%d, code=%d. From %pI4:%u to %pI4:%u ulen=%d skbaddr=%p",
+ __entry->type, __entry->code,
+ __entry->saddr, __entry->sport, __entry->daddr,
+ __entry->dport, __entry->ulen, __entry->skbaddr)
+);
+
+#endif /* _TRACE_ICMP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
+
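icmp_send is emitted on the ICMP error-send path; note that the source/destination ports are only decoded when the offending packet carries a complete UDP header, otherwise they are recorded as 0. Call-site sketch (skb, type and code assumed from the sending function):

	trace_icmp_send(skb, type, code);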
diff --git a/include/trace/events/initcall.h b/include/trace/events/initcall.h
index eb903c3f195f..5282afdf3ddf 100644
--- a/include/trace/events/initcall.h
+++ b/include/trace/events/initcall.h
@@ -18,7 +18,7 @@ TRACE_EVENT(initcall_level,
),
TP_fast_assign(
- __assign_str(level, level);
+ __assign_str(level);
),
TP_printk("level=%s", __get_str(level))
diff --git a/include/trace/events/intel_ifs.h b/include/trace/events/intel_ifs.h
index d7353024016c..70323acde1de 100644
--- a/include/trace/events/intel_ifs.h
+++ b/include/trace/events/intel_ifs.h
@@ -10,31 +10,58 @@
TRACE_EVENT(ifs_status,
- TP_PROTO(int cpu, union ifs_scan activate, union ifs_status status),
+ TP_PROTO(int batch, int start, int stop, u64 status),
- TP_ARGS(cpu, activate, status),
+ TP_ARGS(batch, start, stop, status),
TP_STRUCT__entry(
+ __field( int, batch )
__field( u64, status )
- __field( int, cpu )
- __field( u8, start )
- __field( u8, stop )
+ __field( u16, start )
+ __field( u16, stop )
),
TP_fast_assign(
- __entry->cpu = cpu;
- __entry->start = activate.start;
- __entry->stop = activate.stop;
- __entry->status = status.data;
+ __entry->batch = batch;
+ __entry->start = start;
+ __entry->stop = stop;
+ __entry->status = status;
),
- TP_printk("cpu: %d, start: %.2x, stop: %.2x, status: %llx",
- __entry->cpu,
+ TP_printk("batch: 0x%.2x, start: 0x%.4x, stop: 0x%.4x, status: 0x%.16llx",
+ __entry->batch,
__entry->start,
__entry->stop,
__entry->status)
);
+TRACE_EVENT(ifs_sbaf,
+
+ TP_PROTO(int batch, union ifs_sbaf activate, union ifs_sbaf_status status),
+
+ TP_ARGS(batch, activate, status),
+
+ TP_STRUCT__entry(
+ __field( u64, status )
+ __field( int, batch )
+ __field( u16, bundle )
+ __field( u16, pgm )
+ ),
+
+ TP_fast_assign(
+ __entry->status = status.data;
+ __entry->batch = batch;
+ __entry->bundle = activate.bundle_idx;
+ __entry->pgm = activate.pgm_idx;
+ ),
+
+ TP_printk("batch: 0x%.2x, bundle_idx: 0x%.4x, pgm_idx: 0x%.4x, status: 0x%.16llx",
+ __entry->batch,
+ __entry->bundle,
+ __entry->pgm,
+ __entry->status)
+);
+
#endif /* _TRACE_IFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/intel_ish.h b/include/trace/events/intel_ish.h
index e6d7ff55ee8c..64b6612c41bc 100644
--- a/include/trace/events/intel_ish.h
+++ b/include/trace/events/intel_ish.h
@@ -18,7 +18,7 @@ TRACE_EVENT(ishtp_dump,
),
TP_fast_assign(
- __assign_str(message, message);
+ __assign_str(message);
),
TP_printk("%s", __get_str(message))
diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
index 936fd41bf147..34b31a855ea4 100644
--- a/include/trace/events/io_uring.h
+++ b/include/trace/events/io_uring.h
@@ -133,24 +133,24 @@ TRACE_EVENT(io_uring_file_get,
* io_uring_queue_async_work - called before submitting a new async work
*
* @req: pointer to a submitted request
- * @rw: type of workqueue, hashed or normal
+ * @hashed: whether async work is hashed
*
* Allows to trace asynchronous work submission.
*/
TRACE_EVENT(io_uring_queue_async_work,
- TP_PROTO(struct io_kiocb *req, int rw),
+ TP_PROTO(struct io_kiocb *req, bool hashed),
- TP_ARGS(req, rw),
+ TP_ARGS(req, hashed),
TP_STRUCT__entry (
__field( void *, ctx )
__field( void *, req )
__field( u64, user_data )
__field( u8, opcode )
- __field( unsigned int, flags )
+ __field( unsigned long long, flags )
__field( struct io_wq_work *, work )
- __field( int, rw )
+ __field( bool, hashed )
__string( op_str, io_uring_get_opcode(req->opcode) )
),
@@ -159,18 +159,18 @@ TRACE_EVENT(io_uring_queue_async_work,
__entry->ctx = req->ctx;
__entry->req = req;
__entry->user_data = req->cqe.user_data;
- __entry->flags = req->flags;
+ __entry->flags = (__force unsigned long long) req->flags;
__entry->opcode = req->opcode;
__entry->work = &req->work;
- __entry->rw = rw;
+ __entry->hashed = hashed;
- __assign_str(op_str, io_uring_get_opcode(req->opcode));
+ __assign_str(op_str);
),
- TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, flags 0x%x, %s queue, work %p",
+ TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, flags 0x%llx, %s queue, work %p",
__entry->ctx, __entry->req, __entry->user_data,
- __get_str(op_str),
- __entry->flags, __entry->rw ? "hashed" : "normal", __entry->work)
+ __get_str(op_str), __entry->flags,
+ __entry->hashed ? "hashed" : "normal", __entry->work)
);
/**
@@ -202,7 +202,7 @@ TRACE_EVENT(io_uring_defer,
__entry->data = req->cqe.user_data;
__entry->opcode = req->opcode;
- __assign_str(op_str, io_uring_get_opcode(req->opcode));
+ __assign_str(op_str);
),
TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s",
@@ -303,7 +303,7 @@ TRACE_EVENT(io_uring_fail_link,
__entry->opcode = req->opcode;
__entry->link = link;
- __assign_str(op_str, io_uring_get_opcode(req->opcode));
+ __assign_str(op_str);
),
TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, link %p",
@@ -315,20 +315,14 @@ TRACE_EVENT(io_uring_fail_link,
* io_uring_complete - called when completing an SQE
*
* @ctx: pointer to a ring context structure
- * @req: pointer to a submitted request
- * @user_data: user data associated with the request
- * @res: result of the request
- * @cflags: completion flags
- * @extra1: extra 64-bit data for CQE32
- * @extra2: extra 64-bit data for CQE32
- *
+ * @req: (optional) pointer to a submitted request
+ * @cqe: pointer to the filled in CQE being posted
*/
TRACE_EVENT(io_uring_complete,
- TP_PROTO(void *ctx, void *req, u64 user_data, int res, unsigned cflags,
- u64 extra1, u64 extra2),
+ TP_PROTO(struct io_ring_ctx *ctx, void *req, struct io_uring_cqe *cqe),
- TP_ARGS(ctx, req, user_data, res, cflags, extra1, extra2),
+ TP_ARGS(ctx, req, cqe),
TP_STRUCT__entry (
__field( void *, ctx )
@@ -343,11 +337,11 @@ TRACE_EVENT(io_uring_complete,
TP_fast_assign(
__entry->ctx = ctx;
__entry->req = req;
- __entry->user_data = user_data;
- __entry->res = res;
- __entry->cflags = cflags;
- __entry->extra1 = extra1;
- __entry->extra2 = extra2;
+ __entry->user_data = cqe->user_data;
+ __entry->res = cqe->res;
+ __entry->cflags = cqe->flags;
+ __entry->extra1 = ctx->flags & IORING_SETUP_CQE32 || cqe->flags & IORING_CQE_F_32 ? cqe->big_cqe[0] : 0;
+ __entry->extra2 = ctx->flags & IORING_SETUP_CQE32 || cqe->flags & IORING_CQE_F_32 ? cqe->big_cqe[1] : 0;
),
TP_printk("ring %p, req %p, user_data 0x%llx, result %d, cflags 0x%x "
@@ -360,27 +354,25 @@ TRACE_EVENT(io_uring_complete,
);
/**
- * io_uring_submit_sqe - called before submitting one SQE
+ * io_uring_submit_req - called before submitting a request
*
* @req: pointer to a submitted request
- * @force_nonblock: whether a context blocking or not
*
* Allows to track SQE submitting, to understand what was the source of it, SQ
* thread or io_uring_enter call.
*/
-TRACE_EVENT(io_uring_submit_sqe,
+TRACE_EVENT(io_uring_submit_req,
- TP_PROTO(struct io_kiocb *req, bool force_nonblock),
+ TP_PROTO(struct io_kiocb *req),
- TP_ARGS(req, force_nonblock),
+ TP_ARGS(req),
TP_STRUCT__entry (
__field( void *, ctx )
__field( void *, req )
__field( unsigned long long, user_data )
__field( u8, opcode )
- __field( u32, flags )
- __field( bool, force_nonblock )
+ __field( unsigned long long, flags )
__field( bool, sq_thread )
__string( op_str, io_uring_get_opcode(req->opcode) )
@@ -391,17 +383,16 @@ TRACE_EVENT(io_uring_submit_sqe,
__entry->req = req;
__entry->user_data = req->cqe.user_data;
__entry->opcode = req->opcode;
- __entry->flags = req->flags;
- __entry->force_nonblock = force_nonblock;
+ __entry->flags = (__force unsigned long long) req->flags;
__entry->sq_thread = req->ctx->flags & IORING_SETUP_SQPOLL;
- __assign_str(op_str, io_uring_get_opcode(req->opcode));
+ __assign_str(op_str);
),
- TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, flags 0x%x, "
- "non block %d, sq_thread %d", __entry->ctx, __entry->req,
- __entry->user_data, __get_str(op_str),
- __entry->flags, __entry->force_nonblock, __entry->sq_thread)
+ TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, flags 0x%llx, "
+ "sq_thread %d", __entry->ctx, __entry->req,
+ __entry->user_data, __get_str(op_str), __entry->flags,
+ __entry->sq_thread)
);
/*
@@ -439,7 +430,7 @@ TRACE_EVENT(io_uring_poll_arm,
__entry->mask = mask;
__entry->events = events;
- __assign_str(op_str, io_uring_get_opcode(req->opcode));
+ __assign_str(op_str);
),
TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask 0x%x, events 0x%x",
@@ -478,7 +469,7 @@ TRACE_EVENT(io_uring_task_add,
__entry->opcode = req->opcode;
__entry->mask = mask;
- __assign_str(op_str, io_uring_get_opcode(req->opcode));
+ __assign_str(op_str);
),
TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask %x",
@@ -541,7 +532,7 @@ TRACE_EVENT(io_uring_req_failed,
__entry->addr3 = sqe->addr3;
__entry->error = error;
- __assign_str(op_str, io_uring_get_opcode(sqe->opcode));
+ __assign_str(op_str);
),
TP_printk("ring %p, req %p, user_data 0x%llx, "
@@ -605,29 +596,25 @@ TRACE_EVENT(io_uring_cqe_overflow,
*
* @tctx: pointer to a io_uring_task
* @count: how many functions it ran
- * @loops: how many loops it ran
*
*/
TRACE_EVENT(io_uring_task_work_run,
- TP_PROTO(void *tctx, unsigned int count, unsigned int loops),
+ TP_PROTO(void *tctx, unsigned int count),
- TP_ARGS(tctx, count, loops),
+ TP_ARGS(tctx, count),
TP_STRUCT__entry (
__field( void *, tctx )
__field( unsigned int, count )
- __field( unsigned int, loops )
),
TP_fast_assign(
__entry->tctx = tctx;
__entry->count = count;
- __entry->loops = loops;
),
- TP_printk("tctx %p, count %u, loops %u",
- __entry->tctx, __entry->count, __entry->loops)
+ TP_printk("tctx %p, count %u", __entry->tctx, __entry->count)
);
TRACE_EVENT(io_uring_short_write,
@@ -658,7 +645,7 @@ TRACE_EVENT(io_uring_short_write,
/*
* io_uring_local_work_run - ran ring local task work
*
- * @tctx: pointer to a io_uring_ctx
+ * @ctx: pointer to an io_ring_ctx
* @count: how many functions it ran
* @loops: how many loops it ran
*
diff --git a/include/trace/events/iocost.h b/include/trace/events/iocost.h
index af8bfed528fc..e772b1bc60d6 100644
--- a/include/trace/events/iocost.h
+++ b/include/trace/events/iocost.h
@@ -34,8 +34,8 @@ DECLARE_EVENT_CLASS(iocost_iocg_state,
),
TP_fast_assign(
- __assign_str(devname, ioc_name(iocg->ioc));
- __assign_str(cgroup, path);
+ __assign_str(devname);
+ __assign_str(cgroup);
__entry->now = now->now;
__entry->vnow = now->vnow;
__entry->vrate = iocg->ioc->vtime_base_rate;
@@ -93,8 +93,8 @@ DECLARE_EVENT_CLASS(iocg_inuse_update,
),
TP_fast_assign(
- __assign_str(devname, ioc_name(iocg->ioc));
- __assign_str(cgroup, path);
+ __assign_str(devname);
+ __assign_str(cgroup);
__entry->now = now->now;
__entry->old_inuse = old_inuse;
__entry->new_inuse = new_inuse;
@@ -159,7 +159,7 @@ TRACE_EVENT(iocost_ioc_vrate_adj,
),
TP_fast_assign(
- __assign_str(devname, ioc_name(ioc));
+ __assign_str(devname);
__entry->old_vrate = ioc->vtime_base_rate;
__entry->new_vrate = new_vrate;
__entry->busy_level = ioc->busy_level;
@@ -200,8 +200,8 @@ TRACE_EVENT(iocost_iocg_forgive_debt,
),
TP_fast_assign(
- __assign_str(devname, ioc_name(iocg->ioc));
- __assign_str(cgroup, path);
+ __assign_str(devname);
+ __assign_str(cgroup);
__entry->now = now->now;
__entry->vnow = now->vnow;
__entry->usage_pct = usage_pct;
diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h
index 29096fe12623..373007e567cb 100644
--- a/include/trace/events/iommu.h
+++ b/include/trace/events/iommu.h
@@ -28,7 +28,7 @@ DECLARE_EVENT_CLASS(iommu_group_event,
TP_fast_assign(
__entry->gid = group_id;
- __assign_str(device, dev_name(dev));
+ __assign_str(device);
),
TP_printk("IOMMU: groupID=%d device=%s",
@@ -62,7 +62,7 @@ DECLARE_EVENT_CLASS(iommu_device_event,
),
TP_fast_assign(
- __assign_str(device, dev_name(dev));
+ __assign_str(device);
),
TP_printk("IOMMU: device=%s", __get_str(device)
@@ -76,13 +76,6 @@ DEFINE_EVENT(iommu_device_event, attach_device_to_domain,
TP_ARGS(dev)
);
-DEFINE_EVENT(iommu_device_event, detach_device_from_domain,
-
- TP_PROTO(struct device *dev),
-
- TP_ARGS(dev)
-);
-
TRACE_EVENT(map,
TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
@@ -145,8 +138,8 @@ DECLARE_EVENT_CLASS(iommu_error,
),
TP_fast_assign(
- __assign_str(device, dev_name(dev));
- __assign_str(driver, dev_driver_string(dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->iova = iova;
__entry->flags = flags;
),
diff --git a/include/trace/events/ipi.h b/include/trace/events/ipi.h
index 0be71dad6ec0..9912f0ded81d 100644
--- a/include/trace/events/ipi.h
+++ b/include/trace/events/ipi.h
@@ -7,6 +7,51 @@
#include <linux/tracepoint.h>
+TRACE_EVENT(ipi_send_cpu,
+
+ TP_PROTO(const unsigned int cpu, unsigned long callsite, void *callback),
+
+ TP_ARGS(cpu, callsite, callback),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cpu)
+ __field(void *, callsite)
+ __field(void *, callback)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->callsite = (void *)callsite;
+ __entry->callback = callback;
+ ),
+
+ TP_printk("cpu=%u callsite=%pS callback=%pS",
+ __entry->cpu, __entry->callsite, __entry->callback)
+);
+
+TRACE_EVENT(ipi_send_cpumask,
+
+ TP_PROTO(const struct cpumask *cpumask, unsigned long callsite, void *callback),
+
+ TP_ARGS(cpumask, callsite, callback),
+
+ TP_STRUCT__entry(
+ __cpumask(cpumask)
+ __field(void *, callsite)
+ __field(void *, callback)
+ ),
+
+ TP_fast_assign(
+ __assign_cpumask(cpumask, cpumask_bits(cpumask));
+ __entry->callsite = (void *)callsite;
+ __entry->callback = callback;
+ ),
+
+ TP_printk("cpumask=%s callsite=%pS callback=%pS",
+ __get_cpumask(cpumask), __entry->callsite, __entry->callback)
+);
+
+#ifdef CONFIG_HAVE_EXTRA_IPI_TRACEPOINTS
/**
* ipi_raise - called when a smp cross call is made
*
@@ -83,6 +128,7 @@ DEFINE_EVENT(ipi_handler, ipi_exit,
TP_ARGS(reason)
);
+#endif /* CONFIG_HAVE_EXTRA_IPI_TRACEPOINTS */
#endif /* _TRACE_IPI_H */
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index eeceafaaea4c..837c1740d0d0 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -63,7 +63,7 @@ TRACE_EVENT(irq_handler_entry,
TP_fast_assign(
__entry->irq = irq;
- __assign_str(name, action->name);
+ __assign_str(name);
),
TP_printk("irq=%d name=%s", __entry->irq, __get_str(name))
@@ -160,6 +160,53 @@ DEFINE_EVENT(softirq, softirq_raise,
TP_ARGS(vec_nr)
);
+DECLARE_EVENT_CLASS(tasklet,
+
+ TP_PROTO(struct tasklet_struct *t, void *func),
+
+ TP_ARGS(t, func),
+
+ TP_STRUCT__entry(
+ __field( void *, tasklet)
+ __field( void *, func)
+ ),
+
+ TP_fast_assign(
+ __entry->tasklet = t;
+ __entry->func = func;
+ ),
+
+ TP_printk("tasklet=%ps function=%ps", __entry->tasklet, __entry->func)
+);
+
+/**
+ * tasklet_entry - called immediately before the tasklet is run
+ * @t: tasklet pointer
+ * @func: tasklet callback or function being run
+ *
+ * Used to find individual tasklet execution time
+ */
+DEFINE_EVENT(tasklet, tasklet_entry,
+
+ TP_PROTO(struct tasklet_struct *t, void *func),
+
+ TP_ARGS(t, func)
+);
+
+/**
+ * tasklet_exit - called immediately after the tasklet is run
+ * @t: tasklet pointer
+ * @func: tasklet callback or function being run
+ *
+ * Used to find individual tasklet execution time
+ */
+DEFINE_EVENT(tasklet, tasklet_exit,
+
+ TP_PROTO(struct tasklet_struct *t, void *func),
+
+ TP_ARGS(t, func)
+);
+
#endif /* _TRACE_IRQ_H */
/* This part must be outside protection */
diff --git a/include/trace/events/irq_matrix.h b/include/trace/events/irq_matrix.h
index 267d4cbbf360..93244078b4e6 100644
--- a/include/trace/events/irq_matrix.h
+++ b/include/trace/events/irq_matrix.h
@@ -138,14 +138,6 @@ DEFINE_EVENT(irq_matrix_global_update, irq_matrix_assign_system,
TP_ARGS(bit, matrix)
);
-DEFINE_EVENT(irq_matrix_cpu, irq_matrix_alloc_reserved,
-
- TP_PROTO(int bit, unsigned int cpu,
- struct irq_matrix *matrix, struct cpumap *cmap),
-
- TP_ARGS(bit, cpu, matrix, cmap)
-);
-
DEFINE_EVENT(irq_matrix_cpu, irq_matrix_reserve_managed,
TP_PROTO(int bit, unsigned int cpu,
diff --git a/include/trace/events/iscsi.h b/include/trace/events/iscsi.h
index 8ff2a3ca5d75..990fd154f586 100644
--- a/include/trace/events/iscsi.h
+++ b/include/trace/events/iscsi.h
@@ -30,7 +30,7 @@ DECLARE_EVENT_CLASS(iscsi_log_msg,
),
TP_fast_assign(
- __assign_str(dname, dev_name(dev));
+ __assign_str(dname);
__assign_vstr(msg, vaf->fmt, vaf->va);
),
diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
index 8f5ee380d309..5646ae15a957 100644
--- a/include/trace/events/jbd2.h
+++ b/include/trace/events/jbd2.h
@@ -462,11 +462,9 @@ TRACE_EVENT(jbd2_shrink_scan_exit,
TRACE_EVENT(jbd2_shrink_checkpoint_list,
TP_PROTO(journal_t *journal, tid_t first_tid, tid_t tid, tid_t last_tid,
- unsigned long nr_freed, unsigned long nr_scanned,
- tid_t next_tid),
+ unsigned long nr_freed, tid_t next_tid),
- TP_ARGS(journal, first_tid, tid, last_tid, nr_freed,
- nr_scanned, next_tid),
+ TP_ARGS(journal, first_tid, tid, last_tid, nr_freed, next_tid),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -474,7 +472,6 @@ TRACE_EVENT(jbd2_shrink_checkpoint_list,
__field(tid_t, tid)
__field(tid_t, last_tid)
__field(unsigned long, nr_freed)
- __field(unsigned long, nr_scanned)
__field(tid_t, next_tid)
),
@@ -484,15 +481,14 @@ TRACE_EVENT(jbd2_shrink_checkpoint_list,
__entry->tid = tid;
__entry->last_tid = last_tid;
__entry->nr_freed = nr_freed;
- __entry->nr_scanned = nr_scanned;
__entry->next_tid = next_tid;
),
TP_printk("dev %d,%d shrink transaction %u-%u(%u) freed %lu "
- "scanned %lu next transaction %u",
+ "next transaction %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->first_tid, __entry->tid, __entry->last_tid,
- __entry->nr_freed, __entry->nr_scanned, __entry->next_tid)
+ __entry->nr_freed, __entry->next_tid)
);
#endif /* _TRACE_JBD2_H */
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 58688768ef0f..7f93e754da5c 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -22,6 +22,7 @@ TRACE_EVENT(kmem_cache_alloc,
TP_STRUCT__entry(
__field( unsigned long, call_site )
__field( const void *, ptr )
+ __string( name, s->name )
__field( size_t, bytes_req )
__field( size_t, bytes_alloc )
__field( unsigned long, gfp_flags )
@@ -32,18 +33,20 @@ TRACE_EVENT(kmem_cache_alloc,
TP_fast_assign(
__entry->call_site = call_site;
__entry->ptr = ptr;
+ __assign_str(name);
__entry->bytes_req = s->object_size;
__entry->bytes_alloc = s->size;
__entry->gfp_flags = (__force unsigned long)gfp_flags;
__entry->node = node;
- __entry->accounted = IS_ENABLED(CONFIG_MEMCG_KMEM) ?
+ __entry->accounted = IS_ENABLED(CONFIG_MEMCG) ?
((gfp_flags & __GFP_ACCOUNT) ||
(s->flags & SLAB_ACCOUNT)) : false;
),
- TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
+ TP_printk("call_site=%pS ptr=%p name=%s bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
(void *)__entry->call_site,
__entry->ptr,
+ __get_str(name),
__entry->bytes_req,
__entry->bytes_alloc,
show_gfp_flags(__entry->gfp_flags),
@@ -87,7 +90,7 @@ TRACE_EVENT(kmalloc,
__entry->bytes_alloc,
show_gfp_flags(__entry->gfp_flags),
__entry->node,
- (IS_ENABLED(CONFIG_MEMCG_KMEM) &&
+ (IS_ENABLED(CONFIG_MEMCG) &&
(__entry->gfp_flags & (__force unsigned long)__GFP_ACCOUNT)) ? "true" : "false")
);
@@ -126,7 +129,7 @@ TRACE_EVENT(kmem_cache_free,
TP_fast_assign(
__entry->call_site = call_site;
__entry->ptr = ptr;
- __assign_str(name, s->name);
+ __assign_str(name);
),
TP_printk("call_site=%pS ptr=%p name=%s",
@@ -304,6 +307,84 @@ TRACE_EVENT(mm_page_alloc_extfrag,
__entry->change_ownership)
);
+TRACE_EVENT(mm_setup_per_zone_wmarks,
+
+ TP_PROTO(struct zone *zone),
+
+ TP_ARGS(zone),
+
+ TP_STRUCT__entry(
+ __field(int, node_id)
+ __string(name, zone->name)
+ __field(unsigned long, watermark_min)
+ __field(unsigned long, watermark_low)
+ __field(unsigned long, watermark_high)
+ __field(unsigned long, watermark_promo)
+ ),
+
+ TP_fast_assign(
+ __entry->node_id = zone->zone_pgdat->node_id;
+ __assign_str(name);
+ __entry->watermark_min = zone->_watermark[WMARK_MIN];
+ __entry->watermark_low = zone->_watermark[WMARK_LOW];
+ __entry->watermark_high = zone->_watermark[WMARK_HIGH];
+ __entry->watermark_promo = zone->_watermark[WMARK_PROMO];
+ ),
+
+ TP_printk("node_id=%d zone name=%s watermark min=%lu low=%lu high=%lu promo=%lu",
+ __entry->node_id,
+ __get_str(name),
+ __entry->watermark_min,
+ __entry->watermark_low,
+ __entry->watermark_high,
+ __entry->watermark_promo)
+);
+
+TRACE_EVENT(mm_setup_per_zone_lowmem_reserve,
+
+ TP_PROTO(struct zone *zone, struct zone *upper_zone, long lowmem_reserve),
+
+ TP_ARGS(zone, upper_zone, lowmem_reserve),
+
+ TP_STRUCT__entry(
+ __field(int, node_id)
+ __string(name, zone->name)
+ __string(upper_name, upper_zone->name)
+ __field(long, lowmem_reserve)
+ ),
+
+ TP_fast_assign(
+ __entry->node_id = zone->zone_pgdat->node_id;
+ __assign_str(name);
+ __assign_str(upper_name);
+ __entry->lowmem_reserve = lowmem_reserve;
+ ),
+
+ TP_printk("node_id=%d zone name=%s upper_zone name=%s lowmem_reserve_pages=%ld",
+ __entry->node_id,
+ __get_str(name),
+ __get_str(upper_name),
+ __entry->lowmem_reserve)
+);
+
+TRACE_EVENT(mm_calculate_totalreserve_pages,
+
+ TP_PROTO(unsigned long totalreserve_pages),
+
+ TP_ARGS(totalreserve_pages),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, totalreserve_pages)
+ ),
+
+ TP_fast_assign(
+ __entry->totalreserve_pages = totalreserve_pages;
+ ),
+
+ TP_printk("totalreserve_pages=%lu", __entry->totalreserve_pages)
+);
+
+
/*
* Required for uniquely and securely identifying mm in rss_stat tracepoint.
*/
diff --git a/include/trace/events/ksm.h b/include/trace/events/ksm.h
new file mode 100644
index 000000000000..e728647b5d26
--- /dev/null
+++ b/include/trace/events/ksm.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ksm
+
+#if !defined(_TRACE_KSM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KSM_H
+
+#include <linux/tracepoint.h>
+
+/**
+ * ksm_scan_template - called for start / stop scan
+ *
+ * @seq: sequence number of scan
+ * @rmap_entries: actual number of rmap entries
+ *
+ * Allows to trace the start / stop of a ksm scan.
+ */
+DECLARE_EVENT_CLASS(ksm_scan_template,
+
+ TP_PROTO(int seq, u32 rmap_entries),
+
+ TP_ARGS(seq, rmap_entries),
+
+ TP_STRUCT__entry(
+ __field(int, seq)
+ __field(u32, rmap_entries)
+ ),
+
+ TP_fast_assign(
+ __entry->seq = seq;
+ __entry->rmap_entries = rmap_entries;
+ ),
+
+ TP_printk("seq %d rmap size %d",
+ __entry->seq, __entry->rmap_entries)
+);
+
+/**
+ * ksm_start_scan - called after a new ksm scan is started
+ *
+ * @seq: sequence number of scan
+ * @rmap_entries: actual number of rmap entries
+ *
+ * Allows to trace the start of a ksm scan.
+ */
+DEFINE_EVENT(ksm_scan_template, ksm_start_scan,
+
+ TP_PROTO(int seq, u32 rmap_entries),
+
+ TP_ARGS(seq, rmap_entries)
+);
+
+/**
+ * ksm_stop_scan - called after a new ksm scan has completed
+ *
+ * @seq: sequence number of scan
+ * @rmap_entries: actual number of rmap entries
+ *
+ * Allows to trace the completion of a ksm scan.
+ */
+DEFINE_EVENT(ksm_scan_template, ksm_stop_scan,
+
+ TP_PROTO(int seq, u32 rmap_entries),
+
+ TP_ARGS(seq, rmap_entries)
+);
+
+/**
+ * ksm_enter - called after a new process has been added / removed from ksm
+ *
+ * @mm: address of the mm object of the process
+ *
+ * Allows to trace when a process has been added to or removed from ksm.
+ */
+DECLARE_EVENT_CLASS(ksm_enter_exit_template,
+
+ TP_PROTO(void *mm),
+
+ TP_ARGS(mm),
+
+ TP_STRUCT__entry(
+ __field(void *, mm)
+ ),
+
+ TP_fast_assign(
+ __entry->mm = mm;
+ ),
+
+ TP_printk("mm %p", __entry->mm)
+);
+
+/**
+ * ksm_enter - called after a process has been added to ksm
+ *
+ * @mm: address of the mm object of the process
+ *
+ * Allows to trace when a process has been added to ksm.
+ */
+DEFINE_EVENT(ksm_enter_exit_template, ksm_enter,
+
+ TP_PROTO(void *mm),
+
+ TP_ARGS(mm)
+);
+
+/**
+ * ksm_exit - called after a process has been removed from ksm
+ *
+ * @mm: address of the mm object of the process
+ *
+ * Allows to trace when a process has been removed from ksm.
+ */
+DEFINE_EVENT(ksm_enter_exit_template, ksm_exit,
+
+ TP_PROTO(void *mm),
+
+ TP_ARGS(mm)
+);
+
+/**
+ * ksm_merge_one_page - called after a page has been merged
+ *
+ * @pfn: page frame number of ksm page
+ * @rmap_item: address of rmap_item object
+ * @mm: address of the process mm struct
+ * @err: 0 on success, otherwise an error code
+ *
+ * Allows to trace the ksm merging of individual pages.
+ */
+TRACE_EVENT(ksm_merge_one_page,
+
+ TP_PROTO(unsigned long pfn, void *rmap_item, void *mm, int err),
+
+ TP_ARGS(pfn, rmap_item, mm, err),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, pfn)
+ __field(void *, rmap_item)
+ __field(void *, mm)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = pfn;
+ __entry->rmap_item = rmap_item;
+ __entry->mm = mm;
+ __entry->err = err;
+ ),
+
+ TP_printk("ksm pfn %lu rmap_item %p mm %p error %d",
+ __entry->pfn, __entry->rmap_item, __entry->mm, __entry->err)
+);
+
+/**
+ * ksm_merge_with_ksm_page - called after a page has been merged with a ksm page
+ *
+ * @ksm_page: address of the ksm page
+ * @pfn: page frame number of ksm page
+ * @rmap_item: address of rmap_item object
+ * @mm: address of the mm object of the process
+ * @err: 0 on success, otherwise an error code
+ *
+ * Allows to trace the merging of a page with a ksm page.
+ */
+TRACE_EVENT(ksm_merge_with_ksm_page,
+
+ TP_PROTO(void *ksm_page, unsigned long pfn, void *rmap_item, void *mm, int err),
+
+ TP_ARGS(ksm_page, pfn, rmap_item, mm, err),
+
+ TP_STRUCT__entry(
+ __field(void *, ksm_page)
+ __field(unsigned long, pfn)
+ __field(void *, rmap_item)
+ __field(void *, mm)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __entry->ksm_page = ksm_page;
+ __entry->pfn = pfn;
+ __entry->rmap_item = rmap_item;
+ __entry->mm = mm;
+ __entry->err = err;
+ ),
+
+ TP_printk("%spfn %lu rmap_item %p mm %p error %d",
+ (__entry->ksm_page ? "ksm " : ""),
+ __entry->pfn, __entry->rmap_item, __entry->mm, __entry->err)
+);
+
+/**
+ * ksm_remove_ksm_page - called after a ksm page has been removed
+ *
+ * @pfn: page frame number of ksm page
+ *
+ * Allows to trace the removing of stable ksm pages.
+ */
+TRACE_EVENT(ksm_remove_ksm_page,
+
+ TP_PROTO(unsigned long pfn),
+
+ TP_ARGS(pfn),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, pfn)
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = pfn;
+ ),
+
+ TP_printk("pfn %lu", __entry->pfn)
+);
+
+/**
+ * ksm_remove_rmap_item - called after a rmap_item has been removed from the
+ * stable tree
+ *
+ * @pfn: page frame number of ksm page
+ * @rmap_item: address of rmap_item object
+ * @mm: address of the process mm struct
+ *
+ * Allows to trace the removal of pages from the stable tree list.
+ */
+TRACE_EVENT(ksm_remove_rmap_item,
+
+ TP_PROTO(unsigned long pfn, void *rmap_item, void *mm),
+
+ TP_ARGS(pfn, rmap_item, mm),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, pfn)
+ __field(void *, rmap_item)
+ __field(void *, mm)
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = pfn;
+ __entry->rmap_item = rmap_item;
+ __entry->mm = mm;
+ ),
+
+ TP_printk("pfn %lu rmap_item %p mm %p",
+ __entry->pfn, __entry->rmap_item, __entry->mm)
+);
+
+/**
+ * ksm_advisor - called after the advisor has run
+ *
+ * @scan_time: scan time in seconds
+ * @pages_to_scan: new pages_to_scan value
+ * @cpu_percent: cpu usage in percent
+ *
+ * Allows to trace the ksm advisor.
+ */
+TRACE_EVENT(ksm_advisor,
+
+ TP_PROTO(s64 scan_time, unsigned long pages_to_scan,
+ unsigned int cpu_percent),
+
+ TP_ARGS(scan_time, pages_to_scan, cpu_percent),
+
+ TP_STRUCT__entry(
+ __field(s64, scan_time)
+ __field(unsigned long, pages_to_scan)
+ __field(unsigned int, cpu_percent)
+ ),
+
+ TP_fast_assign(
+ __entry->scan_time = scan_time;
+ __entry->pages_to_scan = pages_to_scan;
+ __entry->cpu_percent = cpu_percent;
+ ),
+
+ TP_printk("ksm scan time %lld pages_to_scan %lu cpu percent %u",
+ __entry->scan_time, __entry->pages_to_scan,
+ __entry->cpu_percent)
+);
+
+#endif /* _TRACE_KSM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
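The KSM events are called from the scanner via the generated helpers; pointer arguments are passed as plain void * per the prototypes above. Sketch (locals assumed):

	trace_ksm_enter(mm);					/* mm added to ksm */
	trace_ksm_merge_one_page(page_to_pfn(page), rmap_item, mm, err);
	trace_ksm_exit(mm);					/* mm removed from ksm */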
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 3bd31ea23fee..b282e3a86769 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -62,7 +62,7 @@ TRACE_EVENT(kvm_vcpu_wakeup,
__entry->valid ? "valid" : "invalid")
);
-#if defined(CONFIG_HAVE_KVM_IRQFD)
+#if defined(CONFIG_HAVE_KVM_IRQCHIP)
TRACE_EVENT(kvm_set_irq,
TP_PROTO(unsigned int gsi, int level, int irq_source_id),
TP_ARGS(gsi, level, irq_source_id),
@@ -82,95 +82,15 @@ TRACE_EVENT(kvm_set_irq,
TP_printk("gsi %u level %d source %d",
__entry->gsi, __entry->level, __entry->irq_source_id)
);
-#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
-
-#if defined(__KVM_HAVE_IOAPIC)
-#define kvm_deliver_mode \
- {0x0, "Fixed"}, \
- {0x1, "LowPrio"}, \
- {0x2, "SMI"}, \
- {0x3, "Res3"}, \
- {0x4, "NMI"}, \
- {0x5, "INIT"}, \
- {0x6, "SIPI"}, \
- {0x7, "ExtINT"}
-
-TRACE_EVENT(kvm_ioapic_set_irq,
- TP_PROTO(__u64 e, int pin, bool coalesced),
- TP_ARGS(e, pin, coalesced),
- TP_STRUCT__entry(
- __field( __u64, e )
- __field( int, pin )
- __field( bool, coalesced )
- ),
-
- TP_fast_assign(
- __entry->e = e;
- __entry->pin = pin;
- __entry->coalesced = coalesced;
- ),
-
- TP_printk("pin %u dst %x vec %u (%s|%s|%s%s)%s",
- __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
- __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
- (__entry->e & (1<<11)) ? "logical" : "physical",
- (__entry->e & (1<<15)) ? "level" : "edge",
- (__entry->e & (1<<16)) ? "|masked" : "",
- __entry->coalesced ? " (coalesced)" : "")
-);
-
-TRACE_EVENT(kvm_ioapic_delayed_eoi_inj,
- TP_PROTO(__u64 e),
- TP_ARGS(e),
-
- TP_STRUCT__entry(
- __field( __u64, e )
- ),
-
- TP_fast_assign(
- __entry->e = e;
- ),
-
- TP_printk("dst %x vec %u (%s|%s|%s%s)",
- (u8)(__entry->e >> 56), (u8)__entry->e,
- __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
- (__entry->e & (1<<11)) ? "logical" : "physical",
- (__entry->e & (1<<15)) ? "level" : "edge",
- (__entry->e & (1<<16)) ? "|masked" : "")
-);
-
-TRACE_EVENT(kvm_msi_set_irq,
- TP_PROTO(__u64 address, __u64 data),
- TP_ARGS(address, data),
-
- TP_STRUCT__entry(
- __field( __u64, address )
- __field( __u64, data )
- ),
-
- TP_fast_assign(
- __entry->address = address;
- __entry->data = data;
- ),
-
- TP_printk("dst %llx vec %u (%s|%s|%s%s)",
- (u8)(__entry->address >> 12) | ((__entry->address >> 32) & 0xffffff00),
- (u8)__entry->data,
- __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
- (__entry->address & (1<<2)) ? "logical" : "physical",
- (__entry->data & (1<<15)) ? "level" : "edge",
- (__entry->address & (1<<3)) ? "|rh" : "")
-);
+#ifdef CONFIG_KVM_IOAPIC
#define kvm_irqchips \
{KVM_IRQCHIP_PIC_MASTER, "PIC master"}, \
{KVM_IRQCHIP_PIC_SLAVE, "PIC slave"}, \
{KVM_IRQCHIP_IOAPIC, "IOAPIC"}
-#endif /* defined(__KVM_HAVE_IOAPIC) */
-
-#if defined(CONFIG_HAVE_KVM_IRQFD)
+#endif /* CONFIG_KVM_IOAPIC */
#ifdef kvm_irqchips
#define kvm_ack_irq_string "irqchip %s pin %u"
@@ -197,7 +117,7 @@ TRACE_EVENT(kvm_ack_irq,
TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm)
);
-#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
+#endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */
@@ -438,37 +358,49 @@ TRACE_EVENT(kvm_dirty_ring_exit,
TP_printk("vcpu %d", __entry->vcpu_id)
);
-TRACE_EVENT(kvm_unmap_hva_range,
- TP_PROTO(unsigned long start, unsigned long end),
- TP_ARGS(start, end),
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+/*
+ * @start: Starting address of guest memory range
+ * @end: End address of guest memory range
+ * @attr: The value of the attribute being set.
+ */
+TRACE_EVENT(kvm_vm_set_mem_attributes,
+ TP_PROTO(gfn_t start, gfn_t end, unsigned long attr),
+ TP_ARGS(start, end, attr),
TP_STRUCT__entry(
- __field( unsigned long, start )
- __field( unsigned long, end )
+ __field(gfn_t, start)
+ __field(gfn_t, end)
+ __field(unsigned long, attr)
),
TP_fast_assign(
__entry->start = start;
__entry->end = end;
+ __entry->attr = attr;
),
- TP_printk("mmu notifier unmap range: %#016lx -- %#016lx",
- __entry->start, __entry->end)
+ TP_printk("%#016llx -- %#016llx [0x%lx]",
+ __entry->start, __entry->end, __entry->attr)
);
+#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
-TRACE_EVENT(kvm_set_spte_hva,
- TP_PROTO(unsigned long hva),
- TP_ARGS(hva),
+TRACE_EVENT(kvm_unmap_hva_range,
+ TP_PROTO(unsigned long start, unsigned long end),
+ TP_ARGS(start, end),
TP_STRUCT__entry(
- __field( unsigned long, hva )
+ __field( unsigned long, start )
+ __field( unsigned long, end )
),
TP_fast_assign(
- __entry->hva = hva;
+ __entry->start = start;
+ __entry->end = end;
),
- TP_printk("mmu notifier set pte hva: %#016lx", __entry->hva)
+ TP_printk("mmu notifier unmap range: %#016lx -- %#016lx",
+ __entry->start, __entry->end)
);
TRACE_EVENT(kvm_age_hva,
diff --git a/include/trace/events/kyber.h b/include/trace/events/kyber.h
index bf7533f171ff..9d44781efc1c 100644
--- a/include/trace/events/kyber.h
+++ b/include/trace/events/kyber.h
@@ -31,8 +31,8 @@ TRACE_EVENT(kyber_latency,
TP_fast_assign(
__entry->dev = dev;
- strlcpy(__entry->domain, domain, sizeof(__entry->domain));
- strlcpy(__entry->type, type, sizeof(__entry->type));
+ strscpy(__entry->domain, domain, sizeof(__entry->domain));
+ strscpy(__entry->type, type, sizeof(__entry->type));
__entry->percentile = percentile;
__entry->numerator = numerator;
__entry->denominator = denominator;
@@ -59,7 +59,7 @@ TRACE_EVENT(kyber_adjust,
TP_fast_assign(
__entry->dev = dev;
- strlcpy(__entry->domain, domain, sizeof(__entry->domain));
+ strscpy(__entry->domain, domain, sizeof(__entry->domain));
__entry->depth = depth;
),
@@ -81,7 +81,7 @@ TRACE_EVENT(kyber_throttled,
TP_fast_assign(
__entry->dev = dev;
- strlcpy(__entry->domain, domain, sizeof(__entry->domain));
+ strscpy(__entry->domain, domain, sizeof(__entry->domain));
),
TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev),
diff --git a/include/trace/events/lock.h b/include/trace/events/lock.h
index 9ebd081e057e..8e89baa3775f 100644
--- a/include/trace/events/lock.h
+++ b/include/trace/events/lock.h
@@ -37,7 +37,7 @@ TRACE_EVENT(lock_acquire,
TP_fast_assign(
__entry->flags = (trylock ? 1 : 0) | (read ? 2 : 0);
- __assign_str(name, lock->name);
+ __assign_str(name);
__entry->lockdep_addr = lock;
),
@@ -59,7 +59,7 @@ DECLARE_EVENT_CLASS(lock,
),
TP_fast_assign(
- __assign_str(name, lock->name);
+ __assign_str(name);
__entry->lockdep_addr = lock;
),
diff --git a/include/trace/events/mce.h b/include/trace/events/mce.h
index 1391ada0da3b..c1c50df9ecfd 100644
--- a/include/trace/events/mce.h
+++ b/include/trace/events/mce.h
@@ -9,11 +9,19 @@
#include <linux/tracepoint.h>
#include <asm/mce.h>
+/*
+ * MCE Event Record.
+ *
+ * Only very relevant and transient information which cannot be
+ * gathered from a system by any other means or which can only be
+ * acquired arduously should be added to this record.
+ */
+
TRACE_EVENT(mce_record,
- TP_PROTO(struct mce *m),
+ TP_PROTO(struct mce_hw_err *err),
- TP_ARGS(m),
+ TP_ARGS(err),
TP_STRUCT__entry(
__field( u64, mcgcap )
@@ -25,6 +33,7 @@ TRACE_EVENT(mce_record,
__field( u64, ipid )
__field( u64, ip )
__field( u64, tsc )
+ __field( u64, ppin )
__field( u64, walltime )
__field( u32, cpu )
__field( u32, cpuid )
@@ -33,40 +42,51 @@ TRACE_EVENT(mce_record,
__field( u8, cs )
__field( u8, bank )
__field( u8, cpuvendor )
+ __field( u32, microcode )
+ __dynamic_array(u8, v_data, sizeof(err->vendor))
),
TP_fast_assign(
- __entry->mcgcap = m->mcgcap;
- __entry->mcgstatus = m->mcgstatus;
- __entry->status = m->status;
- __entry->addr = m->addr;
- __entry->misc = m->misc;
- __entry->synd = m->synd;
- __entry->ipid = m->ipid;
- __entry->ip = m->ip;
- __entry->tsc = m->tsc;
- __entry->walltime = m->time;
- __entry->cpu = m->extcpu;
- __entry->cpuid = m->cpuid;
- __entry->apicid = m->apicid;
- __entry->socketid = m->socketid;
- __entry->cs = m->cs;
- __entry->bank = m->bank;
- __entry->cpuvendor = m->cpuvendor;
+ __entry->mcgcap = err->m.mcgcap;
+ __entry->mcgstatus = err->m.mcgstatus;
+ __entry->status = err->m.status;
+ __entry->addr = err->m.addr;
+ __entry->misc = err->m.misc;
+ __entry->synd = err->m.synd;
+ __entry->ipid = err->m.ipid;
+ __entry->ip = err->m.ip;
+ __entry->tsc = err->m.tsc;
+ __entry->ppin = err->m.ppin;
+ __entry->walltime = err->m.time;
+ __entry->cpu = err->m.extcpu;
+ __entry->cpuid = err->m.cpuid;
+ __entry->apicid = err->m.apicid;
+ __entry->socketid = err->m.socketid;
+ __entry->cs = err->m.cs;
+ __entry->bank = err->m.bank;
+ __entry->cpuvendor = err->m.cpuvendor;
+ __entry->microcode = err->m.microcode;
+ memcpy(__get_dynamic_array(v_data), &err->vendor, sizeof(err->vendor));
),
- TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016Lx, IPID: %016Lx, ADDR/MISC/SYND: %016Lx/%016Lx/%016Lx, RIP: %02x:<%016Lx>, TSC: %llx, PROCESSOR: %u:%x, TIME: %llu, SOCKET: %u, APIC: %x",
+ TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016llx, IPID: %016llx, ADDR: %016llx, MISC: %016llx, SYND: %016llx, RIP: %02x:<%016llx>, TSC: %llx, PPIN: %llx, vendor: %u, CPUID: %x, time: %llu, socket: %u, APIC: %x, microcode: %x, vendor data: %s",
__entry->cpu,
__entry->mcgcap, __entry->mcgstatus,
__entry->bank, __entry->status,
__entry->ipid,
- __entry->addr, __entry->misc, __entry->synd,
+ __entry->addr,
+ __entry->misc,
+ __entry->synd,
__entry->cs, __entry->ip,
__entry->tsc,
- __entry->cpuvendor, __entry->cpuid,
+ __entry->ppin,
+ __entry->cpuvendor,
+ __entry->cpuid,
__entry->walltime,
__entry->socketid,
- __entry->apicid)
+ __entry->apicid,
+ __entry->microcode,
+ __print_dynamic_array(v_data, sizeof(u8)))
);
#endif /* _TRACE_MCE_H */
diff --git a/include/trace/events/mdio.h b/include/trace/events/mdio.h
index 0f241cbe00ab..285b3e4f83ba 100644
--- a/include/trace/events/mdio.h
+++ b/include/trace/events/mdio.h
@@ -25,7 +25,7 @@ TRACE_EVENT_CONDITION(mdio_access,
),
TP_fast_assign(
- strncpy(__entry->busid, bus->id, MII_BUS_ID_SIZE);
+ strscpy(__entry->busid, bus->id, MII_BUS_ID_SIZE);
__entry->read = read;
__entry->addr = addr;
__entry->regnum = regnum;
diff --git a/include/trace/events/memcg.h b/include/trace/events/memcg.h
new file mode 100644
index 000000000000..dfe2f51019b4
--- /dev/null
+++ b/include/trace/events/memcg.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM memcg
+
+#if !defined(_TRACE_MEMCG_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MEMCG_H
+
+#include <linux/memcontrol.h>
+#include <linux/tracepoint.h>
+
+
+DECLARE_EVENT_CLASS(memcg_rstat_stats,
+
+ TP_PROTO(struct mem_cgroup *memcg, int item, int val),
+
+ TP_ARGS(memcg, item, val),
+
+ TP_STRUCT__entry(
+ __field(u64, id)
+ __field(int, item)
+ __field(int, val)
+ ),
+
+ TP_fast_assign(
+ __entry->id = cgroup_id(memcg->css.cgroup);
+ __entry->item = item;
+ __entry->val = val;
+ ),
+
+ TP_printk("memcg_id=%llu item=%d val=%d",
+ __entry->id, __entry->item, __entry->val)
+);
+
+DEFINE_EVENT(memcg_rstat_stats, mod_memcg_state,
+
+ TP_PROTO(struct mem_cgroup *memcg, int item, int val),
+
+ TP_ARGS(memcg, item, val)
+);
+
+DEFINE_EVENT(memcg_rstat_stats, mod_memcg_lruvec_state,
+
+ TP_PROTO(struct mem_cgroup *memcg, int item, int val),
+
+ TP_ARGS(memcg, item, val)
+);
+
+DECLARE_EVENT_CLASS(memcg_rstat_events,
+
+ TP_PROTO(struct mem_cgroup *memcg, int item, unsigned long val),
+
+ TP_ARGS(memcg, item, val),
+
+ TP_STRUCT__entry(
+ __field(u64, id)
+ __field(int, item)
+ __field(unsigned long, val)
+ ),
+
+ TP_fast_assign(
+ __entry->id = cgroup_id(memcg->css.cgroup);
+ __entry->item = item;
+ __entry->val = val;
+ ),
+
+ TP_printk("memcg_id=%llu item=%d val=%lu",
+ __entry->id, __entry->item, __entry->val)
+);
+
+DEFINE_EVENT(memcg_rstat_events, count_memcg_events,
+
+ TP_PROTO(struct mem_cgroup *memcg, int item, unsigned long val),
+
+ TP_ARGS(memcg, item, val)
+);
+
+TRACE_EVENT(memcg_flush_stats,
+
+ TP_PROTO(struct mem_cgroup *memcg, s64 stats_updates,
+ bool force, bool needs_flush),
+
+ TP_ARGS(memcg, stats_updates, force, needs_flush),
+
+ TP_STRUCT__entry(
+ __field(u64, id)
+ __field(s64, stats_updates)
+ __field(bool, force)
+ __field(bool, needs_flush)
+ ),
+
+ TP_fast_assign(
+ __entry->id = cgroup_id(memcg->css.cgroup);
+ __entry->stats_updates = stats_updates;
+ __entry->force = force;
+ __entry->needs_flush = needs_flush;
+ ),
+
+ TP_printk("memcg_id=%llu stats_updates=%lld force=%d needs_flush=%d",
+ __entry->id, __entry->stats_updates,
+ __entry->force, __entry->needs_flush)
+);
+
+#endif /* _TRACE_MEMCG_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
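For illustration, call sites for the events above reduce to the trace_<name>() and trace_<name>_enabled() helpers generated by these macros; the wrapper below is a hand-written sketch (the function name and the elided statistics update are assumptions, only the event signature comes from the definitions above):

/*
 * Sketch only: fire the mod_memcg_state event defined above.  The
 * trace_mod_memcg_state() call and the _enabled() static-key check are
 * generated by the TRACE_EVENT machinery; the surrounding helper is
 * illustrative.
 */
static void example_mod_memcg_state(struct mem_cgroup *memcg, int item, int val)
{
	/* ... update the per-memcg statistic for 'item' here ... */

	if (trace_mod_memcg_state_enabled())
		trace_mod_memcg_state(memcg, item, val);
}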
diff --git a/include/trace/events/memory-failure.h b/include/trace/events/memory-failure.h
new file mode 100644
index 000000000000..aa57cc8f896b
--- /dev/null
+++ b/include/trace/events/memory-failure.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM memory_failure
+#define TRACE_INCLUDE_FILE memory-failure
+
+#if !defined(_TRACE_MEMORY_FAILURE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MEMORY_FAILURE_H
+
+#include <linux/tracepoint.h>
+#include <linux/mm.h>
+
+/*
+ * memory-failure recovery action result event
+ *
+ * unsigned long pfn - Page Frame Number of the corrupted page
+ * int type - Page type of the corrupted page
+ * int result - Result of recovery action
+ */
+
+#define MF_ACTION_RESULT \
+ EM ( MF_IGNORED, "Ignored" ) \
+ EM ( MF_FAILED, "Failed" ) \
+ EM ( MF_DELAYED, "Delayed" ) \
+ EMe ( MF_RECOVERED, "Recovered" )
+
+#define MF_PAGE_TYPE \
+ EM ( MF_MSG_KERNEL, "reserved kernel page" ) \
+ EM ( MF_MSG_KERNEL_HIGH_ORDER, "high-order kernel page" ) \
+ EM ( MF_MSG_HUGE, "huge page" ) \
+ EM ( MF_MSG_FREE_HUGE, "free huge page" ) \
+ EM ( MF_MSG_GET_HWPOISON, "get hwpoison page" ) \
+ EM ( MF_MSG_UNMAP_FAILED, "unmapping failed page" ) \
+ EM ( MF_MSG_DIRTY_SWAPCACHE, "dirty swapcache page" ) \
+ EM ( MF_MSG_CLEAN_SWAPCACHE, "clean swapcache page" ) \
+ EM ( MF_MSG_DIRTY_MLOCKED_LRU, "dirty mlocked LRU page" ) \
+ EM ( MF_MSG_CLEAN_MLOCKED_LRU, "clean mlocked LRU page" ) \
+ EM ( MF_MSG_DIRTY_UNEVICTABLE_LRU, "dirty unevictable LRU page" ) \
+ EM ( MF_MSG_CLEAN_UNEVICTABLE_LRU, "clean unevictable LRU page" ) \
+ EM ( MF_MSG_DIRTY_LRU, "dirty LRU page" ) \
+ EM ( MF_MSG_CLEAN_LRU, "clean LRU page" ) \
+ EM ( MF_MSG_TRUNCATED_LRU, "already truncated LRU page" ) \
+ EM ( MF_MSG_BUDDY, "free buddy page" ) \
+ EM ( MF_MSG_DAX, "dax page" ) \
+ EM ( MF_MSG_UNSPLIT_THP, "unsplit thp" ) \
+ EM ( MF_MSG_ALREADY_POISONED, "already poisoned" ) \
+ EM ( MF_MSG_PFN_MAP, "non struct page pfn" ) \
+ EMe ( MF_MSG_UNKNOWN, "unknown page" )
+
+/*
+ * First define the enums in MF_ACTION_RESULT and MF_PAGE_TYPE to be exported to userspace
+ * via TRACE_DEFINE_ENUM().
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+MF_ACTION_RESULT
+MF_PAGE_TYPE
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) { a, b },
+#define EMe(a, b) { a, b }
+
+TRACE_EVENT(memory_failure_event,
+ TP_PROTO(unsigned long pfn,
+ int type,
+ int result),
+
+ TP_ARGS(pfn, type, result),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, pfn)
+ __field(int, type)
+ __field(int, result)
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = pfn;
+ __entry->type = type;
+ __entry->result = result;
+ ),
+
+ TP_printk("pfn %#lx: recovery action for %s: %s",
+ __entry->pfn,
+ __print_symbolic(__entry->type, MF_PAGE_TYPE),
+ __print_symbolic(__entry->result, MF_ACTION_RESULT)
+ )
+);
+#endif /* _TRACE_MEMORY_FAILURE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
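For illustration, the two-pass EM()/EMe() pattern above expands by hand to roughly the following (only the MF_ACTION_RESULT entries are written out):

/* Pass 1: export the enum values to userspace. */
TRACE_DEFINE_ENUM(MF_IGNORED);
TRACE_DEFINE_ENUM(MF_FAILED);
TRACE_DEFINE_ENUM(MF_DELAYED);
TRACE_DEFINE_ENUM(MF_RECOVERED);

/* Pass 2: build the value/string table consumed by __print_symbolic(). */
__print_symbolic(__entry->result,
		 { MF_IGNORED,   "Ignored"   },
		 { MF_FAILED,    "Failed"    },
		 { MF_DELAYED,   "Delayed"   },
		 { MF_RECOVERED, "Recovered" })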
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index 061b5128f335..cd01dd7b3640 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -22,7 +22,8 @@
EM( MR_NUMA_MISPLACED, "numa_misplaced") \
EM( MR_CONTIG_RANGE, "contig_range") \
EM( MR_LONGTERM_PIN, "longterm_pin") \
- EMe(MR_DEMOTION, "demotion")
+ EM( MR_DEMOTION, "demotion") \
+ EMe(MR_DAMON, "damon")
/*
* First define the enums in the above macros to be exported to userspace
@@ -49,10 +50,11 @@ TRACE_EVENT(mm_migrate_pages,
TP_PROTO(unsigned long succeeded, unsigned long failed,
unsigned long thp_succeeded, unsigned long thp_failed,
- unsigned long thp_split, enum migrate_mode mode, int reason),
+ unsigned long thp_split, unsigned long large_folio_split,
+ enum migrate_mode mode, int reason),
TP_ARGS(succeeded, failed, thp_succeeded, thp_failed,
- thp_split, mode, reason),
+ thp_split, large_folio_split, mode, reason),
TP_STRUCT__entry(
__field( unsigned long, succeeded)
@@ -60,26 +62,29 @@ TRACE_EVENT(mm_migrate_pages,
__field( unsigned long, thp_succeeded)
__field( unsigned long, thp_failed)
__field( unsigned long, thp_split)
+ __field( unsigned long, large_folio_split)
__field( enum migrate_mode, mode)
__field( int, reason)
),
TP_fast_assign(
- __entry->succeeded = succeeded;
- __entry->failed = failed;
- __entry->thp_succeeded = thp_succeeded;
- __entry->thp_failed = thp_failed;
- __entry->thp_split = thp_split;
- __entry->mode = mode;
- __entry->reason = reason;
+ __entry->succeeded = succeeded;
+ __entry->failed = failed;
+ __entry->thp_succeeded = thp_succeeded;
+ __entry->thp_failed = thp_failed;
+ __entry->thp_split = thp_split;
+ __entry->large_folio_split = large_folio_split;
+ __entry->mode = mode;
+ __entry->reason = reason;
),
- TP_printk("nr_succeeded=%lu nr_failed=%lu nr_thp_succeeded=%lu nr_thp_failed=%lu nr_thp_split=%lu mode=%s reason=%s",
+ TP_printk("nr_succeeded=%lu nr_failed=%lu nr_thp_succeeded=%lu nr_thp_failed=%lu nr_thp_split=%lu nr_split=%lu mode=%s reason=%s",
__entry->succeeded,
__entry->failed,
__entry->thp_succeeded,
__entry->thp_failed,
__entry->thp_split,
+ __entry->large_folio_split,
__print_symbolic(__entry->mode, MIGRATE_MODE),
__print_symbolic(__entry->reason, MIGRATE_REASON))
);
diff --git a/include/trace/events/mmap.h b/include/trace/events/mmap.h
index 216de5f03621..ee2843a5daef 100644
--- a/include/trace/events/mmap.h
+++ b/include/trace/events/mmap.h
@@ -35,7 +35,7 @@ TRACE_EVENT(vm_unmapped_area,
__entry->align_offset = info->align_offset;
),
- TP_printk("addr=0x%lx err=%ld total_vm=0x%lx flags=0x%lx len=0x%lx lo=0x%lx hi=0x%lx mask=0x%lx ofs=0x%lx\n",
+ TP_printk("addr=0x%lx err=%ld total_vm=0x%lx flags=0x%lx len=0x%lx lo=0x%lx hi=0x%lx mask=0x%lx ofs=0x%lx",
IS_ERR_VALUE(__entry->addr) ? 0 : __entry->addr,
IS_ERR_VALUE(__entry->addr) ? __entry->addr : 0,
__entry->total_vm, __entry->flags, __entry->length,
@@ -43,58 +43,6 @@ TRACE_EVENT(vm_unmapped_area,
__entry->align_offset)
);
-TRACE_EVENT(vma_mas_szero,
- TP_PROTO(struct maple_tree *mt, unsigned long start,
- unsigned long end),
-
- TP_ARGS(mt, start, end),
-
- TP_STRUCT__entry(
- __field(struct maple_tree *, mt)
- __field(unsigned long, start)
- __field(unsigned long, end)
- ),
-
- TP_fast_assign(
- __entry->mt = mt;
- __entry->start = start;
- __entry->end = end;
- ),
-
- TP_printk("mt_mod %p, (NULL), SNULL, %lu, %lu,",
- __entry->mt,
- (unsigned long) __entry->start,
- (unsigned long) __entry->end
- )
-);
-
-TRACE_EVENT(vma_store,
- TP_PROTO(struct maple_tree *mt, struct vm_area_struct *vma),
-
- TP_ARGS(mt, vma),
-
- TP_STRUCT__entry(
- __field(struct maple_tree *, mt)
- __field(struct vm_area_struct *, vma)
- __field(unsigned long, vm_start)
- __field(unsigned long, vm_end)
- ),
-
- TP_fast_assign(
- __entry->mt = mt;
- __entry->vma = vma;
- __entry->vm_start = vma->vm_start;
- __entry->vm_end = vma->vm_end - 1;
- ),
-
- TP_printk("mt_mod %p, (%p), STORE, %lu, %lu,",
- __entry->mt, __entry->vma,
- (unsigned long) __entry->vm_start,
- (unsigned long) __entry->vm_end
- )
-);
-
-
TRACE_EVENT(exit_mmap,
TP_PROTO(struct mm_struct *mm),
@@ -110,7 +58,7 @@ TRACE_EVENT(exit_mmap,
__entry->mt = &mm->mm_mt;
),
- TP_printk("mt_mod %p, DESTROY\n",
+ TP_printk("mt_mod %p, DESTROY",
__entry->mt
)
);
diff --git a/include/trace/events/mmap_lock.h b/include/trace/events/mmap_lock.h
index 14db8044c1ff..cf9f9faf8914 100644
--- a/include/trace/events/mmap_lock.h
+++ b/include/trace/events/mmap_lock.h
@@ -5,80 +5,72 @@
#if !defined(_TRACE_MMAP_LOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MMAP_LOCK_H
+#include <linux/memcontrol.h>
#include <linux/tracepoint.h>
#include <linux/types.h>
struct mm_struct;
-extern int trace_mmap_lock_reg(void);
-extern void trace_mmap_lock_unreg(void);
-
DECLARE_EVENT_CLASS(mmap_lock,
- TP_PROTO(struct mm_struct *mm, const char *memcg_path, bool write),
+ TP_PROTO(struct mm_struct *mm, bool write),
- TP_ARGS(mm, memcg_path, write),
+ TP_ARGS(mm, write),
TP_STRUCT__entry(
__field(struct mm_struct *, mm)
- __string(memcg_path, memcg_path)
+ __field(u64, memcg_id)
__field(bool, write)
),
TP_fast_assign(
__entry->mm = mm;
- __assign_str(memcg_path, memcg_path);
+ __entry->memcg_id = cgroup_id_from_mm(mm);
__entry->write = write;
),
TP_printk(
- "mm=%p memcg_path=%s write=%s",
- __entry->mm,
- __get_str(memcg_path),
+ "mm=%p memcg_id=%llu write=%s",
+ __entry->mm, __entry->memcg_id,
__entry->write ? "true" : "false"
)
);
#define DEFINE_MMAP_LOCK_EVENT(name) \
- DEFINE_EVENT_FN(mmap_lock, name, \
- TP_PROTO(struct mm_struct *mm, const char *memcg_path, \
- bool write), \
- TP_ARGS(mm, memcg_path, write), \
- trace_mmap_lock_reg, trace_mmap_lock_unreg)
+ DEFINE_EVENT(mmap_lock, name, \
+ TP_PROTO(struct mm_struct *mm, bool write), \
+ TP_ARGS(mm, write))
DEFINE_MMAP_LOCK_EVENT(mmap_lock_start_locking);
DEFINE_MMAP_LOCK_EVENT(mmap_lock_released);
-TRACE_EVENT_FN(mmap_lock_acquire_returned,
+TRACE_EVENT(mmap_lock_acquire_returned,
- TP_PROTO(struct mm_struct *mm, const char *memcg_path, bool write,
- bool success),
+ TP_PROTO(struct mm_struct *mm, bool write, bool success),
- TP_ARGS(mm, memcg_path, write, success),
+ TP_ARGS(mm, write, success),
TP_STRUCT__entry(
__field(struct mm_struct *, mm)
- __string(memcg_path, memcg_path)
+ __field(u64, memcg_id)
__field(bool, write)
__field(bool, success)
),
TP_fast_assign(
__entry->mm = mm;
- __assign_str(memcg_path, memcg_path);
+ __entry->memcg_id = cgroup_id_from_mm(mm);
__entry->write = write;
__entry->success = success;
),
TP_printk(
- "mm=%p memcg_path=%s write=%s success=%s",
+ "mm=%p memcg_id=%llu write=%s success=%s",
__entry->mm,
- __get_str(memcg_path),
+ __entry->memcg_id,
__entry->write ? "true" : "false",
__entry->success ? "true" : "false"
- ),
-
- trace_mmap_lock_reg, trace_mmap_lock_unreg
+ )
);
#endif /* _TRACE_MMAP_LOCK_H */
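With the memcg-path buffering and the registration callbacks removed, call sites shrink to plain trace_*() calls taking only (mm, write[, success]); a minimal hand-written sketch (the helper name is illustrative, the event signatures come from the definitions above):

/* Illustrative only: take mmap_lock for write and emit the events. */
static inline void example_mmap_write_lock(struct mm_struct *mm)
{
	trace_mmap_lock_start_locking(mm, true);
	down_write(&mm->mmap_lock);
	trace_mmap_lock_acquire_returned(mm, true, true);
}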
diff --git a/include/trace/events/mmc.h b/include/trace/events/mmc.h
index 7b706ff21335..f1c2e94f7f68 100644
--- a/include/trace/events/mmc.h
+++ b/include/trace/events/mmc.h
@@ -68,7 +68,7 @@ TRACE_EVENT(mmc_request_start,
__entry->need_retune = host->need_retune;
__entry->hold_retune = host->hold_retune;
__entry->retune_period = host->retune_period;
- __assign_str(name, mmc_hostname(host));
+ __assign_str(name);
__entry->mrq = mrq;
),
@@ -156,7 +156,7 @@ TRACE_EVENT(mmc_request_done,
__entry->need_retune = host->need_retune;
__entry->hold_retune = host->hold_retune;
__entry->retune_period = host->retune_period;
- __assign_str(name, mmc_hostname(host));
+ __assign_str(name);
__entry->mrq = mrq;
),
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 412b5a46374c..a6e5a44c9b42 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -13,8 +13,78 @@
* Thus most bits set go first.
*/
+/* These define the values that are enums (the bits) */
+#define TRACE_GFP_FLAGS_GENERAL \
+ TRACE_GFP_EM(DMA) \
+ TRACE_GFP_EM(HIGHMEM) \
+ TRACE_GFP_EM(DMA32) \
+ TRACE_GFP_EM(MOVABLE) \
+ TRACE_GFP_EM(RECLAIMABLE) \
+ TRACE_GFP_EM(HIGH) \
+ TRACE_GFP_EM(IO) \
+ TRACE_GFP_EM(FS) \
+ TRACE_GFP_EM(ZERO) \
+ TRACE_GFP_EM(DIRECT_RECLAIM) \
+ TRACE_GFP_EM(KSWAPD_RECLAIM) \
+ TRACE_GFP_EM(WRITE) \
+ TRACE_GFP_EM(NOWARN) \
+ TRACE_GFP_EM(RETRY_MAYFAIL) \
+ TRACE_GFP_EM(NOFAIL) \
+ TRACE_GFP_EM(NORETRY) \
+ TRACE_GFP_EM(MEMALLOC) \
+ TRACE_GFP_EM(COMP) \
+ TRACE_GFP_EM(NOMEMALLOC) \
+ TRACE_GFP_EM(HARDWALL) \
+ TRACE_GFP_EM(THISNODE) \
+ TRACE_GFP_EM(ACCOUNT) \
+ TRACE_GFP_EM(ZEROTAGS)
+
+#ifdef CONFIG_KASAN_HW_TAGS
+# define TRACE_GFP_FLAGS_KASAN \
+ TRACE_GFP_EM(SKIP_ZERO) \
+ TRACE_GFP_EM(SKIP_KASAN)
+#else
+# define TRACE_GFP_FLAGS_KASAN
+#endif
+
+#ifdef CONFIG_LOCKDEP
+# define TRACE_GFP_FLAGS_LOCKDEP \
+ TRACE_GFP_EM(NOLOCKDEP)
+#else
+# define TRACE_GFP_FLAGS_LOCKDEP
+#endif
+
+#ifdef CONFIG_SLAB_OBJ_EXT
+# define TRACE_GFP_FLAGS_SLAB \
+ TRACE_GFP_EM(NO_OBJ_EXT)
+#else
+# define TRACE_GFP_FLAGS_SLAB
+#endif
+
+#define TRACE_GFP_FLAGS \
+ TRACE_GFP_FLAGS_GENERAL \
+ TRACE_GFP_FLAGS_KASAN \
+ TRACE_GFP_FLAGS_LOCKDEP \
+ TRACE_GFP_FLAGS_SLAB
+
+#undef TRACE_GFP_EM
+#define TRACE_GFP_EM(a) TRACE_DEFINE_ENUM(___GFP_##a##_BIT);
+
+TRACE_GFP_FLAGS
+
+/* Just in case these are ever used */
+TRACE_DEFINE_ENUM(___GFP_UNUSED_BIT);
+TRACE_DEFINE_ENUM(___GFP_LAST_BIT);
+
#define gfpflag_string(flag) {(__force unsigned long)flag, #flag}
+/*
+ * For the flags that map to single bits, reuse TRACE_GFP_FLAGS so that
+ * any future additions to that list are picked up here automatically.
+ */
+#undef TRACE_GFP_EM
+#define TRACE_GFP_EM(a) gfpflag_string(__GFP_##a),
+
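For illustration, each TRACE_GFP_EM() entry is expanded twice; written out by hand for a single flag, the two definitions above yield:

/* First expansion: export the bit number to userspace. */
TRACE_DEFINE_ENUM(___GFP_DMA_BIT);
/* Second expansion: one flag-name table entry, i.e. */
gfpflag_string(__GFP_DMA),	/* -> {(__force unsigned long)__GFP_DMA, "__GFP_DMA"}, */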
#define __def_gfpflag_names \
gfpflag_string(GFP_TRANSHUGE), \
gfpflag_string(GFP_TRANSHUGE_LIGHT), \
@@ -28,122 +98,83 @@
gfpflag_string(GFP_NOIO), \
gfpflag_string(GFP_NOWAIT), \
gfpflag_string(GFP_DMA), \
- gfpflag_string(__GFP_HIGHMEM), \
gfpflag_string(GFP_DMA32), \
- gfpflag_string(__GFP_HIGH), \
- gfpflag_string(__GFP_ATOMIC), \
- gfpflag_string(__GFP_IO), \
- gfpflag_string(__GFP_FS), \
- gfpflag_string(__GFP_NOWARN), \
- gfpflag_string(__GFP_RETRY_MAYFAIL), \
- gfpflag_string(__GFP_NOFAIL), \
- gfpflag_string(__GFP_NORETRY), \
- gfpflag_string(__GFP_COMP), \
- gfpflag_string(__GFP_ZERO), \
- gfpflag_string(__GFP_NOMEMALLOC), \
- gfpflag_string(__GFP_MEMALLOC), \
- gfpflag_string(__GFP_HARDWALL), \
- gfpflag_string(__GFP_THISNODE), \
- gfpflag_string(__GFP_RECLAIMABLE), \
- gfpflag_string(__GFP_MOVABLE), \
- gfpflag_string(__GFP_ACCOUNT), \
- gfpflag_string(__GFP_WRITE), \
gfpflag_string(__GFP_RECLAIM), \
- gfpflag_string(__GFP_DIRECT_RECLAIM), \
- gfpflag_string(__GFP_KSWAPD_RECLAIM), \
- gfpflag_string(__GFP_ZEROTAGS)
-
-#ifdef CONFIG_KASAN_HW_TAGS
-#define __def_gfpflag_names_kasan , \
- gfpflag_string(__GFP_SKIP_ZERO), \
- gfpflag_string(__GFP_SKIP_KASAN_POISON), \
- gfpflag_string(__GFP_SKIP_KASAN_UNPOISON)
-#else
-#define __def_gfpflag_names_kasan
-#endif
+ TRACE_GFP_FLAGS \
+ { 0, NULL }
#define show_gfp_flags(flags) \
- (flags) ? __print_flags(flags, "|", \
- __def_gfpflag_names __def_gfpflag_names_kasan \
+ (flags) ? __print_flags(flags, "|", __def_gfpflag_names \
) : "none"
#ifdef CONFIG_MMU
-#define IF_HAVE_PG_MLOCK(flag,string) ,{1UL << flag, string}
+#define IF_HAVE_PG_MLOCK(_name) ,{1UL << PG_##_name, __stringify(_name)}
#else
-#define IF_HAVE_PG_MLOCK(flag,string)
-#endif
-
-#ifdef CONFIG_ARCH_USES_PG_UNCACHED
-#define IF_HAVE_PG_UNCACHED(flag,string) ,{1UL << flag, string}
-#else
-#define IF_HAVE_PG_UNCACHED(flag,string)
+#define IF_HAVE_PG_MLOCK(_name)
#endif
#ifdef CONFIG_MEMORY_FAILURE
-#define IF_HAVE_PG_HWPOISON(flag,string) ,{1UL << flag, string}
+#define IF_HAVE_PG_HWPOISON(_name) ,{1UL << PG_##_name, __stringify(_name)}
#else
-#define IF_HAVE_PG_HWPOISON(flag,string)
+#define IF_HAVE_PG_HWPOISON(_name)
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
-#define IF_HAVE_PG_IDLE(flag,string) ,{1UL << flag, string}
+#define IF_HAVE_PG_IDLE(_name) ,{1UL << PG_##_name, __stringify(_name)}
#else
-#define IF_HAVE_PG_IDLE(flag,string)
+#define IF_HAVE_PG_IDLE(_name)
#endif
-#ifdef CONFIG_ARCH_USES_PG_ARCH_X
-#define IF_HAVE_PG_ARCH_X(flag,string) ,{1UL << flag, string}
+#ifdef CONFIG_ARCH_USES_PG_ARCH_2
+#define IF_HAVE_PG_ARCH_2(_name) ,{1UL << PG_##_name, __stringify(_name)}
#else
-#define IF_HAVE_PG_ARCH_X(flag,string)
+#define IF_HAVE_PG_ARCH_2(_name)
#endif
-#ifdef CONFIG_KASAN_HW_TAGS
-#define IF_HAVE_PG_SKIP_KASAN_POISON(flag,string) ,{1UL << flag, string}
+#ifdef CONFIG_ARCH_USES_PG_ARCH_3
+#define IF_HAVE_PG_ARCH_3(_name) ,{1UL << PG_##_name, __stringify(_name)}
#else
-#define IF_HAVE_PG_SKIP_KASAN_POISON(flag,string)
+#define IF_HAVE_PG_ARCH_3(_name)
#endif
+#define DEF_PAGEFLAG_NAME(_name) { 1UL << PG_##_name, __stringify(_name) }
+
#define __def_pageflag_names \
- {1UL << PG_locked, "locked" }, \
- {1UL << PG_waiters, "waiters" }, \
- {1UL << PG_error, "error" }, \
- {1UL << PG_referenced, "referenced" }, \
- {1UL << PG_uptodate, "uptodate" }, \
- {1UL << PG_dirty, "dirty" }, \
- {1UL << PG_lru, "lru" }, \
- {1UL << PG_active, "active" }, \
- {1UL << PG_workingset, "workingset" }, \
- {1UL << PG_slab, "slab" }, \
- {1UL << PG_owner_priv_1, "owner_priv_1" }, \
- {1UL << PG_arch_1, "arch_1" }, \
- {1UL << PG_reserved, "reserved" }, \
- {1UL << PG_private, "private" }, \
- {1UL << PG_private_2, "private_2" }, \
- {1UL << PG_writeback, "writeback" }, \
- {1UL << PG_head, "head" }, \
- {1UL << PG_mappedtodisk, "mappedtodisk" }, \
- {1UL << PG_reclaim, "reclaim" }, \
- {1UL << PG_swapbacked, "swapbacked" }, \
- {1UL << PG_unevictable, "unevictable" } \
-IF_HAVE_PG_MLOCK(PG_mlocked, "mlocked" ) \
-IF_HAVE_PG_UNCACHED(PG_uncached, "uncached" ) \
-IF_HAVE_PG_HWPOISON(PG_hwpoison, "hwpoison" ) \
-IF_HAVE_PG_IDLE(PG_young, "young" ) \
-IF_HAVE_PG_IDLE(PG_idle, "idle" ) \
-IF_HAVE_PG_ARCH_X(PG_arch_2, "arch_2" ) \
-IF_HAVE_PG_ARCH_X(PG_arch_3, "arch_3" ) \
-IF_HAVE_PG_SKIP_KASAN_POISON(PG_skip_kasan_poison, "skip_kasan_poison")
+ DEF_PAGEFLAG_NAME(locked), \
+ DEF_PAGEFLAG_NAME(waiters), \
+ DEF_PAGEFLAG_NAME(referenced), \
+ DEF_PAGEFLAG_NAME(uptodate), \
+ DEF_PAGEFLAG_NAME(dirty), \
+ DEF_PAGEFLAG_NAME(lru), \
+ DEF_PAGEFLAG_NAME(active), \
+ DEF_PAGEFLAG_NAME(workingset), \
+ DEF_PAGEFLAG_NAME(owner_priv_1), \
+ DEF_PAGEFLAG_NAME(owner_2), \
+ DEF_PAGEFLAG_NAME(arch_1), \
+ DEF_PAGEFLAG_NAME(reserved), \
+ DEF_PAGEFLAG_NAME(private), \
+ DEF_PAGEFLAG_NAME(private_2), \
+ DEF_PAGEFLAG_NAME(writeback), \
+ DEF_PAGEFLAG_NAME(head), \
+ DEF_PAGEFLAG_NAME(reclaim), \
+ DEF_PAGEFLAG_NAME(swapbacked), \
+ DEF_PAGEFLAG_NAME(unevictable), \
+ DEF_PAGEFLAG_NAME(dropbehind) \
+IF_HAVE_PG_MLOCK(mlocked) \
+IF_HAVE_PG_HWPOISON(hwpoison) \
+IF_HAVE_PG_IDLE(idle) \
+IF_HAVE_PG_IDLE(young) \
+IF_HAVE_PG_ARCH_2(arch_2) \
+IF_HAVE_PG_ARCH_3(arch_3)
#define show_page_flags(flags) \
(flags) ? __print_flags(flags, "|", \
__def_pageflag_names \
) : "none"
-#if defined(CONFIG_X86)
-#define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" }
-#elif defined(CONFIG_PPC)
+#if defined(CONFIG_PPC64)
#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" }
-#elif defined(CONFIG_PARISC) || defined(CONFIG_IA64)
+#elif defined(CONFIG_PARISC)
#define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" }
#elif !defined(CONFIG_MMU)
#define __VM_ARCH_SPECIFIC_1 {VM_MAPPED_COPY,"mappedcopy" }
@@ -163,6 +194,12 @@ IF_HAVE_PG_SKIP_KASAN_POISON(PG_skip_kasan_poison, "skip_kasan_poison")
# define IF_HAVE_UFFD_MINOR(flag, name)
#endif
+#if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
+# define IF_HAVE_VM_DROPPABLE(flag, name) {flag, name},
+#else
+# define IF_HAVE_VM_DROPPABLE(flag, name)
+#endif
+
#define __def_vmaflag_names \
{VM_READ, "read" }, \
{VM_WRITE, "write" }, \
@@ -176,6 +213,7 @@ IF_HAVE_PG_SKIP_KASAN_POISON(PG_skip_kasan_poison, "skip_kasan_poison")
{VM_UFFD_MISSING, "uffd_missing" }, \
IF_HAVE_UFFD_MINOR(VM_UFFD_MINOR, "uffd_minor" ) \
{VM_PFNMAP, "pfnmap" }, \
+ {VM_MAYBE_GUARD, "maybe_guard" }, \
{VM_UFFD_WP, "uffd_wp" }, \
{VM_LOCKED, "locked" }, \
{VM_IO, "io" }, \
@@ -195,6 +233,7 @@ IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \
{VM_MIXEDMAP, "mixedmap" }, \
{VM_HUGEPAGE, "hugepage" }, \
{VM_NOHUGEPAGE, "nohugepage" }, \
+IF_HAVE_VM_DROPPABLE(VM_DROPPABLE, "droppable" ) \
{VM_MERGEABLE, "mergeable" } \
#define show_vma_flags(flags) \
@@ -222,8 +261,8 @@ IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \
#define compact_result_to_feedback(result) \
({ \
enum compact_result __result = result; \
- (compaction_failed(__result)) ? COMPACTION_FAILED : \
- (compaction_withdrawn(__result)) ? COMPACTION_WITHDRAWN : COMPACTION_PROGRESS; \
+ (__result == COMPACT_COMPLETE) ? COMPACTION_FAILED : \
+ (__result == COMPACT_SUCCESS) ? COMPACTION_PROGRESS : COMPACTION_WITHDRAWN; \
})
#define COMPACTION_FEEDBACK \
diff --git a/include/trace/events/module.h b/include/trace/events/module.h
index 097485c73c01..e5a006be9dc6 100644
--- a/include/trace/events/module.h
+++ b/include/trace/events/module.h
@@ -41,7 +41,7 @@ TRACE_EVENT(module_load,
TP_fast_assign(
__entry->taints = mod->taints;
- __assign_str(name, mod->name);
+ __assign_str(name);
),
TP_printk("%s %s", __get_str(name), show_module_flags(__entry->taints))
@@ -58,7 +58,7 @@ TRACE_EVENT(module_free,
),
TP_fast_assign(
- __assign_str(name, mod->name);
+ __assign_str(name);
),
TP_printk("%s", __get_str(name))
@@ -82,7 +82,7 @@ DECLARE_EVENT_CLASS(module_refcnt,
TP_fast_assign(
__entry->ip = ip;
__entry->refcnt = atomic_read(&mod->refcnt);
- __assign_str(name, mod->name);
+ __assign_str(name);
),
TP_printk("%s call_site=%ps refcnt=%d",
@@ -119,7 +119,7 @@ TRACE_EVENT(module_request,
TP_fast_assign(
__entry->ip = ip;
__entry->wait = wait;
- __assign_str(name, name);
+ __assign_str(name);
),
TP_printk("%s wait=%d call_site=%ps",
diff --git a/include/trace/events/mptcp.h b/include/trace/events/mptcp.h
index 563e48617374..085b749cdd97 100644
--- a/include/trace/events/mptcp.h
+++ b/include/trace/events/mptcp.h
@@ -34,7 +34,7 @@ TRACE_EVENT(mptcp_subflow_get_send,
struct sock *ssk;
__entry->active = mptcp_subflow_active(subflow);
- __entry->backup = subflow->backup;
+ __entry->backup = subflow->backup || subflow->request_bkup;
if (subflow->tcp_sock && sk_fullsock(subflow->tcp_sock))
__entry->free = sk_stream_memory_free(subflow->tcp_sock);
@@ -44,7 +44,7 @@ TRACE_EVENT(mptcp_subflow_get_send,
ssk = mptcp_subflow_tcp_sock(subflow);
if (ssk && sk_fullsock(ssk)) {
__entry->snd_wnd = tcp_sk(ssk)->snd_wnd;
- __entry->pace = ssk->sk_pacing_rate;
+ __entry->pace = READ_ONCE(ssk->sk_pacing_rate);
} else {
__entry->snd_wnd = 0;
__entry->pace = 0;
diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h
index 6678cf8b235b..b567b9ffedc1 100644
--- a/include/trace/events/napi.h
+++ b/include/trace/events/napi.h
@@ -26,7 +26,7 @@ TRACE_EVENT(napi_poll,
TP_fast_assign(
__entry->napi = napi;
- __assign_str(dev_name, napi->dev ? napi->dev->name : NO_DEV);
+ __assign_str(dev_name);
__entry->work = work;
__entry->budget = budget;
),
@@ -36,6 +36,39 @@ TRACE_EVENT(napi_poll,
__entry->work, __entry->budget)
);
+TRACE_EVENT(dql_stall_detected,
+
+ TP_PROTO(unsigned short thrs, unsigned int len,
+ unsigned long last_reap, unsigned long hist_head,
+ unsigned long now, unsigned long *hist),
+
+ TP_ARGS(thrs, len, last_reap, hist_head, now, hist),
+
+ TP_STRUCT__entry(
+ __field( unsigned short, thrs)
+ __field( unsigned int, len)
+ __field( unsigned long, last_reap)
+ __field( unsigned long, hist_head)
+ __field( unsigned long, now)
+ __array( unsigned long, hist, 4)
+ ),
+
+ TP_fast_assign(
+ __entry->thrs = thrs;
+ __entry->len = len;
+ __entry->last_reap = last_reap;
+ __entry->hist_head = hist_head * BITS_PER_LONG;
+ __entry->now = now;
+		memcpy(__entry->hist, hist, sizeof(__entry->hist));
+ ),
+
+ TP_printk("thrs %u len %u last_reap %lu hist_head %lu now %lu hist %016lx %016lx %016lx %016lx",
+ __entry->thrs, __entry->len,
+ __entry->last_reap, __entry->hist_head, __entry->now,
+ __entry->hist[0], __entry->hist[1],
+ __entry->hist[2], __entry->hist[3])
+);
+
#undef NO_DEV
#endif /* _TRACE_NAPI_H */
diff --git a/include/trace/events/nbd.h b/include/trace/events/nbd.h
index 9849956f34d8..390d98a05c9d 100644
--- a/include/trace/events/nbd.h
+++ b/include/trace/events/nbd.h
@@ -72,7 +72,7 @@ DECLARE_EVENT_CLASS(nbd_send_request,
),
TP_fast_assign(
- __entry->nbd_request = 0;
+ __entry->nbd_request = NULL;
__entry->dev_index = index;
__entry->request = rq;
),
diff --git a/include/trace/events/neigh.h b/include/trace/events/neigh.h
index 5eaa1fa99171..12362c35dbc0 100644
--- a/include/trace/events/neigh.h
+++ b/include/trace/events/neigh.h
@@ -39,15 +39,13 @@ TRACE_EVENT(neigh_create,
),
TP_fast_assign(
- struct in6_addr *pin6;
__be32 *p32;
__entry->family = tbl->family;
- __assign_str(dev, (dev ? dev->name : "NULL"));
+ __assign_str(dev);
__entry->entries = atomic_read(&tbl->gc_entries);
__entry->created = n != NULL;
__entry->gc_exempt = exempt_from_gc;
- pin6 = (struct in6_addr *)__entry->primary_key6;
p32 = (__be32 *)__entry->primary_key4;
if (tbl->family == AF_INET)
@@ -57,6 +55,8 @@ TRACE_EVENT(neigh_create,
#if IS_ENABLED(CONFIG_IPV6)
if (tbl->family == AF_INET6) {
+ struct in6_addr *pin6;
+
pin6 = (struct in6_addr *)__entry->primary_key6;
*pin6 = *(struct in6_addr *)pkey;
}
@@ -103,7 +103,7 @@ TRACE_EVENT(neigh_update,
__be32 *p32;
__entry->family = n->tbl->family;
- __assign_str(dev, (n->dev ? n->dev->name : "NULL"));
+ __assign_str(dev);
__entry->lladdr_len = lladdr_len;
memcpy(__entry->lladdr, n->ha, lladdr_len);
__entry->flags = n->flags;
@@ -180,7 +180,7 @@ DECLARE_EVENT_CLASS(neigh__update,
__be32 *p32;
__entry->family = n->tbl->family;
- __assign_str(dev, (n->dev ? n->dev->name : "NULL"));
+ __assign_str(dev);
__entry->lladdr_len = lladdr_len;
memcpy(__entry->lladdr, n->ha, lladdr_len);
__entry->flags = n->flags;
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
index da611a7aaf97..fdd9ad474ce3 100644
--- a/include/trace/events/net.h
+++ b/include/trace/events/net.h
@@ -35,10 +35,11 @@ TRACE_EVENT(net_dev_start_xmit,
__field( u16, gso_size )
__field( u16, gso_segs )
__field( u16, gso_type )
+ __field( u64, net_cookie )
),
TP_fast_assign(
- __assign_str(name, dev->name);
+ __assign_str(name);
__entry->queue_mapping = skb->queue_mapping;
__entry->skbaddr = skb;
__entry->vlan_tagged = skb_vlan_tag_present(skb);
@@ -51,21 +52,24 @@ TRACE_EVENT(net_dev_start_xmit,
__entry->network_offset = skb_network_offset(skb);
__entry->transport_offset_valid =
skb_transport_header_was_set(skb);
- __entry->transport_offset = skb_transport_offset(skb);
+ __entry->transport_offset = skb_transport_header_was_set(skb) ?
+ skb_transport_offset(skb) : 0;
__entry->tx_flags = skb_shinfo(skb)->tx_flags;
__entry->gso_size = skb_shinfo(skb)->gso_size;
__entry->gso_segs = skb_shinfo(skb)->gso_segs;
__entry->gso_type = skb_shinfo(skb)->gso_type;
+ __entry->net_cookie = dev_net(dev)->net_cookie;
),
- TP_printk("dev=%s queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d len=%u data_len=%u network_offset=%d transport_offset_valid=%d transport_offset=%d tx_flags=%d gso_size=%d gso_segs=%d gso_type=%#x",
+ TP_printk("dev=%s queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d len=%u data_len=%u network_offset=%d transport_offset_valid=%d transport_offset=%d tx_flags=%d gso_size=%d gso_segs=%d gso_type=%#x net_cookie=%llu",
__get_str(name), __entry->queue_mapping, __entry->skbaddr,
__entry->vlan_tagged, __entry->vlan_proto, __entry->vlan_tci,
__entry->protocol, __entry->ip_summed, __entry->len,
__entry->data_len,
__entry->network_offset, __entry->transport_offset_valid,
__entry->transport_offset, __entry->tx_flags,
- __entry->gso_size, __entry->gso_segs, __entry->gso_type)
+ __entry->gso_size, __entry->gso_segs,
+ __entry->gso_type, __entry->net_cookie)
);
TRACE_EVENT(net_dev_xmit,
@@ -82,17 +86,21 @@ TRACE_EVENT(net_dev_xmit,
__field( unsigned int, len )
__field( int, rc )
__string( name, dev->name )
+ __field( u64, net_cookie )
),
TP_fast_assign(
__entry->skbaddr = skb;
__entry->len = skb_len;
__entry->rc = rc;
- __assign_str(name, dev->name);
+ __entry->net_cookie = dev_net(dev)->net_cookie;
+ __assign_str(name);
),
- TP_printk("dev=%s skbaddr=%p len=%u rc=%d",
- __get_str(name), __entry->skbaddr, __entry->len, __entry->rc)
+ TP_printk("dev=%s skbaddr=%p len=%u rc=%d net_cookie=%llu",
+ __get_str(name), __entry->skbaddr,
+ __entry->len, __entry->rc,
+ __entry->net_cookie)
);
TRACE_EVENT(net_dev_xmit_timeout,
@@ -106,16 +114,19 @@ TRACE_EVENT(net_dev_xmit_timeout,
__string( name, dev->name )
__string( driver, netdev_drivername(dev))
__field( int, queue_index )
+ __field( u64, net_cookie )
),
TP_fast_assign(
- __assign_str(name, dev->name);
- __assign_str(driver, netdev_drivername(dev));
+ __assign_str(name);
+ __assign_str(driver);
__entry->queue_index = queue_index;
+ __entry->net_cookie = dev_net(dev)->net_cookie;
),
- TP_printk("dev=%s driver=%s queue=%d",
- __get_str(name), __get_str(driver), __entry->queue_index)
+ TP_printk("dev=%s driver=%s queue=%d net_cookie=%llu",
+ __get_str(name), __get_str(driver),
+ __entry->queue_index, __entry->net_cookie)
);
DECLARE_EVENT_CLASS(net_dev_template,
@@ -128,16 +139,20 @@ DECLARE_EVENT_CLASS(net_dev_template,
__field( void *, skbaddr )
__field( unsigned int, len )
__string( name, skb->dev->name )
+ __field( u64, net_cookie )
),
TP_fast_assign(
__entry->skbaddr = skb;
__entry->len = skb->len;
- __assign_str(name, skb->dev->name);
+ __entry->net_cookie = dev_net(skb->dev)->net_cookie;
+ __assign_str(name);
),
- TP_printk("dev=%s skbaddr=%p len=%u",
- __get_str(name), __entry->skbaddr, __entry->len)
+ TP_printk("dev=%s skbaddr=%p len=%u net_cookie=%llu",
+ __get_str(name), __entry->skbaddr,
+ __entry->len,
+ __entry->net_cookie)
)
DEFINE_EVENT(net_dev_template, net_dev_queue,
@@ -187,10 +202,11 @@ DECLARE_EVENT_CLASS(net_dev_rx_verbose_template,
__field( unsigned char, nr_frags )
__field( u16, gso_size )
__field( u16, gso_type )
+ __field( u64, net_cookie )
),
TP_fast_assign(
- __assign_str(name, skb->dev->name);
+ __assign_str(name);
#ifdef CONFIG_NET_RX_BUSY_POLL
__entry->napi_id = skb->napi_id;
#else
@@ -213,16 +229,18 @@ DECLARE_EVENT_CLASS(net_dev_rx_verbose_template,
__entry->nr_frags = skb_shinfo(skb)->nr_frags;
__entry->gso_size = skb_shinfo(skb)->gso_size;
__entry->gso_type = skb_shinfo(skb)->gso_type;
+ __entry->net_cookie = dev_net(skb->dev)->net_cookie;
),
- TP_printk("dev=%s napi_id=%#x queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d hash=0x%08x l4_hash=%d len=%u data_len=%u truesize=%u mac_header_valid=%d mac_header=%d nr_frags=%d gso_size=%d gso_type=%#x",
+ TP_printk("dev=%s napi_id=%#x queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d hash=0x%08x l4_hash=%d len=%u data_len=%u truesize=%u mac_header_valid=%d mac_header=%d nr_frags=%d gso_size=%d gso_type=%#x net_cookie=%llu",
__get_str(name), __entry->napi_id, __entry->queue_mapping,
__entry->skbaddr, __entry->vlan_tagged, __entry->vlan_proto,
__entry->vlan_tci, __entry->protocol, __entry->ip_summed,
__entry->hash, __entry->l4_hash, __entry->len,
__entry->data_len, __entry->truesize,
__entry->mac_header_valid, __entry->mac_header,
- __entry->nr_frags, __entry->gso_size, __entry->gso_type)
+ __entry->nr_frags, __entry->gso_size,
+ __entry->gso_type, __entry->net_cookie)
);
DEFINE_EVENT(net_dev_rx_verbose_template, napi_gro_frags_entry,
diff --git a/include/trace/events/net_probe_common.h b/include/trace/events/net_probe_common.h
index 3930119cab08..976a58364bff 100644
--- a/include/trace/events/net_probe_common.h
+++ b/include/trace/events/net_probe_common.h
@@ -41,4 +41,75 @@
#endif
+#define TP_STORE_V4MAPPED(__entry, saddr, daddr) \
+ do { \
+ struct in6_addr *pin6; \
+ \
+ pin6 = (struct in6_addr *)__entry->saddr_v6; \
+ ipv6_addr_set_v4mapped(saddr, pin6); \
+ pin6 = (struct in6_addr *)__entry->daddr_v6; \
+ ipv6_addr_set_v4mapped(daddr, pin6); \
+ } while (0)
+
+#if IS_ENABLED(CONFIG_IPV6)
+#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6) \
+ do { \
+ if (sk->sk_family == AF_INET6) { \
+ struct in6_addr *pin6; \
+ \
+ pin6 = (struct in6_addr *)__entry->saddr_v6; \
+ *pin6 = saddr6; \
+ pin6 = (struct in6_addr *)__entry->daddr_v6; \
+ *pin6 = daddr6; \
+ } else { \
+ TP_STORE_V4MAPPED(__entry, saddr, daddr); \
+ } \
+ } while (0)
+#else
+#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6) \
+ TP_STORE_V4MAPPED(__entry, saddr, daddr)
+#endif
+
+#define TP_STORE_ADDR_PORTS_SKB_V4(skb, protoh, entry_saddr, entry_daddr) \
+ do { \
+ struct sockaddr_in *v4 = (void *)entry_saddr; \
+ \
+ v4->sin_family = AF_INET; \
+ v4->sin_port = protoh->source; \
+ v4->sin_addr.s_addr = ip_hdr(skb)->saddr; \
+ v4 = (void *)entry_daddr; \
+ v4->sin_family = AF_INET; \
+ v4->sin_port = protoh->dest; \
+ v4->sin_addr.s_addr = ip_hdr(skb)->daddr; \
+ } while (0)
+
+#if IS_ENABLED(CONFIG_IPV6)
+
+#define TP_STORE_ADDR_PORTS_SKB(skb, protoh, entry_saddr, entry_daddr) \
+ do { \
+ const struct iphdr *iph = ip_hdr(skb); \
+ \
+ if (iph->version == 6) { \
+ struct sockaddr_in6 *v6 = (void *)entry_saddr; \
+ \
+ v6->sin6_family = AF_INET6; \
+ v6->sin6_port = protoh->source; \
+ v6->sin6_addr = ipv6_hdr(skb)->saddr; \
+ v6 = (void *)entry_daddr; \
+ v6->sin6_family = AF_INET6; \
+ v6->sin6_port = protoh->dest; \
+ v6->sin6_addr = ipv6_hdr(skb)->daddr; \
+ } else \
+ TP_STORE_ADDR_PORTS_SKB_V4(skb, protoh, \
+ entry_saddr, \
+ entry_daddr); \
+ } while (0)
+
+#else
+
+#define TP_STORE_ADDR_PORTS_SKB(skb, protoh, entry_saddr, entry_daddr) \
+ TP_STORE_ADDR_PORTS_SKB_V4(skb, protoh, entry_saddr, entry_daddr)
+
+#endif
+
#endif
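A consumer of the new TP_STORE_ADDR_PORTS_SKB() helper (for example a TCP skb event) would look roughly like the fragment below; the entry layout is an assumption for illustration, only the helper itself comes from this header:

	/* Illustrative TRACE_EVENT() fragment using the helper. */
	TP_STRUCT__entry(
		__array(__u8, saddr, sizeof(struct sockaddr_in6))
		__array(__u8, daddr, sizeof(struct sockaddr_in6))
	),

	TP_fast_assign(
		const struct tcphdr *th = tcp_hdr(skb);

		memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
		memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
		TP_STORE_ADDR_PORTS_SKB(skb, th, __entry->saddr, __entry->daddr);
	),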
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index beec534cbaab..64a382fbc31a 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -16,38 +16,114 @@
* Define enums for tracing information.
*/
#define netfs_read_traces \
+ EM(netfs_read_trace_dio_read, "DIO-READ ") \
EM(netfs_read_trace_expanded, "EXPANDED ") \
EM(netfs_read_trace_readahead, "READAHEAD") \
EM(netfs_read_trace_readpage, "READPAGE ") \
+ EM(netfs_read_trace_read_gaps, "READ-GAPS") \
+ EM(netfs_read_trace_read_single, "READ-SNGL") \
+ EM(netfs_read_trace_prefetch_for_write, "PREFETCHW") \
E_(netfs_read_trace_write_begin, "WRITEBEGN")
+#define netfs_write_traces \
+ EM(netfs_write_trace_copy_to_cache, "COPY2CACH") \
+ EM(netfs_write_trace_dio_write, "DIO-WRITE") \
+ EM(netfs_write_trace_unbuffered_write, "UNB-WRITE") \
+ EM(netfs_write_trace_writeback, "WRITEBACK") \
+ EM(netfs_write_trace_writeback_single, "WB-SINGLE") \
+ E_(netfs_write_trace_writethrough, "WRITETHRU")
+
#define netfs_rreq_origins \
EM(NETFS_READAHEAD, "RA") \
EM(NETFS_READPAGE, "RP") \
- E_(NETFS_READ_FOR_WRITE, "RW")
+ EM(NETFS_READ_GAPS, "RG") \
+ EM(NETFS_READ_SINGLE, "R1") \
+ EM(NETFS_READ_FOR_WRITE, "RW") \
+ EM(NETFS_UNBUFFERED_READ, "UR") \
+ EM(NETFS_DIO_READ, "DR") \
+ EM(NETFS_WRITEBACK, "WB") \
+ EM(NETFS_WRITEBACK_SINGLE, "W1") \
+ EM(NETFS_WRITETHROUGH, "WT") \
+ EM(NETFS_UNBUFFERED_WRITE, "UW") \
+ EM(NETFS_DIO_WRITE, "DW") \
+ E_(NETFS_PGPRIV2_COPY_TO_CACHE, "2C")
#define netfs_rreq_traces \
EM(netfs_rreq_trace_assess, "ASSESS ") \
+ EM(netfs_rreq_trace_collect, "COLLECT") \
+ EM(netfs_rreq_trace_complete, "COMPLET") \
EM(netfs_rreq_trace_copy, "COPY ") \
+ EM(netfs_rreq_trace_dirty, "DIRTY ") \
EM(netfs_rreq_trace_done, "DONE ") \
+ EM(netfs_rreq_trace_end_copy_to_cache, "END-C2C") \
EM(netfs_rreq_trace_free, "FREE ") \
+ EM(netfs_rreq_trace_ki_complete, "KI-CMPL") \
+ EM(netfs_rreq_trace_recollect, "RECLLCT") \
+ EM(netfs_rreq_trace_redirty, "REDIRTY") \
EM(netfs_rreq_trace_resubmit, "RESUBMT") \
+ EM(netfs_rreq_trace_set_abandon, "S-ABNDN") \
+ EM(netfs_rreq_trace_set_pause, "PAUSE ") \
EM(netfs_rreq_trace_unlock, "UNLOCK ") \
- E_(netfs_rreq_trace_unmark, "UNMARK ")
+ EM(netfs_rreq_trace_unlock_pgpriv2, "UNLCK-2") \
+ EM(netfs_rreq_trace_unmark, "UNMARK ") \
+ EM(netfs_rreq_trace_unpause, "UNPAUSE") \
+ EM(netfs_rreq_trace_wait_ip, "WAIT-IP") \
+ EM(netfs_rreq_trace_wait_pause, "--PAUSED--") \
+ EM(netfs_rreq_trace_wait_quiesce, "WAIT-QUIESCE") \
+ EM(netfs_rreq_trace_waited_ip, "DONE-IP") \
+ EM(netfs_rreq_trace_waited_pause, "--UNPAUSED--") \
+ EM(netfs_rreq_trace_waited_quiesce, "DONE-QUIESCE") \
+ EM(netfs_rreq_trace_wake_ip, "WAKE-IP") \
+ EM(netfs_rreq_trace_wake_queue, "WAKE-Q ") \
+ E_(netfs_rreq_trace_write_done, "WR-DONE")
#define netfs_sreq_sources \
+ EM(NETFS_SOURCE_UNKNOWN, "----") \
EM(NETFS_FILL_WITH_ZEROES, "ZERO") \
EM(NETFS_DOWNLOAD_FROM_SERVER, "DOWN") \
EM(NETFS_READ_FROM_CACHE, "READ") \
- E_(NETFS_INVALID_READ, "INVL") \
+ EM(NETFS_INVALID_READ, "INVL") \
+ EM(NETFS_UPLOAD_TO_SERVER, "UPLD") \
+ E_(NETFS_WRITE_TO_CACHE, "WRIT")
#define netfs_sreq_traces \
+ EM(netfs_sreq_trace_abandoned, "ABNDN") \
+ EM(netfs_sreq_trace_add_donations, "+DON ") \
+ EM(netfs_sreq_trace_added, "ADD ") \
+ EM(netfs_sreq_trace_cache_nowrite, "CA-NW") \
+ EM(netfs_sreq_trace_cache_prepare, "CA-PR") \
+ EM(netfs_sreq_trace_cache_write, "CA-WR") \
+ EM(netfs_sreq_trace_cancel, "CANCL") \
+ EM(netfs_sreq_trace_clear, "CLEAR") \
+ EM(netfs_sreq_trace_consumed, "CONSM") \
+ EM(netfs_sreq_trace_discard, "DSCRD") \
+ EM(netfs_sreq_trace_donate_to_prev, "DON-P") \
+ EM(netfs_sreq_trace_donate_to_next, "DON-N") \
EM(netfs_sreq_trace_download_instead, "RDOWN") \
+ EM(netfs_sreq_trace_fail, "FAIL ") \
EM(netfs_sreq_trace_free, "FREE ") \
+ EM(netfs_sreq_trace_hit_eof, "EOF ") \
+ EM(netfs_sreq_trace_io_bad, "I-BAD") \
+ EM(netfs_sreq_trace_io_malformed, "I-MLF") \
+ EM(netfs_sreq_trace_io_unknown, "I-UNK") \
+ EM(netfs_sreq_trace_io_progress, "I-OK ") \
+ EM(netfs_sreq_trace_io_req_submitted, "I-RSB") \
+ EM(netfs_sreq_trace_io_retry_needed, "I-RTR") \
+ EM(netfs_sreq_trace_limited, "LIMIT") \
+ EM(netfs_sreq_trace_need_clear, "N-CLR") \
+ EM(netfs_sreq_trace_partial_read, "PARTR") \
+ EM(netfs_sreq_trace_need_retry, "ND-RT") \
EM(netfs_sreq_trace_prepare, "PREP ") \
- EM(netfs_sreq_trace_resubmit_short, "SHORT") \
+ EM(netfs_sreq_trace_prep_failed, "PRPFL") \
+ EM(netfs_sreq_trace_progress, "PRGRS") \
+ EM(netfs_sreq_trace_reprep_failed, "REPFL") \
+ EM(netfs_sreq_trace_retry, "RETRY") \
+ EM(netfs_sreq_trace_short, "SHORT") \
+ EM(netfs_sreq_trace_split, "SPLIT") \
EM(netfs_sreq_trace_submit, "SUBMT") \
+ EM(netfs_sreq_trace_superfluous, "SPRFL") \
EM(netfs_sreq_trace_terminated, "TERM ") \
+ EM(netfs_sreq_trace_wait_for, "_WAIT") \
EM(netfs_sreq_trace_write, "WRITE") \
EM(netfs_sreq_trace_write_skip, "SKIP ") \
E_(netfs_sreq_trace_write_term, "WTERM")
@@ -55,32 +131,106 @@
#define netfs_failures \
EM(netfs_fail_check_write_begin, "check-write-begin") \
EM(netfs_fail_copy_to_cache, "copy-to-cache") \
+ EM(netfs_fail_dio_read_short, "dio-read-short") \
+ EM(netfs_fail_dio_read_zero, "dio-read-zero") \
EM(netfs_fail_read, "read") \
EM(netfs_fail_short_read, "short-read") \
- E_(netfs_fail_prepare_write, "prep-write")
+ EM(netfs_fail_prepare_write, "prep-write") \
+ E_(netfs_fail_write, "write")
#define netfs_rreq_ref_traces \
- EM(netfs_rreq_trace_get_hold, "GET HOLD ") \
+ EM(netfs_rreq_trace_get_for_outstanding,"GET OUTSTND") \
EM(netfs_rreq_trace_get_subreq, "GET SUBREQ ") \
EM(netfs_rreq_trace_put_complete, "PUT COMPLT ") \
EM(netfs_rreq_trace_put_discard, "PUT DISCARD") \
EM(netfs_rreq_trace_put_failed, "PUT FAILED ") \
- EM(netfs_rreq_trace_put_hold, "PUT HOLD ") \
+ EM(netfs_rreq_trace_put_no_submit, "PUT NO-SUBM") \
+ EM(netfs_rreq_trace_put_return, "PUT RETURN ") \
EM(netfs_rreq_trace_put_subreq, "PUT SUBREQ ") \
- EM(netfs_rreq_trace_put_zero_len, "PUT ZEROLEN") \
+ EM(netfs_rreq_trace_put_work_ip, "PUT WORK IP ") \
+ EM(netfs_rreq_trace_see_work, "SEE WORK ") \
+ EM(netfs_rreq_trace_see_work_complete, "SEE WORK CP") \
E_(netfs_rreq_trace_new, "NEW ")
#define netfs_sreq_ref_traces \
EM(netfs_sreq_trace_get_copy_to_cache, "GET COPY2C ") \
- EM(netfs_sreq_trace_get_resubmit, "GET RESUBMIT") \
+ EM(netfs_sreq_trace_get_resubmit, "GET RESUBMT") \
+ EM(netfs_sreq_trace_get_submit, "GET SUBMIT ") \
EM(netfs_sreq_trace_get_short_read, "GET SHORTRD") \
EM(netfs_sreq_trace_new, "NEW ") \
+ EM(netfs_sreq_trace_put_abandon, "PUT ABANDON") \
+ EM(netfs_sreq_trace_put_cancel, "PUT CANCEL ") \
EM(netfs_sreq_trace_put_clear, "PUT CLEAR ") \
+ EM(netfs_sreq_trace_put_consumed, "PUT CONSUME") \
+ EM(netfs_sreq_trace_put_done, "PUT DONE ") \
EM(netfs_sreq_trace_put_failed, "PUT FAILED ") \
EM(netfs_sreq_trace_put_merged, "PUT MERGED ") \
EM(netfs_sreq_trace_put_no_copy, "PUT NO COPY") \
+ EM(netfs_sreq_trace_put_oom, "PUT OOM ") \
+ EM(netfs_sreq_trace_put_wip, "PUT WIP ") \
+ EM(netfs_sreq_trace_put_work, "PUT WORK ") \
E_(netfs_sreq_trace_put_terminated, "PUT TERM ")
+#define netfs_folio_traces \
+ EM(netfs_folio_is_uptodate, "mod-uptodate") \
+ EM(netfs_just_prefetch, "mod-prefetch") \
+ EM(netfs_whole_folio_modify, "mod-whole-f") \
+ EM(netfs_modify_and_clear, "mod-n-clear") \
+ EM(netfs_streaming_write, "mod-streamw") \
+ EM(netfs_streaming_write_cont, "mod-streamw+") \
+ EM(netfs_flush_content, "flush") \
+ EM(netfs_streaming_filled_page, "mod-streamw-f") \
+ EM(netfs_streaming_cont_filled_page, "mod-streamw-f+") \
+ EM(netfs_folio_trace_abandon, "abandon") \
+ EM(netfs_folio_trace_alloc_buffer, "alloc-buf") \
+ EM(netfs_folio_trace_cancel_copy, "cancel-copy") \
+ EM(netfs_folio_trace_cancel_store, "cancel-store") \
+ EM(netfs_folio_trace_clear, "clear") \
+ EM(netfs_folio_trace_clear_cc, "clear-cc") \
+ EM(netfs_folio_trace_clear_g, "clear-g") \
+ EM(netfs_folio_trace_clear_s, "clear-s") \
+ EM(netfs_folio_trace_copy_to_cache, "mark-copy") \
+ EM(netfs_folio_trace_end_copy, "end-copy") \
+ EM(netfs_folio_trace_filled_gaps, "filled-gaps") \
+ EM(netfs_folio_trace_kill, "kill") \
+ EM(netfs_folio_trace_kill_cc, "kill-cc") \
+ EM(netfs_folio_trace_kill_g, "kill-g") \
+ EM(netfs_folio_trace_kill_s, "kill-s") \
+ EM(netfs_folio_trace_mkwrite, "mkwrite") \
+ EM(netfs_folio_trace_mkwrite_plus, "mkwrite+") \
+ EM(netfs_folio_trace_not_under_wback, "!wback") \
+ EM(netfs_folio_trace_not_locked, "!locked") \
+ EM(netfs_folio_trace_put, "put") \
+ EM(netfs_folio_trace_read, "read") \
+ EM(netfs_folio_trace_read_done, "read-done") \
+ EM(netfs_folio_trace_read_gaps, "read-gaps") \
+ EM(netfs_folio_trace_read_unlock, "read-unlock") \
+ EM(netfs_folio_trace_redirtied, "redirtied") \
+ EM(netfs_folio_trace_store, "store") \
+ EM(netfs_folio_trace_store_copy, "store-copy") \
+ EM(netfs_folio_trace_store_plus, "store+") \
+ EM(netfs_folio_trace_wthru, "wthru") \
+ E_(netfs_folio_trace_wthru_plus, "wthru+")
+
+#define netfs_collect_contig_traces \
+ EM(netfs_contig_trace_collect, "Collect") \
+ EM(netfs_contig_trace_jump, "-->JUMP-->") \
+ E_(netfs_contig_trace_unlock, "Unlock")
+
+#define netfs_donate_traces \
+ EM(netfs_trace_donate_tail_to_prev, "tail-to-prev") \
+ EM(netfs_trace_donate_to_prev, "to-prev") \
+ EM(netfs_trace_donate_to_next, "to-next") \
+ E_(netfs_trace_donate_to_deferred_next, "defer-next")
+
+#define netfs_folioq_traces \
+ EM(netfs_trace_folioq_alloc_buffer, "alloc-buf") \
+ EM(netfs_trace_folioq_clear, "clear") \
+ EM(netfs_trace_folioq_delete, "delete") \
+ EM(netfs_trace_folioq_make_space, "make-space") \
+ EM(netfs_trace_folioq_rollbuf_init, "roll-init") \
+ E_(netfs_trace_folioq_read_progress, "r-progress")
+
#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
@@ -90,11 +240,16 @@
#define E_(a, b) a
enum netfs_read_trace { netfs_read_traces } __mode(byte);
+enum netfs_write_trace { netfs_write_traces } __mode(byte);
enum netfs_rreq_trace { netfs_rreq_traces } __mode(byte);
enum netfs_sreq_trace { netfs_sreq_traces } __mode(byte);
enum netfs_failure { netfs_failures } __mode(byte);
enum netfs_rreq_ref_trace { netfs_rreq_ref_traces } __mode(byte);
enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte);
+enum netfs_folio_trace { netfs_folio_traces } __mode(byte);
+enum netfs_collect_contig_trace { netfs_collect_contig_traces } __mode(byte);
+enum netfs_donate_trace { netfs_donate_traces } __mode(byte);
+enum netfs_folioq_trace { netfs_folioq_traces } __mode(byte);
#endif
@@ -107,6 +262,7 @@ enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte);
#define E_(a, b) TRACE_DEFINE_ENUM(a);
netfs_read_traces;
+netfs_write_traces;
netfs_rreq_origins;
netfs_rreq_traces;
netfs_sreq_sources;
@@ -114,6 +270,10 @@ netfs_sreq_traces;
netfs_failures;
netfs_rreq_ref_traces;
netfs_sreq_ref_traces;
+netfs_folio_traces;
+netfs_collect_contig_traces;
+netfs_donate_traces;
+netfs_folioq_traces;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -132,29 +292,31 @@ TRACE_EVENT(netfs_read,
TP_ARGS(rreq, start, len, what),
TP_STRUCT__entry(
- __field(unsigned int, rreq )
- __field(unsigned int, cookie )
- __field(loff_t, start )
- __field(size_t, len )
- __field(enum netfs_read_trace, what )
- __field(unsigned int, netfs_inode )
+ __field(unsigned int, rreq)
+ __field(unsigned int, cookie)
+ __field(loff_t, i_size)
+ __field(loff_t, start)
+ __field(size_t, len)
+ __field(enum netfs_read_trace, what)
+ __field(unsigned int, netfs_inode)
),
TP_fast_assign(
__entry->rreq = rreq->debug_id;
__entry->cookie = rreq->cache_resources.debug_id;
+ __entry->i_size = rreq->i_size;
__entry->start = start;
__entry->len = len;
__entry->what = what;
__entry->netfs_inode = rreq->inode->i_ino;
),
- TP_printk("R=%08x %s c=%08x ni=%x s=%llx %zx",
+ TP_printk("R=%08x %s c=%08x ni=%x s=%llx l=%zx sz=%llx",
__entry->rreq,
__print_symbolic(__entry->what, netfs_read_traces),
__entry->cookie,
__entry->netfs_inode,
- __entry->start, __entry->len)
+ __entry->start, __entry->len, __entry->i_size)
);
TRACE_EVENT(netfs_rreq,
@@ -164,10 +326,10 @@ TRACE_EVENT(netfs_rreq,
TP_ARGS(rreq, what),
TP_STRUCT__entry(
- __field(unsigned int, rreq )
- __field(unsigned int, flags )
- __field(enum netfs_io_origin, origin )
- __field(enum netfs_rreq_trace, what )
+ __field(unsigned int, rreq)
+ __field(unsigned int, flags)
+ __field(enum netfs_io_origin, origin)
+ __field(enum netfs_rreq_trace, what)
),
TP_fast_assign(
@@ -191,15 +353,16 @@ TRACE_EVENT(netfs_sreq,
TP_ARGS(sreq, what),
TP_STRUCT__entry(
- __field(unsigned int, rreq )
- __field(unsigned short, index )
- __field(short, error )
- __field(unsigned short, flags )
- __field(enum netfs_io_source, source )
- __field(enum netfs_sreq_trace, what )
- __field(size_t, len )
- __field(size_t, transferred )
- __field(loff_t, start )
+ __field(unsigned int, rreq)
+ __field(unsigned short, index)
+ __field(short, error)
+ __field(unsigned short, flags)
+ __field(enum netfs_io_source, source)
+ __field(enum netfs_sreq_trace, what)
+ __field(u8, slot)
+ __field(size_t, len)
+ __field(size_t, transferred)
+ __field(loff_t, start)
),
TP_fast_assign(
@@ -212,15 +375,16 @@ TRACE_EVENT(netfs_sreq,
__entry->len = sreq->len;
__entry->transferred = sreq->transferred;
__entry->start = sreq->start;
+ __entry->slot = sreq->io_iter.folioq_slot;
),
- TP_printk("R=%08x[%u] %s %s f=%02x s=%llx %zx/%zx e=%d",
+ TP_printk("R=%08x[%x] %s %s f=%03x s=%llx %zx/%zx s=%u e=%d",
__entry->rreq, __entry->index,
__print_symbolic(__entry->source, netfs_sreq_sources),
__print_symbolic(__entry->what, netfs_sreq_traces),
__entry->flags,
__entry->start, __entry->transferred, __entry->len,
- __entry->error)
+ __entry->slot, __entry->error)
);
TRACE_EVENT(netfs_failure,
@@ -231,15 +395,15 @@ TRACE_EVENT(netfs_failure,
TP_ARGS(rreq, sreq, error, what),
TP_STRUCT__entry(
- __field(unsigned int, rreq )
- __field(short, index )
- __field(short, error )
- __field(unsigned short, flags )
- __field(enum netfs_io_source, source )
- __field(enum netfs_failure, what )
- __field(size_t, len )
- __field(size_t, transferred )
- __field(loff_t, start )
+ __field(unsigned int, rreq)
+ __field(short, index)
+ __field(short, error)
+ __field(unsigned short, flags)
+ __field(enum netfs_io_source, source)
+ __field(enum netfs_failure, what)
+ __field(size_t, len)
+ __field(size_t, transferred)
+ __field(loff_t, start)
),
TP_fast_assign(
@@ -254,7 +418,7 @@ TRACE_EVENT(netfs_failure,
__entry->start = sreq ? sreq->start : 0;
),
- TP_printk("R=%08x[%d] %s f=%02x s=%llx %zx/%zx %s e=%d",
+ TP_printk("R=%08x[%x] %s f=%02x s=%llx %zx/%zx %s e=%d",
__entry->rreq, __entry->index,
__print_symbolic(__entry->source, netfs_sreq_sources),
__entry->flags,
@@ -270,9 +434,9 @@ TRACE_EVENT(netfs_rreq_ref,
TP_ARGS(rreq_debug_id, ref, what),
TP_STRUCT__entry(
- __field(unsigned int, rreq )
- __field(int, ref )
- __field(enum netfs_rreq_ref_trace, what )
+ __field(unsigned int, rreq)
+ __field(int, ref)
+ __field(enum netfs_rreq_ref_trace, what)
),
TP_fast_assign(
@@ -294,10 +458,10 @@ TRACE_EVENT(netfs_sreq_ref,
TP_ARGS(rreq_debug_id, subreq_debug_index, ref, what),
TP_STRUCT__entry(
- __field(unsigned int, rreq )
- __field(unsigned int, subreq )
- __field(int, ref )
- __field(enum netfs_sreq_ref_trace, what )
+ __field(unsigned int, rreq)
+ __field(unsigned int, subreq)
+ __field(int, ref)
+ __field(enum netfs_sreq_ref_trace, what)
),
TP_fast_assign(
@@ -314,6 +478,304 @@ TRACE_EVENT(netfs_sreq_ref,
__entry->ref)
);
+TRACE_EVENT(netfs_folio,
+ TP_PROTO(struct folio *folio, enum netfs_folio_trace why),
+
+ TP_ARGS(folio, why),
+
+ TP_STRUCT__entry(
+ __field(ino_t, ino)
+ __field(pgoff_t, index)
+ __field(unsigned int, nr)
+ __field(enum netfs_folio_trace, why)
+ ),
+
+ TP_fast_assign(
+ struct address_space *__m = READ_ONCE(folio->mapping);
+ __entry->ino = __m ? __m->host->i_ino : 0;
+ __entry->why = why;
+ __entry->index = folio->index;
+ __entry->nr = folio_nr_pages(folio);
+ ),
+
+ TP_printk("i=%05lx ix=%05lx-%05lx %s",
+ __entry->ino, __entry->index, __entry->index + __entry->nr - 1,
+ __print_symbolic(__entry->why, netfs_folio_traces))
+ );
+
+TRACE_EVENT(netfs_write_iter,
+ TP_PROTO(const struct kiocb *iocb, const struct iov_iter *from),
+
+ TP_ARGS(iocb, from),
+
+ TP_STRUCT__entry(
+ __field(unsigned long long, start)
+ __field(size_t, len)
+ __field(unsigned int, flags)
+ __field(unsigned int, ino)
+ ),
+
+ TP_fast_assign(
+ __entry->start = iocb->ki_pos;
+ __entry->len = iov_iter_count(from);
+ __entry->ino = iocb->ki_filp->f_inode->i_ino;
+ __entry->flags = iocb->ki_flags;
+ ),
+
+ TP_printk("WRITE-ITER i=%x s=%llx l=%zx f=%x",
+ __entry->ino, __entry->start, __entry->len, __entry->flags)
+ );
+
+TRACE_EVENT(netfs_write,
+ TP_PROTO(const struct netfs_io_request *wreq,
+ enum netfs_write_trace what),
+
+ TP_ARGS(wreq, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, wreq)
+ __field(unsigned int, cookie)
+ __field(unsigned int, ino)
+ __field(enum netfs_write_trace, what)
+ __field(unsigned long long, start)
+ __field(unsigned long long, len)
+ ),
+
+ TP_fast_assign(
+ struct netfs_inode *__ctx = netfs_inode(wreq->inode);
+ struct fscache_cookie *__cookie = netfs_i_cookie(__ctx);
+ __entry->wreq = wreq->debug_id;
+ __entry->cookie = __cookie ? __cookie->debug_id : 0;
+ __entry->ino = wreq->inode->i_ino;
+ __entry->what = what;
+ __entry->start = wreq->start;
+ __entry->len = wreq->len;
+ ),
+
+ TP_printk("R=%08x %s c=%08x i=%x by=%llx-%llx",
+ __entry->wreq,
+ __print_symbolic(__entry->what, netfs_write_traces),
+ __entry->cookie,
+ __entry->ino,
+ __entry->start, __entry->start + __entry->len - 1)
+ );
+
+TRACE_EVENT(netfs_copy2cache,
+ TP_PROTO(const struct netfs_io_request *rreq,
+ const struct netfs_io_request *creq),
+
+ TP_ARGS(rreq, creq),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq)
+ __field(unsigned int, creq)
+ __field(unsigned int, cookie)
+ __field(unsigned int, ino)
+ ),
+
+ TP_fast_assign(
+ struct netfs_inode *__ctx = netfs_inode(rreq->inode);
+ struct fscache_cookie *__cookie = netfs_i_cookie(__ctx);
+ __entry->rreq = rreq->debug_id;
+ __entry->creq = creq->debug_id;
+ __entry->cookie = __cookie ? __cookie->debug_id : 0;
+ __entry->ino = rreq->inode->i_ino;
+ ),
+
+ TP_printk("R=%08x CR=%08x c=%08x i=%x ",
+ __entry->rreq,
+ __entry->creq,
+ __entry->cookie,
+ __entry->ino)
+ );
+
+TRACE_EVENT(netfs_collect,
+ TP_PROTO(const struct netfs_io_request *wreq),
+
+ TP_ARGS(wreq),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, wreq)
+ __field(unsigned int, len)
+ __field(unsigned long long, transferred)
+ __field(unsigned long long, start)
+ ),
+
+ TP_fast_assign(
+ __entry->wreq = wreq->debug_id;
+ __entry->start = wreq->start;
+ __entry->len = wreq->len;
+ __entry->transferred = wreq->transferred;
+ ),
+
+ TP_printk("R=%08x s=%llx-%llx",
+ __entry->wreq,
+ __entry->start + __entry->transferred,
+ __entry->start + __entry->len)
+ );
+
+TRACE_EVENT(netfs_collect_sreq,
+ TP_PROTO(const struct netfs_io_request *wreq,
+ const struct netfs_io_subrequest *subreq),
+
+ TP_ARGS(wreq, subreq),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, wreq)
+ __field(unsigned int, subreq)
+ __field(unsigned int, stream)
+ __field(unsigned int, len)
+ __field(unsigned int, transferred)
+ __field(unsigned long long, start)
+ ),
+
+ TP_fast_assign(
+ __entry->wreq = wreq->debug_id;
+ __entry->subreq = subreq->debug_index;
+ __entry->stream = subreq->stream_nr;
+ __entry->start = subreq->start;
+ __entry->len = subreq->len;
+ __entry->transferred = subreq->transferred;
+ ),
+
+ TP_printk("R=%08x[%u:%02x] s=%llx t=%x/%x",
+ __entry->wreq, __entry->stream, __entry->subreq,
+ __entry->start, __entry->transferred, __entry->len)
+ );
+
+TRACE_EVENT(netfs_collect_folio,
+ TP_PROTO(const struct netfs_io_request *wreq,
+ const struct folio *folio,
+ unsigned long long fend,
+ unsigned long long collected_to),
+
+ TP_ARGS(wreq, folio, fend, collected_to),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, wreq)
+ __field(unsigned long, index)
+ __field(unsigned long long, fend)
+ __field(unsigned long long, cleaned_to)
+ __field(unsigned long long, collected_to)
+ ),
+
+ TP_fast_assign(
+ __entry->wreq = wreq->debug_id;
+ __entry->index = folio->index;
+ __entry->fend = fend;
+ __entry->cleaned_to = wreq->cleaned_to;
+ __entry->collected_to = collected_to;
+ ),
+
+ TP_printk("R=%08x ix=%05lx r=%llx-%llx t=%llx/%llx",
+ __entry->wreq, __entry->index,
+ (unsigned long long)__entry->index * PAGE_SIZE, __entry->fend,
+ __entry->cleaned_to, __entry->collected_to)
+ );
+
+TRACE_EVENT(netfs_collect_state,
+ TP_PROTO(const struct netfs_io_request *wreq,
+ unsigned long long collected_to,
+ unsigned int notes),
+
+ TP_ARGS(wreq, collected_to, notes),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, wreq)
+ __field(unsigned int, notes)
+ __field(unsigned long long, collected_to)
+ __field(unsigned long long, cleaned_to)
+ ),
+
+ TP_fast_assign(
+ __entry->wreq = wreq->debug_id;
+ __entry->notes = notes;
+ __entry->collected_to = collected_to;
+ __entry->cleaned_to = wreq->cleaned_to;
+ ),
+
+ TP_printk("R=%08x col=%llx cln=%llx n=%x",
+ __entry->wreq, __entry->collected_to,
+ __entry->cleaned_to,
+ __entry->notes)
+ );
+
+TRACE_EVENT(netfs_collect_gap,
+ TP_PROTO(const struct netfs_io_request *wreq,
+ const struct netfs_io_stream *stream,
+ unsigned long long jump_to, char type),
+
+ TP_ARGS(wreq, stream, jump_to, type),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, wreq)
+ __field(unsigned char, stream)
+ __field(unsigned char, type)
+ __field(unsigned long long, from)
+ __field(unsigned long long, to)
+ ),
+
+ TP_fast_assign(
+ __entry->wreq = wreq->debug_id;
+ __entry->stream = stream->stream_nr;
+ __entry->from = stream->collected_to;
+ __entry->to = jump_to;
+ __entry->type = type;
+ ),
+
+ TP_printk("R=%08x[%x:] %llx->%llx %c",
+ __entry->wreq, __entry->stream,
+ __entry->from, __entry->to, __entry->type)
+ );
+
+TRACE_EVENT(netfs_collect_stream,
+ TP_PROTO(const struct netfs_io_request *wreq,
+ const struct netfs_io_stream *stream),
+
+ TP_ARGS(wreq, stream),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, wreq)
+ __field(unsigned char, stream)
+ __field(unsigned long long, collected_to)
+ __field(unsigned long long, front)
+ ),
+
+ TP_fast_assign(
+ __entry->wreq = wreq->debug_id;
+ __entry->stream = stream->stream_nr;
+ __entry->collected_to = stream->collected_to;
+ __entry->front = stream->front ? stream->front->start : UINT_MAX;
+ ),
+
+ TP_printk("R=%08x[%x:] cto=%llx frn=%llx",
+ __entry->wreq, __entry->stream,
+ __entry->collected_to, __entry->front)
+ );
+
+TRACE_EVENT(netfs_folioq,
+ TP_PROTO(const struct folio_queue *fq,
+ enum netfs_folioq_trace trace),
+
+ TP_ARGS(fq, trace),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq)
+ __field(unsigned int, id)
+ __field(enum netfs_folioq_trace, trace)
+ ),
+
+ TP_fast_assign(
+ __entry->rreq = fq ? fq->rreq_id : 0;
+ __entry->id = fq ? fq->debug_id : 0;
+ __entry->trace = trace;
+ ),
+
+ TP_printk("R=%08x fq=%x %s",
+ __entry->rreq, __entry->id,
+ __print_symbolic(__entry->trace, netfs_folioq_traces))
+ );
+
#undef EM
#undef E_
#endif /* _TRACE_NETFS_H */
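As a rough illustration, the reference-tracking events above are fired next to the refcount operation they describe; a hand-written sketch (the ref and debug_id field names of struct netfs_io_request are assumed here):

/* Illustrative only: take a ref on a read request and log it. */
static void example_get_request(struct netfs_io_request *rreq,
				enum netfs_rreq_ref_trace what)
{
	refcount_inc(&rreq->ref);
	trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
}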
diff --git a/include/trace/events/netlink.h b/include/trace/events/netlink.h
index 3b7be3b386a4..f036b8a20505 100644
--- a/include/trace/events/netlink.h
+++ b/include/trace/events/netlink.h
@@ -17,7 +17,7 @@ TRACE_EVENT(netlink_extack,
),
TP_fast_assign(
- __assign_str(msg, msg);
+ __assign_str(msg);
),
TP_printk("msg=%s", __get_str(msg))
diff --git a/include/trace/events/nilfs2.h b/include/trace/events/nilfs2.h
index 8efc6236f57c..8880c11733dd 100644
--- a/include/trace/events/nilfs2.h
+++ b/include/trace/events/nilfs2.h
@@ -200,7 +200,11 @@ TRACE_EVENT(nilfs2_mdt_submit_block,
__field(struct inode *, inode)
__field(unsigned long, ino)
__field(unsigned long, blkoff)
- __field(enum req_op, mode)
+ /*
+	 * Use __field_struct() to avoid is_signed_type() on the
+ * bitwise type enum req_op.
+ */
+ __field_struct(enum req_op, mode)
),
TP_fast_assign(
diff --git a/include/trace/events/notifier.h b/include/trace/events/notifier.h
new file mode 100644
index 000000000000..26b298a31950
--- /dev/null
+++ b/include/trace/events/notifier.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM notifier
+
+#if !defined(_TRACE_NOTIFIERS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NOTIFIERS_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(notifier_info,
+
+ TP_PROTO(void *cb),
+
+ TP_ARGS(cb),
+
+ TP_STRUCT__entry(
+ __field(void *, cb)
+ ),
+
+ TP_fast_assign(
+ __entry->cb = cb;
+ ),
+
+ TP_printk("%ps", __entry->cb)
+);
+
+/*
+ * notifier_register - called upon notifier callback registration
+ *
+ * @cb: callback pointer
+ *
+ */
+DEFINE_EVENT(notifier_info, notifier_register,
+
+ TP_PROTO(void *cb),
+
+ TP_ARGS(cb)
+);
+
+/*
+ * notifier_unregister - called upon notifier callback unregistration
+ *
+ * @cb: callback pointer
+ *
+ */
+DEFINE_EVENT(notifier_info, notifier_unregister,
+
+ TP_PROTO(void *cb),
+
+ TP_ARGS(cb)
+);
+
+/*
+ * notifier_run - called upon notifier callback execution
+ *
+ * @cb: callback pointer
+ *
+ */
+DEFINE_EVENT(notifier_info, notifier_run,
+
+ TP_PROTO(void *cb),
+
+ TP_ARGS(cb)
+);
+
+#endif /* _TRACE_NOTIFIERS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
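
The three notifier events are thin wrappers around a single notifier_info class, so a caller only passes the callback pointer and the "%ps" format resolves it to a symbol name. A hedged sketch of a call site (the function and variable names below are illustrative; this header itself adds no callers):

#include <linux/notifier.h>
#include <trace/events/notifier.h>

static int sample_call_one(struct notifier_block *nb, unsigned long action,
			   void *data)
{
	/* trace_notifier_run() is generated by DEFINE_EVENT() above */
	trace_notifier_run((void *)nb->notifier_call);
	return nb->notifier_call(nb, action, data);
}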
diff --git a/include/trace/events/oom.h b/include/trace/events/oom.h
index 26a11e4a2c36..9f0a5d1482c4 100644
--- a/include/trace/events/oom.h
+++ b/include/trace/events/oom.h
@@ -7,6 +7,8 @@
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>
+#define PG_COUNT_TO_KB(x) ((x) << (PAGE_SHIFT - 10))
+
TRACE_EVENT(oom_score_adj_update,
TP_PROTO(struct task_struct *task),
@@ -53,8 +55,8 @@ TRACE_EVENT(reclaim_retry_zone,
),
TP_fast_assign(
- __entry->node = zone_to_nid(zoneref->zone);
- __entry->zone_idx = zoneref->zone_idx;
+ __entry->node = zonelist_node_idx(zoneref);
+ __entry->zone_idx = zonelist_zone_idx(zoneref);
__entry->order = order;
__entry->reclaimable = reclaimable;
__entry->available = available;
@@ -72,19 +74,45 @@ TRACE_EVENT(reclaim_retry_zone,
);
TRACE_EVENT(mark_victim,
- TP_PROTO(int pid),
+ TP_PROTO(struct task_struct *task, uid_t uid),
- TP_ARGS(pid),
+ TP_ARGS(task, uid),
TP_STRUCT__entry(
__field(int, pid)
+ __string(comm, task->comm)
+ __field(unsigned long, total_vm)
+ __field(unsigned long, anon_rss)
+ __field(unsigned long, file_rss)
+ __field(unsigned long, shmem_rss)
+ __field(uid_t, uid)
+ __field(unsigned long, pgtables)
+ __field(short, oom_score_adj)
),
TP_fast_assign(
- __entry->pid = pid;
+ __entry->pid = task->pid;
+ __assign_str(comm);
+ __entry->total_vm = PG_COUNT_TO_KB(task->mm->total_vm);
+ __entry->anon_rss = PG_COUNT_TO_KB(get_mm_counter(task->mm, MM_ANONPAGES));
+ __entry->file_rss = PG_COUNT_TO_KB(get_mm_counter(task->mm, MM_FILEPAGES));
+ __entry->shmem_rss = PG_COUNT_TO_KB(get_mm_counter(task->mm, MM_SHMEMPAGES));
+ __entry->uid = uid;
+ __entry->pgtables = mm_pgtables_bytes(task->mm) >> 10;
+ __entry->oom_score_adj = task->signal->oom_score_adj;
),
- TP_printk("pid=%d", __entry->pid)
+ TP_printk("pid=%d comm=%s total-vm=%lukB anon-rss=%lukB file-rss:%lukB shmem-rss:%lukB uid=%u pgtables=%lukB oom_score_adj=%hd",
+ __entry->pid,
+ __get_str(comm),
+ __entry->total_vm,
+ __entry->anon_rss,
+ __entry->file_rss,
+ __entry->shmem_rss,
+ __entry->uid,
+ __entry->pgtables,
+ __entry->oom_score_adj
+ )
);
TRACE_EVENT(wake_reaper,
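
The enlarged mark_victim event reports its memory counters in kilobytes via the new PG_COUNT_TO_KB() helper. As a worked example of the arithmetic (not code from the patch): a page is (1 << PAGE_SHIFT) bytes and a kilobyte is (1 << 10) bytes, so shifting a page count left by (PAGE_SHIFT - 10) yields kilobytes; with 4 KiB pages (PAGE_SHIFT == 12) that is a shift by 2:

	/* assuming PAGE_SHIFT == 12, i.e. 4 KiB pages */
	PG_COUNT_TO_KB(300) == 300 << 2 == 1200		/* 300 pages -> 1200 kB */

mm_pgtables_bytes() already returns bytes, which is why the pgtables field is shifted right by 10 instead of going through the macro.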
diff --git a/include/trace/events/osnoise.h b/include/trace/events/osnoise.h
index 82f741ec0f57..3f4273623801 100644
--- a/include/trace/events/osnoise.h
+++ b/include/trace/events/osnoise.h
@@ -3,9 +3,105 @@
#define TRACE_SYSTEM osnoise
#if !defined(_OSNOISE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+
+#ifndef _OSNOISE_TRACE_H
#define _OSNOISE_TRACE_H
+/*
+ * osnoise sample structure definition. Used to store the statistics of a
+ * sample run.
+ */
+struct osnoise_sample {
+ u64 runtime; /* runtime */
+ u64 noise; /* noise */
+ u64 max_sample; /* max single noise sample */
+ int hw_count; /* # HW (incl. hypervisor) interference */
+ int nmi_count; /* # NMIs during this sample */
+ int irq_count; /* # IRQs during this sample */
+ int softirq_count; /* # softirqs during this sample */
+ int thread_count; /* # threads during this sample */
+};
+
+#ifdef CONFIG_TIMERLAT_TRACER
+/*
+ * timerlat sample structure definition. Used to store the statistics of
+ * a sample run.
+ */
+struct timerlat_sample {
+ u64 timer_latency; /* timer_latency */
+ unsigned int seqnum; /* unique sequence */
+ int context; /* timer context */
+};
+#endif // CONFIG_TIMERLAT_TRACER
+#endif // _OSNOISE_TRACE_H
#include <linux/tracepoint.h>
+TRACE_EVENT(osnoise_sample,
+
+ TP_PROTO(struct osnoise_sample *s),
+
+ TP_ARGS(s),
+
+ TP_STRUCT__entry(
+ __field( u64, runtime )
+ __field( u64, noise )
+ __field( u64, max_sample )
+ __field( int, hw_count )
+ __field( int, irq_count )
+ __field( int, nmi_count )
+ __field( int, softirq_count )
+ __field( int, thread_count )
+ ),
+
+ TP_fast_assign(
+ __entry->runtime = s->runtime;
+ __entry->noise = s->noise;
+ __entry->max_sample = s->max_sample;
+ __entry->hw_count = s->hw_count;
+ __entry->irq_count = s->irq_count;
+ __entry->nmi_count = s->nmi_count;
+ __entry->softirq_count = s->softirq_count;
+ __entry->thread_count = s->thread_count;
+ ),
+
+ TP_printk("runtime=%llu noise=%llu max_sample=%llu hw_count=%d"
+ " irq_count=%d nmi_count=%d softirq_count=%d"
+ " thread_count=%d",
+ __entry->runtime,
+ __entry->noise,
+ __entry->max_sample,
+ __entry->hw_count,
+ __entry->irq_count,
+ __entry->nmi_count,
+ __entry->softirq_count,
+ __entry->thread_count)
+);
+
+#ifdef CONFIG_TIMERLAT_TRACER
+TRACE_EVENT(timerlat_sample,
+
+ TP_PROTO(struct timerlat_sample *s),
+
+ TP_ARGS(s),
+
+ TP_STRUCT__entry(
+ __field( u64, timer_latency )
+ __field( unsigned int, seqnum )
+ __field( int, context )
+ ),
+
+ TP_fast_assign(
+ __entry->timer_latency = s->timer_latency;
+ __entry->seqnum = s->seqnum;
+ __entry->context = s->context;
+ ),
+
+ TP_printk("timer_latency=%llu seqnum=%u context=%d",
+ __entry->timer_latency,
+ __entry->seqnum,
+ __entry->context)
+);
+#endif // CONFIG_TIMERLAT_TRACER
+
TRACE_EVENT(thread_noise,
TP_PROTO(struct task_struct *t, u64 start, u64 duration),
@@ -75,7 +171,7 @@ TRACE_EVENT(irq_noise,
),
TP_fast_assign(
- __assign_str(desc, desc);
+ __assign_str(desc);
__entry->vector = vector;
__entry->start = start;
__entry->duration = duration;
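
The new osnoise_sample and timerlat_sample events take a pointer to the corresponding sample structure, so the tracer can aggregate a whole measurement window and emit it in one call. A hedged sketch of such a report (the values and the function name are illustrative; the real producer is the osnoise tracer):

static void sample_report_window(void)
{
	struct osnoise_sample s = {
		.runtime	= 1000000,	/* ns spent sampling */
		.noise		= 1234,		/* ns of noise observed */
		.max_sample	= 500,		/* largest single noise event */
		.hw_count	= 0,
		.nmi_count	= 0,
		.irq_count	= 2,
		.softirq_count	= 1,
		.thread_count	= 0,
	};

	/* trace_osnoise_sample() is generated by TRACE_EVENT(osnoise_sample, ...) */
	trace_osnoise_sample(&s);
}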
diff --git a/include/trace/events/page_pool.h b/include/trace/events/page_pool.h
index ca534501158b..31825ed30032 100644
--- a/include/trace/events/page_pool.h
+++ b/include/trace/events/page_pool.h
@@ -9,7 +9,7 @@
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
TRACE_EVENT(page_pool_release,
@@ -42,51 +42,53 @@ TRACE_EVENT(page_pool_release,
TRACE_EVENT(page_pool_state_release,
TP_PROTO(const struct page_pool *pool,
- const struct page *page, u32 release),
+ netmem_ref netmem, u32 release),
- TP_ARGS(pool, page, release),
+ TP_ARGS(pool, netmem, release),
TP_STRUCT__entry(
__field(const struct page_pool *, pool)
- __field(const struct page *, page)
+ __field(unsigned long, netmem)
__field(u32, release)
__field(unsigned long, pfn)
),
TP_fast_assign(
__entry->pool = pool;
- __entry->page = page;
+ __entry->netmem = (__force unsigned long)netmem;
__entry->release = release;
- __entry->pfn = page_to_pfn(page);
+ __entry->pfn = netmem_pfn_trace(netmem);
),
- TP_printk("page_pool=%p page=%p pfn=0x%lx release=%u",
- __entry->pool, __entry->page, __entry->pfn, __entry->release)
+ TP_printk("page_pool=%p netmem=%p is_net_iov=%lu pfn=0x%lx release=%u",
+ __entry->pool, (void *)__entry->netmem,
+ __entry->netmem & NET_IOV, __entry->pfn, __entry->release)
);
TRACE_EVENT(page_pool_state_hold,
TP_PROTO(const struct page_pool *pool,
- const struct page *page, u32 hold),
+ netmem_ref netmem, u32 hold),
- TP_ARGS(pool, page, hold),
+ TP_ARGS(pool, netmem, hold),
TP_STRUCT__entry(
__field(const struct page_pool *, pool)
- __field(const struct page *, page)
+ __field(unsigned long, netmem)
__field(u32, hold)
__field(unsigned long, pfn)
),
TP_fast_assign(
__entry->pool = pool;
- __entry->page = page;
+ __entry->netmem = (__force unsigned long)netmem;
__entry->hold = hold;
- __entry->pfn = page_to_pfn(page);
+ __entry->pfn = netmem_pfn_trace(netmem);
),
- TP_printk("page_pool=%p page=%p pfn=0x%lx hold=%u",
- __entry->pool, __entry->page, __entry->pfn, __entry->hold)
+ TP_printk("page_pool=%p netmem=%p is_net_iov=%lu, pfn=0x%lx hold=%u",
+ __entry->pool, (void *)__entry->netmem,
+ __entry->netmem & NET_IOV, __entry->pfn, __entry->hold)
);
TRACE_EVENT(page_pool_update_nid,
diff --git a/include/trace/events/page_ref.h b/include/trace/events/page_ref.h
index 8a99c1cd417b..ea6b5c4baf3d 100644
--- a/include/trace/events/page_ref.h
+++ b/include/trace/events/page_ref.h
@@ -28,9 +28,9 @@ DECLARE_EVENT_CLASS(page_ref_mod_template,
TP_fast_assign(
__entry->pfn = page_to_pfn(page);
- __entry->flags = page->flags;
+ __entry->flags = page->flags.f;
__entry->count = page_ref_count(page);
- __entry->mapcount = page_mapcount(page);
+ __entry->mapcount = atomic_read(&page->_mapcount);
__entry->mapping = page->mapping;
__entry->mt = get_pageblock_migratetype(page);
__entry->val = v;
@@ -77,9 +77,9 @@ DECLARE_EVENT_CLASS(page_ref_mod_and_test_template,
TP_fast_assign(
__entry->pfn = page_to_pfn(page);
- __entry->flags = page->flags;
+ __entry->flags = page->flags.f;
__entry->count = page_ref_count(page);
- __entry->mapcount = page_mapcount(page);
+ __entry->mapcount = atomic_read(&page->_mapcount);
__entry->mapping = page->mapping;
__entry->mt = get_pageblock_migratetype(page);
__entry->val = v;
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 77f14f7a11d4..370f8df2fdb4 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -62,28 +62,45 @@ TRACE_EVENT(cpu_idle_miss,
(unsigned long)__entry->state, (__entry->below)?"below":"above")
);
-TRACE_EVENT(powernv_throttle,
+#ifdef CONFIG_ARM_PSCI_CPUIDLE
+DECLARE_EVENT_CLASS(psci_domain_idle,
- TP_PROTO(int chip_id, const char *reason, int pmax),
+ TP_PROTO(unsigned int cpu_id, unsigned int state, bool s2idle),
- TP_ARGS(chip_id, reason, pmax),
+ TP_ARGS(cpu_id, state, s2idle),
TP_STRUCT__entry(
- __field(int, chip_id)
- __string(reason, reason)
- __field(int, pmax)
+ __field(u32, cpu_id)
+ __field(u32, state)
+ __field(bool, s2idle)
),
TP_fast_assign(
- __entry->chip_id = chip_id;
- __assign_str(reason, reason);
- __entry->pmax = pmax;
+ __entry->cpu_id = cpu_id;
+ __entry->state = state;
+ __entry->s2idle = s2idle;
),
- TP_printk("Chip %d Pmax %d %s", __entry->chip_id,
- __entry->pmax, __get_str(reason))
+ TP_printk("cpu_id=%lu state=0x%lx is_s2idle=%s",
+ (unsigned long)__entry->cpu_id, (unsigned long)__entry->state,
+ (__entry->s2idle)?"yes":"no")
+);
+
+DEFINE_EVENT(psci_domain_idle, psci_domain_idle_enter,
+
+ TP_PROTO(unsigned int cpu_id, unsigned int state, bool s2idle),
+
+ TP_ARGS(cpu_id, state, s2idle)
);
+DEFINE_EVENT(psci_domain_idle, psci_domain_idle_exit,
+
+ TP_PROTO(unsigned int cpu_id, unsigned int state, bool s2idle),
+
+ TP_ARGS(cpu_id, state, s2idle)
+);
+#endif
+
TRACE_EVENT(pstate_sample,
TP_PROTO(u32 core_busy,
@@ -162,7 +179,8 @@ TRACE_EVENT(pstate_sample,
{ PM_EVENT_HIBERNATE, "hibernate" }, \
{ PM_EVENT_THAW, "thaw" }, \
{ PM_EVENT_RESTORE, "restore" }, \
- { PM_EVENT_RECOVER, "recover" })
+ { PM_EVENT_RECOVER, "recover" }, \
+ { PM_EVENT_POWEROFF, "poweroff" })
DEFINE_EVENT(cpu, cpu_frequency,
@@ -195,6 +213,7 @@ TRACE_EVENT(cpu_frequency_limits,
(unsigned long)__entry->cpu_id)
);
+#ifdef CONFIG_PM_SLEEP
TRACE_EVENT(device_pm_callback_start,
TP_PROTO(struct device *dev, const char *pm_ops, int event),
@@ -210,11 +229,10 @@ TRACE_EVENT(device_pm_callback_start,
),
TP_fast_assign(
- __assign_str(device, dev_name(dev));
- __assign_str(driver, dev_driver_string(dev));
- __assign_str(parent,
- dev->parent ? dev_name(dev->parent) : "none");
- __assign_str(pm_ops, pm_ops ? pm_ops : "none ");
+ __assign_str(device);
+ __assign_str(driver);
+ __assign_str(parent);
+ __assign_str(pm_ops);
__entry->event = event;
),
@@ -236,14 +254,15 @@ TRACE_EVENT(device_pm_callback_end,
),
TP_fast_assign(
- __assign_str(device, dev_name(dev));
- __assign_str(driver, dev_driver_string(dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->error = error;
),
TP_printk("%s %s, err=%d",
__get_str(driver), __get_str(device), __entry->error)
);
+#endif
TRACE_EVENT(suspend_resume,
@@ -279,7 +298,7 @@ DECLARE_EVENT_CLASS(wakeup_source,
),
TP_fast_assign(
- __assign_str(name, name);
+ __assign_str(name);
__entry->state = state;
),
@@ -301,53 +320,7 @@ DEFINE_EVENT(wakeup_source, wakeup_source_deactivate,
TP_ARGS(name, state)
);
-/*
- * The clock events are used for clock enable/disable and for
- * clock rate change
- */
-DECLARE_EVENT_CLASS(clock,
-
- TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
-
- TP_ARGS(name, state, cpu_id),
-
- TP_STRUCT__entry(
- __string( name, name )
- __field( u64, state )
- __field( u64, cpu_id )
- ),
-
- TP_fast_assign(
- __assign_str(name, name);
- __entry->state = state;
- __entry->cpu_id = cpu_id;
- ),
-
- TP_printk("%s state=%lu cpu_id=%lu", __get_str(name),
- (unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
-);
-
-DEFINE_EVENT(clock, clock_enable,
-
- TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
-
- TP_ARGS(name, state, cpu_id)
-);
-
-DEFINE_EVENT(clock, clock_disable,
-
- TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
-
- TP_ARGS(name, state, cpu_id)
-);
-
-DEFINE_EVENT(clock, clock_set_rate,
-
- TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
-
- TP_ARGS(name, state, cpu_id)
-);
-
+#ifdef CONFIG_ARCH_OMAP2PLUS
/*
* The power domain events are used for power domains transitions
*/
@@ -364,7 +337,7 @@ DECLARE_EVENT_CLASS(power_domain,
),
TP_fast_assign(
- __assign_str(name, name);
+ __assign_str(name);
__entry->state = state;
__entry->cpu_id = cpu_id;
),
@@ -379,6 +352,7 @@ DEFINE_EVENT(power_domain, power_domain_target,
TP_ARGS(name, state, cpu_id)
);
+#endif
/*
* CPU latency QoS events used for global CPU latency QoS list updates
@@ -486,7 +460,7 @@ DECLARE_EVENT_CLASS(dev_pm_qos_request,
),
TP_fast_assign(
- __assign_str(name, name);
+ __assign_str(name);
__entry->type = type;
__entry->new_value = new_value;
),
diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h
index 3f249e150c0c..f99562d2b496 100644
--- a/include/trace/events/preemptirq.h
+++ b/include/trace/events/preemptirq.h
@@ -43,8 +43,6 @@ DEFINE_EVENT(preemptirq_template, irq_enable,
#else
#define trace_irq_enable(...)
#define trace_irq_disable(...)
-#define trace_irq_enable_rcuidle(...)
-#define trace_irq_disable_rcuidle(...)
#endif
#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
@@ -58,8 +56,6 @@ DEFINE_EVENT(preemptirq_template, preempt_enable,
#else
#define trace_preempt_enable(...)
#define trace_preempt_disable(...)
-#define trace_preempt_enable_rcuidle(...)
-#define trace_preempt_disable_rcuidle(...)
#endif
#endif /* _TRACE_PREEMPTIRQ_H */
@@ -69,10 +65,6 @@ DEFINE_EVENT(preemptirq_template, preempt_enable,
#else /* !CONFIG_PREEMPTIRQ_TRACEPOINTS */
#define trace_irq_enable(...)
#define trace_irq_disable(...)
-#define trace_irq_enable_rcuidle(...)
-#define trace_irq_disable_rcuidle(...)
#define trace_preempt_enable(...)
#define trace_preempt_disable(...)
-#define trace_preempt_enable_rcuidle(...)
-#define trace_preempt_disable_rcuidle(...)
#endif
diff --git a/include/trace/events/pwc.h b/include/trace/events/pwc.h
index a2da764a3b41..0543702542d9 100644
--- a/include/trace/events/pwc.h
+++ b/include/trace/events/pwc.h
@@ -26,7 +26,7 @@ TRACE_EVENT(pwc_handler_enter,
__entry->urb__actual_length = urb->actual_length;
__entry->fbuf__filled = (pdev->fill_buf
? pdev->fill_buf->filled : 0);
- __assign_str(name, pdev->v4l2_dev.name);
+ __assign_str(name);
),
TP_printk("dev=%s (fbuf=%p filled=%d) urb=%p (status=%d actual_length=%u)",
__get_str(name),
@@ -50,7 +50,7 @@ TRACE_EVENT(pwc_handler_exit,
__entry->urb = urb;
__entry->fbuf = pdev->fill_buf;
__entry->fbuf__filled = pdev->fill_buf->filled;
- __assign_str(name, pdev->v4l2_dev.name);
+ __assign_str(name);
),
TP_printk(" dev=%s (fbuf=%p filled=%d) urb=%p",
__get_str(name),
diff --git a/include/trace/events/pwm.h b/include/trace/events/pwm.h
index 12b35e4ff917..8ba898fd335c 100644
--- a/include/trace/events/pwm.h
+++ b/include/trace/events/pwm.h
@@ -8,14 +8,135 @@
#include <linux/pwm.h>
#include <linux/tracepoint.h>
+#define TP_PROTO_pwm(args...) \
+ TP_PROTO(struct pwm_device *pwm, args)
+
+#define TP_ARGS_pwm(args...) \
+ TP_ARGS(pwm, args)
+
+#define TP_STRUCT__entry_pwm(args...) \
+ TP_STRUCT__entry( \
+ __field(unsigned int, chipid) \
+ __field(unsigned int, hwpwm) \
+ args)
+
+#define TP_fast_assign_pwm(args...) \
+ TP_fast_assign( \
+ __entry->chipid = pwm->chip->id; \
+ __entry->hwpwm = pwm->hwpwm; \
+ args)
+
+#define TP_printk_pwm(fmt, args...) \
+ TP_printk("pwmchip%u.%u: " fmt, __entry->chipid, __entry->hwpwm, args)
+
+#define __field_pwmwf(wf) \
+ __field(u64, wf ## _period_length_ns) \
+ __field(u64, wf ## _duty_length_ns) \
+ __field(u64, wf ## _duty_offset_ns) \
+
+#define fast_assign_pwmwf(wf) \
+ __entry->wf ## _period_length_ns = wf->period_length_ns; \
+ __entry->wf ## _duty_length_ns = wf->duty_length_ns; \
+ __entry->wf ## _duty_offset_ns = wf->duty_offset_ns
+
+#define printk_pwmwf_format(wf) \
+ "%lld/%lld [+%lld]"
+
+#define printk_pwmwf_formatargs(wf) \
+ __entry->wf ## _duty_length_ns, __entry->wf ## _period_length_ns, __entry->wf ## _duty_offset_ns
+
+TRACE_EVENT(pwm_round_waveform_tohw,
+
+ TP_PROTO_pwm(const struct pwm_waveform *wf, void *wfhw, int err),
+
+ TP_ARGS_pwm(wf, wfhw, err),
+
+ TP_STRUCT__entry_pwm(
+ __field_pwmwf(wf)
+ __field(void *, wfhw)
+ __field(int, err)
+ ),
+
+ TP_fast_assign_pwm(
+ fast_assign_pwmwf(wf);
+ __entry->wfhw = wfhw;
+ __entry->err = err;
+ ),
+
+ TP_printk_pwm(printk_pwmwf_format(wf) " > %p err=%d",
+ printk_pwmwf_formatargs(wf), __entry->wfhw, __entry->err)
+);
+
+TRACE_EVENT(pwm_round_waveform_fromhw,
+
+ TP_PROTO_pwm(const void *wfhw, struct pwm_waveform *wf, int err),
+
+ TP_ARGS_pwm(wfhw, wf, err),
+
+ TP_STRUCT__entry_pwm(
+ __field(const void *, wfhw)
+ __field_pwmwf(wf)
+ __field(int, err)
+ ),
+
+ TP_fast_assign_pwm(
+ __entry->wfhw = wfhw;
+ fast_assign_pwmwf(wf);
+ __entry->err = err;
+ ),
+
+ TP_printk_pwm("%p > " printk_pwmwf_format(wf) " err=%d",
+ __entry->wfhw, printk_pwmwf_formatargs(wf), __entry->err)
+);
+
+TRACE_EVENT(pwm_read_waveform,
+
+ TP_PROTO_pwm(void *wfhw, int err),
+
+ TP_ARGS_pwm(wfhw, err),
+
+ TP_STRUCT__entry_pwm(
+ __field(void *, wfhw)
+ __field(int, err)
+ ),
+
+ TP_fast_assign_pwm(
+ __entry->wfhw = wfhw;
+ __entry->err = err;
+ ),
+
+ TP_printk_pwm("%p err=%d",
+ __entry->wfhw, __entry->err)
+);
+
+TRACE_EVENT(pwm_write_waveform,
+
+ TP_PROTO_pwm(const void *wfhw, int err),
+
+ TP_ARGS_pwm(wfhw, err),
+
+ TP_STRUCT__entry_pwm(
+ __field(const void *, wfhw)
+ __field(int, err)
+ ),
+
+ TP_fast_assign_pwm(
+ __entry->wfhw = wfhw;
+ __entry->err = err;
+ ),
+
+ TP_printk_pwm("%p err=%d",
+ __entry->wfhw, __entry->err)
+);
+
+
DECLARE_EVENT_CLASS(pwm,
TP_PROTO(struct pwm_device *pwm, const struct pwm_state *state, int err),
TP_ARGS(pwm, state, err),
- TP_STRUCT__entry(
- __field(struct pwm_device *, pwm)
+ TP_STRUCT__entry_pwm(
__field(u64, period)
__field(u64, duty_cycle)
__field(enum pwm_polarity, polarity)
@@ -23,8 +144,7 @@ DECLARE_EVENT_CLASS(pwm,
__field(int, err)
),
- TP_fast_assign(
- __entry->pwm = pwm;
+ TP_fast_assign_pwm(
__entry->period = state->period;
__entry->duty_cycle = state->duty_cycle;
__entry->polarity = state->polarity;
@@ -32,8 +152,8 @@ DECLARE_EVENT_CLASS(pwm,
__entry->err = err;
),
- TP_printk("%p: period=%llu duty_cycle=%llu polarity=%d enabled=%d err=%d",
- __entry->pwm, __entry->period, __entry->duty_cycle,
+ TP_printk_pwm("period=%llu duty_cycle=%llu polarity=%d enabled=%d err=%d",
+ __entry->period, __entry->duty_cycle,
__entry->polarity, __entry->enabled, __entry->err)
);
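
The TP_*_pwm() helpers factor out the per-device boilerplate: every waveform event records the chip id and hwpwm index and prefixes its output with "pwmchipC.H: ". Expanded by hand for illustration only (the event name below does not exist in the patch), pwm_read_waveform is roughly equivalent to:

TRACE_EVENT(pwm_read_waveform_sketch,
	TP_PROTO(struct pwm_device *pwm, void *wfhw, int err),
	TP_ARGS(pwm, wfhw, err),

	TP_STRUCT__entry(
		__field(unsigned int, chipid)
		__field(unsigned int, hwpwm)
		__field(void *, wfhw)
		__field(int, err)
	),

	TP_fast_assign(
		__entry->chipid = pwm->chip->id;
		__entry->hwpwm = pwm->hwpwm;
		__entry->wfhw = wfhw;
		__entry->err = err;
	),

	TP_printk("pwmchip%u.%u: %p err=%d",
		  __entry->chipid, __entry->hwpwm, __entry->wfhw, __entry->err)
);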
diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h
index a3995925cb05..ff33f41a9db7 100644
--- a/include/trace/events/qdisc.h
+++ b/include/trace/events/qdisc.h
@@ -81,15 +81,15 @@ TRACE_EVENT(qdisc_reset,
TP_ARGS(q),
TP_STRUCT__entry(
- __string( dev, qdisc_dev(q) )
- __string( kind, q->ops->id )
- __field( u32, parent )
- __field( u32, handle )
+ __string( dev, qdisc_dev(q) ? qdisc_dev(q)->name : "(null)" )
+ __string( kind, q->ops->id )
+ __field( u32, parent )
+ __field( u32, handle )
),
TP_fast_assign(
- __assign_str(dev, qdisc_dev(q));
- __assign_str(kind, q->ops->id);
+ __assign_str(dev);
+ __assign_str(kind);
__entry->parent = q->parent;
__entry->handle = q->handle;
),
@@ -106,15 +106,15 @@ TRACE_EVENT(qdisc_destroy,
TP_ARGS(q),
TP_STRUCT__entry(
- __string( dev, qdisc_dev(q) )
- __string( kind, q->ops->id )
- __field( u32, parent )
- __field( u32, handle )
+ __string( dev, qdisc_dev(q)->name )
+ __string( kind, q->ops->id )
+ __field( u32, parent )
+ __field( u32, handle )
),
TP_fast_assign(
- __assign_str(dev, qdisc_dev(q));
- __assign_str(kind, q->ops->id);
+ __assign_str(dev);
+ __assign_str(kind);
__entry->parent = q->parent;
__entry->handle = q->handle;
),
@@ -137,8 +137,8 @@ TRACE_EVENT(qdisc_create,
),
TP_fast_assign(
- __assign_str(dev, dev->name);
- __assign_str(kind, ops->id);
+ __assign_str(dev);
+ __assign_str(kind);
__entry->parent = parent;
),
diff --git a/include/trace/events/qla.h b/include/trace/events/qla.h
index e7fd55e7dc3d..8800c35525a1 100644
--- a/include/trace/events/qla.h
+++ b/include/trace/events/qla.h
@@ -25,7 +25,7 @@ DECLARE_EVENT_CLASS(qla_log_event,
__vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- __assign_str(buf, buf);
+ __assign_str(buf);
__assign_vstr(msg, vaf->fmt, vaf->va);
),
diff --git a/include/trace/events/qrtr.h b/include/trace/events/qrtr.h
index b1de14c3bb93..14f822983741 100644
--- a/include/trace/events/qrtr.h
+++ b/include/trace/events/qrtr.h
@@ -10,15 +10,16 @@
TRACE_EVENT(qrtr_ns_service_announce_new,
- TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port),
+ TP_PROTO(unsigned int service, unsigned int instance,
+ unsigned int node, unsigned int port),
TP_ARGS(service, instance, node, port),
TP_STRUCT__entry(
- __field(__le32, service)
- __field(__le32, instance)
- __field(__le32, node)
- __field(__le32, port)
+ __field(unsigned int, service)
+ __field(unsigned int, instance)
+ __field(unsigned int, node)
+ __field(unsigned int, port)
),
TP_fast_assign(
@@ -36,15 +37,16 @@ TRACE_EVENT(qrtr_ns_service_announce_new,
TRACE_EVENT(qrtr_ns_service_announce_del,
- TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port),
+ TP_PROTO(unsigned int service, unsigned int instance,
+ unsigned int node, unsigned int port),
TP_ARGS(service, instance, node, port),
TP_STRUCT__entry(
- __field(__le32, service)
- __field(__le32, instance)
- __field(__le32, node)
- __field(__le32, port)
+ __field(unsigned int, service)
+ __field(unsigned int, instance)
+ __field(unsigned int, node)
+ __field(unsigned int, port)
),
TP_fast_assign(
@@ -62,15 +64,16 @@ TRACE_EVENT(qrtr_ns_service_announce_del,
TRACE_EVENT(qrtr_ns_server_add,
- TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port),
+ TP_PROTO(unsigned int service, unsigned int instance,
+ unsigned int node, unsigned int port),
TP_ARGS(service, instance, node, port),
TP_STRUCT__entry(
- __field(__le32, service)
- __field(__le32, instance)
- __field(__le32, node)
- __field(__le32, port)
+ __field(unsigned int, service)
+ __field(unsigned int, instance)
+ __field(unsigned int, node)
+ __field(unsigned int, port)
),
TP_fast_assign(
@@ -99,7 +102,7 @@ TRACE_EVENT(qrtr_ns_message,
),
TP_fast_assign(
- __assign_str(ctrl_pkt_str, ctrl_pkt_str);
+ __assign_str(ctrl_pkt_str);
__entry->sq_node = sq_node;
__entry->sq_port = sq_port;
),
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 90b2fb0292cb..5fbdabe3faea 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -207,7 +207,7 @@ TRACE_EVENT_RCU(rcu_exp_grace_period,
__entry->gpevent = gpevent;
),
- TP_printk("%s %ld %s",
+ TP_printk("%s %#lx %s",
__entry->rcuname, __entry->gpseq, __entry->gpevent)
);
@@ -466,40 +466,40 @@ TRACE_EVENT(rcu_stall_warning,
/*
* Tracepoint for dyntick-idle entry/exit events. These take 2 strings
* as argument:
- * polarity: "Start", "End", "StillNonIdle" for entering, exiting or still not
- * being in dyntick-idle mode.
+ * polarity: "Start", "End", "StillWatching" for entering, exiting or still not
+ * being in EQS mode.
* context: "USER" or "IDLE" or "IRQ".
- * NMIs nested in IRQs are inferred with dynticks_nesting > 1 in IRQ context.
+ * NMIs nested in IRQs are inferred with nesting > 1 in IRQ context.
*
* These events also take a pair of numbers, which indicate the nesting
* depth before and after the event of interest, and a third number that is
- * the ->dynticks counter. Note that task-related and interrupt-related
+ * the RCU_WATCHING counter. Note that task-related and interrupt-related
* events use two separate counters, and that the "++=" and "--=" events
* for irq/NMI will change the counter by two, otherwise by one.
*/
-TRACE_EVENT_RCU(rcu_dyntick,
+TRACE_EVENT_RCU(rcu_watching,
- TP_PROTO(const char *polarity, long oldnesting, long newnesting, int dynticks),
+ TP_PROTO(const char *polarity, long oldnesting, long newnesting, int counter),
- TP_ARGS(polarity, oldnesting, newnesting, dynticks),
+ TP_ARGS(polarity, oldnesting, newnesting, counter),
TP_STRUCT__entry(
__field(const char *, polarity)
__field(long, oldnesting)
__field(long, newnesting)
- __field(int, dynticks)
+ __field(int, counter)
),
TP_fast_assign(
__entry->polarity = polarity;
__entry->oldnesting = oldnesting;
__entry->newnesting = newnesting;
- __entry->dynticks = dynticks;
+ __entry->counter = counter;
),
TP_printk("%s %lx %lx %#3x", __entry->polarity,
__entry->oldnesting, __entry->newnesting,
- __entry->dynticks & 0xfff)
+ __entry->counter & 0xfff)
);
/*
@@ -561,40 +561,6 @@ TRACE_EVENT_RCU(rcu_segcb_stats,
);
/*
- * Tracepoint for the registration of a single RCU callback of the special
- * kvfree() form. The first argument is the RCU type, the second argument
- * is a pointer to the RCU callback, the third argument is the offset
- * of the callback within the enclosing RCU-protected data structure,
- * the fourth argument is the number of lazy callbacks queued, and the
- * fifth argument is the total number of callbacks queued.
- */
-TRACE_EVENT_RCU(rcu_kvfree_callback,
-
- TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
- long qlen),
-
- TP_ARGS(rcuname, rhp, offset, qlen),
-
- TP_STRUCT__entry(
- __field(const char *, rcuname)
- __field(void *, rhp)
- __field(unsigned long, offset)
- __field(long, qlen)
- ),
-
- TP_fast_assign(
- __entry->rcuname = rcuname;
- __entry->rhp = rhp;
- __entry->offset = offset;
- __entry->qlen = qlen;
- ),
-
- TP_printk("%s rhp=%p func=%ld %ld",
- __entry->rcuname, __entry->rhp, __entry->offset,
- __entry->qlen)
-);
-
-/*
* Tracepoint for marking the beginning rcu_do_batch, performed to start
* RCU callback invocation. The first argument is the RCU flavor,
* the second is the number of lazy callbacks queued, the third is
@@ -708,6 +674,33 @@ TRACE_EVENT_RCU(rcu_invoke_kfree_bulk_callback,
);
/*
+ * Tracepoint for normal synchronize_rcu() states. The first argument
+ * is the RCU flavor, the second argument is a pointer to the rcu_head,
+ * and the last one is an event.
+ */
+TRACE_EVENT_RCU(rcu_sr_normal,
+
+ TP_PROTO(const char *rcuname, struct rcu_head *rhp, const char *srevent),
+
+ TP_ARGS(rcuname, rhp, srevent),
+
+ TP_STRUCT__entry(
+ __field(const char *, rcuname)
+ __field(void *, rhp)
+ __field(const char *, srevent)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->rhp = rhp;
+ __entry->srevent = srevent;
+ ),
+
+ TP_printk("%s rhp=0x%p event=%s",
+ __entry->rcuname, __entry->rhp, __entry->srevent)
+);
+
+/*
* Tracepoint for exiting rcu_do_batch after RCU callbacks have been
* invoked. The first argument is the name of the RCU flavor,
* the second argument is number of callbacks actually invoked,
@@ -768,7 +761,7 @@ TRACE_EVENT_RCU(rcu_torture_read,
TP_ARGS(rcutorturename, rhp, secs, c_old, c),
TP_STRUCT__entry(
- __field(char, rcutorturename[RCUTORTURENAME_LEN])
+ __array(char, rcutorturename, RCUTORTURENAME_LEN)
__field(struct rcu_head *, rhp)
__field(unsigned long, secs)
__field(unsigned long, c_old)
@@ -776,9 +769,7 @@ TRACE_EVENT_RCU(rcu_torture_read,
),
TP_fast_assign(
- strncpy(__entry->rcutorturename, rcutorturename,
- RCUTORTURENAME_LEN);
- __entry->rcutorturename[RCUTORTURENAME_LEN - 1] = 0;
+ strscpy(__entry->rcutorturename, rcutorturename, RCUTORTURENAME_LEN);
__entry->rhp = rhp;
__entry->secs = secs;
__entry->c_old = c_old;
diff --git a/include/trace/events/readahead.h b/include/trace/events/readahead.h
new file mode 100644
index 000000000000..0997ac5eceab
--- /dev/null
+++ b/include/trace/events/readahead.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM readahead
+
+#if !defined(_TRACE_READAHEAD_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_READAHEAD_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+
+TRACE_EVENT(page_cache_ra_unbounded,
+ TP_PROTO(struct inode *inode, pgoff_t index, unsigned long nr_to_read,
+ unsigned long lookahead_size),
+
+ TP_ARGS(inode, index, nr_to_read, lookahead_size),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, i_ino)
+ __field(dev_t, s_dev)
+ __field(pgoff_t, index)
+ __field(unsigned long, nr_to_read)
+ __field(unsigned long, lookahead_size)
+ ),
+
+ TP_fast_assign(
+ __entry->i_ino = inode->i_ino;
+ __entry->s_dev = inode->i_sb->s_dev;
+ __entry->index = index;
+ __entry->nr_to_read = nr_to_read;
+ __entry->lookahead_size = lookahead_size;
+ ),
+
+ TP_printk(
+ "dev=%d:%d ino=%lx index=%lu nr_to_read=%lu lookahead_size=%lu",
+ MAJOR(__entry->s_dev), MINOR(__entry->s_dev), __entry->i_ino,
+ __entry->index, __entry->nr_to_read, __entry->lookahead_size
+ )
+);
+
+TRACE_EVENT(page_cache_ra_order,
+ TP_PROTO(struct inode *inode, pgoff_t index, struct file_ra_state *ra),
+
+ TP_ARGS(inode, index, ra),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, i_ino)
+ __field(dev_t, s_dev)
+ __field(pgoff_t, index)
+ __field(unsigned int, order)
+ __field(unsigned int, size)
+ __field(unsigned int, async_size)
+ __field(unsigned int, ra_pages)
+ ),
+
+ TP_fast_assign(
+ __entry->i_ino = inode->i_ino;
+ __entry->s_dev = inode->i_sb->s_dev;
+ __entry->index = index;
+ __entry->order = ra->order;
+ __entry->size = ra->size;
+ __entry->async_size = ra->async_size;
+ __entry->ra_pages = ra->ra_pages;
+ ),
+
+ TP_printk(
+ "dev=%d:%d ino=%lx index=%lu order=%u size=%u async_size=%u ra_pages=%u",
+ MAJOR(__entry->s_dev), MINOR(__entry->s_dev), __entry->i_ino,
+ __entry->index, __entry->order, __entry->size,
+ __entry->async_size, __entry->ra_pages
+ )
+);
+
+DECLARE_EVENT_CLASS(page_cache_ra_op,
+ TP_PROTO(struct inode *inode, pgoff_t index, struct file_ra_state *ra,
+ unsigned long req_count),
+
+ TP_ARGS(inode, index, ra, req_count),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, i_ino)
+ __field(dev_t, s_dev)
+ __field(pgoff_t, index)
+ __field(unsigned int, order)
+ __field(unsigned int, size)
+ __field(unsigned int, async_size)
+ __field(unsigned int, ra_pages)
+ __field(unsigned int, mmap_miss)
+ __field(loff_t, prev_pos)
+ __field(unsigned long, req_count)
+ ),
+
+ TP_fast_assign(
+ __entry->i_ino = inode->i_ino;
+ __entry->s_dev = inode->i_sb->s_dev;
+ __entry->index = index;
+ __entry->order = ra->order;
+ __entry->size = ra->size;
+ __entry->async_size = ra->async_size;
+ __entry->ra_pages = ra->ra_pages;
+ __entry->mmap_miss = ra->mmap_miss;
+ __entry->prev_pos = ra->prev_pos;
+ __entry->req_count = req_count;
+ ),
+
+ TP_printk(
+ "dev=%d:%d ino=%lx index=%lu req_count=%lu order=%u size=%u async_size=%u ra_pages=%u mmap_miss=%u prev_pos=%lld",
+ MAJOR(__entry->s_dev), MINOR(__entry->s_dev), __entry->i_ino,
+ __entry->index, __entry->req_count, __entry->order,
+ __entry->size, __entry->async_size, __entry->ra_pages,
+ __entry->mmap_miss, __entry->prev_pos
+ )
+);
+
+DEFINE_EVENT(page_cache_ra_op, page_cache_sync_ra,
+ TP_PROTO(struct inode *inode, pgoff_t index, struct file_ra_state *ra,
+ unsigned long req_count),
+ TP_ARGS(inode, index, ra, req_count)
+);
+
+DEFINE_EVENT(page_cache_ra_op, page_cache_async_ra,
+ TP_PROTO(struct inode *inode, pgoff_t index, struct file_ra_state *ra,
+ unsigned long req_count),
+ TP_ARGS(inode, index, ra, req_count)
+);
+
+#endif /* _TRACE_READAHEAD_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
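
The readahead events are meant to be emitted from the readahead entry points in mm/readahead.c. A hedged sketch of a call site (illustrative only; the callers are not part of this header):

#include <linux/fs.h>
#include <trace/events/readahead.h>

static void sample_sync_readahead(struct file *file, pgoff_t index,
				  unsigned long req_count)
{
	struct file_ra_state *ra = &file->f_ra;

	/* trace_page_cache_sync_ra() is generated by DEFINE_EVENT() above */
	trace_page_cache_sync_ra(file_inode(file), index, ra, req_count);

	/* ... the actual readahead work would follow ... */
}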
diff --git a/include/trace/events/regulator.h b/include/trace/events/regulator.h
index 72b3ba93b0a5..c58481a5d955 100644
--- a/include/trace/events/regulator.h
+++ b/include/trace/events/regulator.h
@@ -23,7 +23,7 @@ DECLARE_EVENT_CLASS(regulator_basic,
),
TP_fast_assign(
- __assign_str(name, name);
+ __assign_str(name);
),
TP_printk("name=%s", __get_str(name))
@@ -119,7 +119,7 @@ DECLARE_EVENT_CLASS(regulator_range,
),
TP_fast_assign(
- __assign_str(name, name);
+ __assign_str(name);
__entry->min = min;
__entry->max = max;
),
@@ -152,7 +152,7 @@ DECLARE_EVENT_CLASS(regulator_value,
),
TP_fast_assign(
- __assign_str(name, name);
+ __assign_str(name);
__entry->val = val;
),
diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h
index 3f121eed369e..8aeae06cf434 100644
--- a/include/trace/events/rpcgss.h
+++ b/include/trace/events/rpcgss.h
@@ -54,7 +54,7 @@ TRACE_DEFINE_ENUM(GSS_S_UNSEQ_TOKEN);
TRACE_DEFINE_ENUM(GSS_S_GAP_TOKEN);
#define show_gss_status(x) \
- __print_flags(x, "|", \
+ __print_symbolic(x, \
{ GSS_S_BAD_MECH, "GSS_S_BAD_MECH" }, \
{ GSS_S_BAD_NAME, "GSS_S_BAD_NAME" }, \
{ GSS_S_BAD_NAMETYPE, "GSS_S_BAD_NAMETYPE" }, \
@@ -154,7 +154,7 @@ DECLARE_EVENT_CLASS(rpcgss_ctx_class,
TP_fast_assign(
__entry->cred = gc;
__entry->service = gc->gc_service;
- __assign_str(principal, gc->gc_principal);
+ __assign_str(principal);
),
TP_printk("cred=%p service=%s principal='%s'",
@@ -189,7 +189,7 @@ DECLARE_EVENT_CLASS(rpcgss_svc_gssapi_class,
TP_fast_assign(
__entry->xid = __be32_to_cpu(rqstp->rq_xid);
__entry->maj_stat = maj_stat;
- __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf);
+ __assign_str(addr);
),
TP_printk("addr=%s xid=0x%08x maj_stat=%s",
@@ -206,8 +206,30 @@ DECLARE_EVENT_CLASS(rpcgss_svc_gssapi_class,
), \
TP_ARGS(rqstp, maj_stat))
+DEFINE_SVC_GSSAPI_EVENT(wrap);
DEFINE_SVC_GSSAPI_EVENT(unwrap);
DEFINE_SVC_GSSAPI_EVENT(mic);
+DEFINE_SVC_GSSAPI_EVENT(get_mic);
+
+TRACE_EVENT(rpcgss_svc_wrap_failed,
+ TP_PROTO(
+ const struct svc_rqst *rqstp
+ ),
+
+ TP_ARGS(rqstp),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __string(addr, rqstp->rq_xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __assign_str(addr);
+ ),
+
+ TP_printk("addr=%s xid=0x%08x", __get_str(addr), __entry->xid)
+);
TRACE_EVENT(rpcgss_svc_unwrap_failed,
TP_PROTO(
@@ -223,7 +245,7 @@ TRACE_EVENT(rpcgss_svc_unwrap_failed,
TP_fast_assign(
__entry->xid = be32_to_cpu(rqstp->rq_xid);
- __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf);
+ __assign_str(addr);
),
TP_printk("addr=%s xid=0x%08x", __get_str(addr), __entry->xid)
@@ -249,7 +271,7 @@ TRACE_EVENT(rpcgss_svc_seqno_bad,
__entry->expected = expected;
__entry->received = received;
__entry->xid = __be32_to_cpu(rqstp->rq_xid);
- __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf);
+ __assign_str(addr);
),
TP_printk("addr=%s xid=0x%08x expected seqno %u, received seqno %u",
@@ -277,7 +299,7 @@ TRACE_EVENT(rpcgss_svc_accept_upcall,
__entry->minor_status = minor_status;
__entry->major_status = major_status;
__entry->xid = be32_to_cpu(rqstp->rq_xid);
- __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf);
+ __assign_str(addr);
),
TP_printk("addr=%s xid=0x%08x major_status=%s (0x%08lx) minor_status=%u",
@@ -305,7 +327,7 @@ TRACE_EVENT(rpcgss_svc_authenticate,
TP_fast_assign(
__entry->xid = be32_to_cpu(rqstp->rq_xid);
__entry->seqno = gc->gc_seq;
- __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf);
+ __assign_str(addr);
),
TP_printk("addr=%s xid=0x%08x seqno=%u", __get_str(addr),
@@ -387,7 +409,7 @@ TRACE_EVENT(rpcgss_seqno,
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(rqst->rq_xid);
- __entry->seqno = rqst->rq_seqno;
+ __entry->seqno = *rqst->rq_seqnos;
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x seqno=%u",
@@ -418,7 +440,7 @@ TRACE_EVENT(rpcgss_need_reencode,
__entry->client_id = task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
__entry->seq_xmit = seq_xmit;
- __entry->seqno = task->tk_rqstp->rq_seqno;
+ __entry->seqno = *task->tk_rqstp->rq_seqnos;
__entry->ret = ret;
),
@@ -541,7 +563,7 @@ TRACE_EVENT(rpcgss_upcall_msg,
),
TP_fast_assign(
- __assign_str(msg, buf);
+ __assign_str(msg);
),
TP_printk("msg='%s'", __get_str(msg))
@@ -587,7 +609,7 @@ TRACE_EVENT(rpcgss_context,
__field(unsigned int, timeout)
__field(u32, window_size)
__field(int, len)
- __string(acceptor, data)
+ __string_len(acceptor, data, len)
),
TP_fast_assign(
@@ -596,7 +618,7 @@ TRACE_EVENT(rpcgss_context,
__entry->timeout = timeout;
__entry->window_size = window_size;
__entry->len = len;
- strncpy(__get_str(acceptor), data, len);
+ __assign_str(acceptor);
),
TP_printk("win_size=%u expiry=%lu now=%lu timeout=%u acceptor=%.*s",
@@ -655,7 +677,7 @@ TRACE_EVENT(rpcgss_oid_to_mech,
),
TP_fast_assign(
- __assign_str(oid, oid);
+ __assign_str(oid);
),
TP_printk("mech for oid %s was not found", __get_str(oid))
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index 8f461e04e5f0..e6a72646c507 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -22,47 +22,37 @@
** Event classes
**/
-DECLARE_EVENT_CLASS(rpcrdma_completion_class,
+DECLARE_EVENT_CLASS(rpcrdma_simple_cid_class,
TP_PROTO(
- const struct ib_wc *wc,
const struct rpc_rdma_cid *cid
),
- TP_ARGS(wc, cid),
+ TP_ARGS(cid),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
- __field(unsigned long, status)
- __field(unsigned int, vendor_err)
),
TP_fast_assign(
__entry->cq_id = cid->ci_queue_id;
__entry->completion_id = cid->ci_completion_id;
- __entry->status = wc->status;
- if (wc->status)
- __entry->vendor_err = wc->vendor_err;
- else
- __entry->vendor_err = 0;
),
- TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
- __entry->cq_id, __entry->completion_id,
- rdma_show_wc_status(__entry->status),
- __entry->status, __entry->vendor_err
+ TP_printk("cq.id=%d cid=%d",
+ __entry->cq_id, __entry->completion_id
)
);
-#define DEFINE_COMPLETION_EVENT(name) \
- DEFINE_EVENT(rpcrdma_completion_class, name, \
+#define DEFINE_SIMPLE_CID_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_simple_cid_class, name, \
TP_PROTO( \
- const struct ib_wc *wc, \
const struct rpc_rdma_cid *cid \
), \
- TP_ARGS(wc, cid))
+ TP_ARGS(cid) \
+ )
-DECLARE_EVENT_CLASS(rpcrdma_send_completion_class,
+DECLARE_EVENT_CLASS(rpcrdma_completion_class,
TP_PROTO(
const struct ib_wc *wc,
const struct rpc_rdma_cid *cid
@@ -73,20 +63,29 @@ DECLARE_EVENT_CLASS(rpcrdma_send_completion_class,
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
+ __field(unsigned long, status)
+ __field(unsigned int, vendor_err)
),
TP_fast_assign(
__entry->cq_id = cid->ci_queue_id;
__entry->completion_id = cid->ci_completion_id;
+ __entry->status = wc->status;
+ if (wc->status)
+ __entry->vendor_err = wc->vendor_err;
+ else
+ __entry->vendor_err = 0;
),
- TP_printk("cq.id=%u cid=%d",
- __entry->cq_id, __entry->completion_id
+ TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
+ __entry->cq_id, __entry->completion_id,
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
)
);
-#define DEFINE_SEND_COMPLETION_EVENT(name) \
- DEFINE_EVENT(rpcrdma_send_completion_class, name, \
+#define DEFINE_COMPLETION_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_completion_class, name, \
TP_PROTO( \
const struct ib_wc *wc, \
const struct rpc_rdma_cid *cid \
@@ -305,8 +304,8 @@ DECLARE_EVENT_CLASS(xprtrdma_reply_class,
__entry->xid = be32_to_cpu(rep->rr_xid);
__entry->version = be32_to_cpu(rep->rr_vers);
__entry->proc = be32_to_cpu(rep->rr_proc);
- __assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt));
- __assign_str(port, rpcrdma_portstr(rep->rr_rxprt));
+ __assign_str(addr);
+ __assign_str(port);
),
TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u",
@@ -336,8 +335,8 @@ DECLARE_EVENT_CLASS(xprtrdma_rxprt,
),
TP_fast_assign(
- __assign_str(addr, rpcrdma_addrstr(r_xprt));
- __assign_str(port, rpcrdma_portstr(r_xprt));
+ __assign_str(addr);
+ __assign_str(port);
),
TP_printk("peer=[%s]:%s",
@@ -370,8 +369,8 @@ DECLARE_EVENT_CLASS(xprtrdma_connect_class,
TP_fast_assign(
__entry->rc = rc;
__entry->connect_status = r_xprt->rx_ep->re_connect_status;
- __assign_str(addr, rpcrdma_addrstr(r_xprt));
- __assign_str(port, rpcrdma_portstr(r_xprt));
+ __assign_str(addr);
+ __assign_str(port);
),
TP_printk("peer=[%s]:%s rc=%d connection status=%d",
@@ -609,8 +608,8 @@ DECLARE_EVENT_CLASS(xprtrdma_callback_class,
TP_fast_assign(
__entry->xid = be32_to_cpu(rqst->rq_xid);
- __assign_str(addr, rpcrdma_addrstr(r_xprt));
- __assign_str(port, rpcrdma_portstr(r_xprt));
+ __assign_str(addr);
+ __assign_str(port);
),
TP_printk("peer=[%s]:%s xid=0x%08x",
@@ -670,6 +669,29 @@ TRACE_EVENT(xprtrdma_inline_thresh,
DEFINE_CONN_EVENT(connect);
DEFINE_CONN_EVENT(disconnect);
+TRACE_EVENT(xprtrdma_device_removal,
+ TP_PROTO(
+ const struct rdma_cm_id *id
+ ),
+
+ TP_ARGS(id),
+
+ TP_STRUCT__entry(
+ __string(name, id->device->name)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ memcpy(__entry->addr, &id->route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+
+ TP_printk("device %s to be removed, disconnecting %pISpc\n",
+ __get_str(name), __entry->addr
+ )
+);
+
DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
TRACE_EVENT(xprtrdma_op_connect,
@@ -688,8 +710,8 @@ TRACE_EVENT(xprtrdma_op_connect,
TP_fast_assign(
__entry->delay = delay;
- __assign_str(addr, rpcrdma_addrstr(r_xprt));
- __assign_str(port, rpcrdma_portstr(r_xprt));
+ __assign_str(addr);
+ __assign_str(port);
),
TP_printk("peer=[%s]:%s delay=%lu",
@@ -717,8 +739,8 @@ TRACE_EVENT(xprtrdma_op_set_cto,
TP_fast_assign(
__entry->connect = connect;
__entry->reconnect = reconnect;
- __assign_str(addr, rpcrdma_addrstr(r_xprt));
- __assign_str(port, rpcrdma_portstr(r_xprt));
+ __assign_str(addr);
+ __assign_str(port);
),
TP_printk("peer=[%s]:%s connect=%lu reconnect=%lu",
@@ -747,8 +769,8 @@ TRACE_EVENT(xprtrdma_createmrs,
TP_fast_assign(
__entry->count = count;
- __assign_str(addr, rpcrdma_addrstr(r_xprt));
- __assign_str(port, rpcrdma_portstr(r_xprt));
+ __assign_str(addr);
+ __assign_str(port);
),
TP_printk("peer=[%s]:%s created %u MRs",
@@ -776,8 +798,8 @@ TRACE_EVENT(xprtrdma_nomrs_err,
__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client->cl_clid;
- __assign_str(addr, rpcrdma_addrstr(r_xprt));
- __assign_str(port, rpcrdma_portstr(r_xprt));
+ __assign_str(addr);
+ __assign_str(port);
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " peer=[%s]:%s",
@@ -978,27 +1000,7 @@ TRACE_EVENT(xprtrdma_post_send_err,
)
);
-TRACE_EVENT(xprtrdma_post_recv,
- TP_PROTO(
- const struct rpcrdma_rep *rep
- ),
-
- TP_ARGS(rep),
-
- TP_STRUCT__entry(
- __field(u32, cq_id)
- __field(int, completion_id)
- ),
-
- TP_fast_assign(
- __entry->cq_id = rep->rr_cid.ci_queue_id;
- __entry->completion_id = rep->rr_cid.ci_completion_id;
- ),
-
- TP_printk("cq.id=%d cid=%d",
- __entry->cq_id, __entry->completion_id
- )
-);
+DEFINE_SIMPLE_CID_EVENT(xprtrdma_post_recv);
TRACE_EVENT(xprtrdma_post_recvs,
TP_PROTO(
@@ -1022,8 +1024,8 @@ TRACE_EVENT(xprtrdma_post_recvs,
__entry->cq_id = ep->re_attr.recv_cq->res.id;
__entry->count = count;
__entry->posted = ep->re_receive_count;
- __assign_str(addr, rpcrdma_addrstr(r_xprt));
- __assign_str(port, rpcrdma_portstr(r_xprt));
+ __assign_str(addr);
+ __assign_str(port);
),
TP_printk("peer=[%s]:%s cq.id=%d %u new recvs, %d active",
@@ -1052,8 +1054,8 @@ TRACE_EVENT(xprtrdma_post_recvs_err,
__entry->cq_id = ep->re_attr.recv_cq->res.id;
__entry->status = status;
- __assign_str(addr, rpcrdma_addrstr(r_xprt));
- __assign_str(port, rpcrdma_portstr(r_xprt));
+ __assign_str(addr);
+ __assign_str(port);
),
TP_printk("peer=[%s]:%s cq.id=%d rc=%d",
@@ -1466,8 +1468,8 @@ TRACE_EVENT(xprtrdma_cb_setup,
TP_fast_assign(
__entry->reqs = reqs;
- __assign_str(addr, rpcrdma_addrstr(r_xprt));
- __assign_str(port, rpcrdma_portstr(r_xprt));
+ __assign_str(addr);
+ __assign_str(port);
),
TP_printk("peer=[%s]:%s %u reqs",
@@ -1497,7 +1499,7 @@ DECLARE_EVENT_CLASS(svcrdma_accept_class,
TP_fast_assign(
__entry->status = status;
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+ __assign_str(addr);
),
TP_printk("addr=%s status=%ld",
@@ -1667,7 +1669,7 @@ TRACE_EVENT(svcrdma_encode_wseg,
__entry->offset = offset;
),
- TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
+ TP_printk("cq.id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
__entry->cq_id, __entry->completion_id,
__entry->segno, __entry->length,
(unsigned long long)__entry->offset, __entry->handle
@@ -1703,7 +1705,7 @@ TRACE_EVENT(svcrdma_decode_rseg,
__entry->offset = segment->rs_offset;
),
- TP_printk("cq_id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
+ TP_printk("cq.id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
__entry->cq_id, __entry->completion_id,
__entry->segno, __entry->position, __entry->length,
(unsigned long long)__entry->offset, __entry->handle
@@ -1740,7 +1742,7 @@ TRACE_EVENT(svcrdma_decode_wseg,
__entry->offset = segment->rs_offset;
),
- TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
+ TP_printk("cq.id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
__entry->cq_id, __entry->completion_id,
__entry->segno, __entry->length,
(unsigned long long)__entry->offset, __entry->handle
@@ -1783,29 +1785,29 @@ DEFINE_ERROR_EVENT(chunk);
DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
TP_PROTO(
- const struct svcxprt_rdma *rdma,
+ const struct rpc_rdma_cid *cid,
u64 dma_addr,
u32 length
),
- TP_ARGS(rdma, dma_addr, length),
+ TP_ARGS(cid, dma_addr, length),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(u64, dma_addr)
__field(u32, length)
- __string(device, rdma->sc_cm_id->device->name)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->dma_addr = dma_addr;
__entry->length = length;
- __assign_str(device, rdma->sc_cm_id->device->name);
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
- __get_str(addr), __get_str(device),
+ TP_printk("cq.id=%u cid=%d dma_addr=%llu length=%u",
+ __entry->cq_id, __entry->completion_id,
__entry->dma_addr, __entry->length
)
);
@@ -1813,11 +1815,12 @@ DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
#define DEFINE_SVC_DMA_EVENT(name) \
DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name, \
TP_PROTO( \
- const struct svcxprt_rdma *rdma,\
+ const struct rpc_rdma_cid *cid, \
u64 dma_addr, \
u32 length \
), \
- TP_ARGS(rdma, dma_addr, length))
+ TP_ARGS(cid, dma_addr, length) \
+ )
DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_map_err);
@@ -1826,33 +1829,37 @@ DEFINE_SVC_DMA_EVENT(dma_unmap_page);
TRACE_EVENT(svcrdma_dma_map_rw_err,
TP_PROTO(
const struct svcxprt_rdma *rdma,
+ u64 offset,
+ u32 handle,
unsigned int nents,
int status
),
- TP_ARGS(rdma, nents, status),
+ TP_ARGS(rdma, offset, handle, nents, status),
TP_STRUCT__entry(
- __field(int, status)
+ __field(u32, cq_id)
+ __field(u32, handle)
+ __field(u64, offset)
__field(unsigned int, nents)
- __string(device, rdma->sc_cm_id->device->name)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
+ __field(int, status)
),
TP_fast_assign(
- __entry->status = status;
+ __entry->cq_id = rdma->sc_sq_cq->res.id;
+ __entry->handle = handle;
+ __entry->offset = offset;
__entry->nents = nents;
- __assign_str(device, rdma->sc_cm_id->device->name);
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+ __entry->status = status;
),
- TP_printk("addr=%s device=%s nents=%u status=%d",
- __get_str(addr), __get_str(device), __entry->nents,
- __entry->status
+ TP_printk("cq.id=%u 0x%016llx:0x%08x nents=%u status=%d",
+ __entry->cq_id, (unsigned long long)__entry->offset,
+ __entry->handle, __entry->nents, __entry->status
)
);
-TRACE_EVENT(svcrdma_no_rwctx_err,
+TRACE_EVENT(svcrdma_rwctx_empty,
TP_PROTO(
const struct svcxprt_rdma *rdma,
unsigned int num_sges
@@ -1861,79 +1868,75 @@ TRACE_EVENT(svcrdma_no_rwctx_err,
TP_ARGS(rdma, num_sges),
TP_STRUCT__entry(
+ __field(u32, cq_id)
__field(unsigned int, num_sges)
- __string(device, rdma->sc_cm_id->device->name)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = rdma->sc_sq_cq->res.id;
__entry->num_sges = num_sges;
- __assign_str(device, rdma->sc_cm_id->device->name);
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s device=%s num_sges=%d",
- __get_str(addr), __get_str(device), __entry->num_sges
+ TP_printk("cq.id=%u num_sges=%d",
+ __entry->cq_id, __entry->num_sges
)
);
TRACE_EVENT(svcrdma_page_overrun_err,
TP_PROTO(
- const struct svcxprt_rdma *rdma,
- const struct svc_rqst *rqst,
+ const struct rpc_rdma_cid *cid,
unsigned int pageno
),
- TP_ARGS(rdma, rqst, pageno),
+ TP_ARGS(cid, pageno),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(unsigned int, pageno)
- __field(u32, xid)
- __string(device, rdma->sc_cm_id->device->name)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->pageno = pageno;
- __entry->xid = __be32_to_cpu(rqst->rq_xid);
- __assign_str(device, rdma->sc_cm_id->device->name);
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
- __get_str(device), __entry->xid, __entry->pageno
+ TP_printk("cq.id=%u cid=%d pageno=%u",
+ __entry->cq_id, __entry->completion_id,
+ __entry->pageno
)
);
TRACE_EVENT(svcrdma_small_wrch_err,
TP_PROTO(
- const struct svcxprt_rdma *rdma,
+ const struct rpc_rdma_cid *cid,
unsigned int remaining,
unsigned int seg_no,
unsigned int num_segs
),
- TP_ARGS(rdma, remaining, seg_no, num_segs),
+ TP_ARGS(cid, remaining, seg_no, num_segs),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(unsigned int, remaining)
__field(unsigned int, seg_no)
__field(unsigned int, num_segs)
- __string(device, rdma->sc_cm_id->device->name)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->remaining = remaining;
__entry->seg_no = seg_no;
__entry->num_segs = num_segs;
- __assign_str(device, rdma->sc_cm_id->device->name);
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
- __get_str(addr), __get_str(device), __entry->remaining,
- __entry->seg_no, __entry->num_segs
+ TP_printk("cq.id=%u cid=%d remaining=%u seg_no=%u num_segs=%u",
+ __entry->cq_id, __entry->completion_id,
+ __entry->remaining, __entry->seg_no, __entry->num_segs
)
);
@@ -1959,7 +1962,7 @@ TRACE_EVENT(svcrdma_send_pullup,
__entry->msglen = msglen;
),
- TP_printk("cq_id=%u cid=%d hdr=%u msg=%u (total %u)",
+ TP_printk("cq.id=%u cid=%d hdr=%u msg=%u (total %u)",
__entry->cq_id, __entry->completion_id,
__entry->hdrlen, __entry->msglen,
__entry->hdrlen + __entry->msglen)
@@ -1982,7 +1985,7 @@ TRACE_EVENT(svcrdma_send_err,
TP_fast_assign(
__entry->status = status;
__entry->xid = __be32_to_cpu(rqst->rq_xid);
- __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
+ __assign_str(addr);
),
TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
@@ -2014,37 +2017,17 @@ TRACE_EVENT(svcrdma_post_send,
wr->ex.invalidate_rkey : 0;
),
- TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
+ TP_printk("cq.id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
__entry->cq_id, __entry->completion_id,
__entry->num_sge, __entry->inv_rkey
)
);
-DEFINE_SEND_COMPLETION_EVENT(svcrdma_wc_send);
+DEFINE_SIMPLE_CID_EVENT(svcrdma_wc_send);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_err);
-TRACE_EVENT(svcrdma_post_recv,
- TP_PROTO(
- const struct svc_rdma_recv_ctxt *ctxt
- ),
-
- TP_ARGS(ctxt),
-
- TP_STRUCT__entry(
- __field(u32, cq_id)
- __field(int, completion_id)
- ),
-
- TP_fast_assign(
- __entry->cq_id = ctxt->rc_cid.ci_queue_id;
- __entry->completion_id = ctxt->rc_cid.ci_completion_id;
- ),
-
- TP_printk("cq.id=%d cid=%d",
- __entry->cq_id, __entry->completion_id
- )
-);
+DEFINE_SIMPLE_CID_EVENT(svcrdma_post_recv);
DEFINE_RECEIVE_SUCCESS_EVENT(svcrdma_wc_recv);
DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_flush);
@@ -2065,7 +2048,7 @@ TRACE_EVENT(svcrdma_rq_post_err,
TP_fast_assign(
__entry->status = status;
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+ __assign_str(addr);
),
TP_printk("addr=%s status=%d",
@@ -2112,6 +2095,14 @@ DEFINE_POST_CHUNK_EVENT(read);
DEFINE_POST_CHUNK_EVENT(write);
DEFINE_POST_CHUNK_EVENT(reply);
+DEFINE_EVENT(svcrdma_post_chunk_class, svcrdma_cc_release,
+ TP_PROTO(
+ const struct rpc_rdma_cid *cid,
+ int sqecount
+ ),
+ TP_ARGS(cid, sqecount)
+);
+
TRACE_EVENT(svcrdma_wc_read,
TP_PROTO(
const struct ib_wc *wc,
@@ -2144,11 +2135,16 @@ TRACE_EVENT(svcrdma_wc_read,
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_err);
+DEFINE_SIMPLE_CID_EVENT(svcrdma_read_finished);
-DEFINE_SEND_COMPLETION_EVENT(svcrdma_wc_write);
+DEFINE_SIMPLE_CID_EVENT(svcrdma_wc_write);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_err);
+DEFINE_SIMPLE_CID_EVENT(svcrdma_wc_reply);
+DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_reply_flush);
+DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_reply_err);
+
TRACE_EVENT(svcrdma_qp_error,
TP_PROTO(
const struct ib_event *event,
@@ -2165,7 +2161,7 @@ TRACE_EVENT(svcrdma_qp_error,
TP_fast_assign(
__entry->event = event->event;
- __assign_str(device, event->device->name);
+ __assign_str(device);
snprintf(__entry->addr, sizeof(__entry->addr) - 1,
"%pISpc", sap);
),
@@ -2176,68 +2172,170 @@ TRACE_EVENT(svcrdma_qp_error,
)
);
-DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
+TRACE_EVENT(svcrdma_device_removal,
TP_PROTO(
- const struct svcxprt_rdma *rdma
+ const struct rdma_cm_id *id
),
- TP_ARGS(rdma),
+ TP_ARGS(id),
TP_STRUCT__entry(
+ __string(name, id->device->name)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ memcpy(__entry->addr, &id->route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+
+ TP_printk("device %s to be removed, disconnecting %pISpc\n",
+ __get_str(name), __entry->addr
+ )
+);
+
+DECLARE_EVENT_CLASS(svcrdma_sendqueue_class,
+ TP_PROTO(
+ const struct svcxprt_rdma *rdma,
+ const struct rpc_rdma_cid *cid
+ ),
+
+ TP_ARGS(rdma, cid),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(int, avail)
__field(int, depth)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->avail = atomic_read(&rdma->sc_sq_avail);
__entry->depth = rdma->sc_sq_depth;
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s sc_sq_avail=%d/%d",
- __get_str(addr), __entry->avail, __entry->depth
+ TP_printk("cq.id=%u cid=%d sc_sq_avail=%d/%d",
+ __entry->cq_id, __entry->completion_id,
+ __entry->avail, __entry->depth
)
);
#define DEFINE_SQ_EVENT(name) \
- DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
- TP_PROTO( \
- const struct svcxprt_rdma *rdma \
- ), \
- TP_ARGS(rdma))
+ DEFINE_EVENT(svcrdma_sendqueue_class, name, \
+ TP_PROTO( \
+ const struct svcxprt_rdma *rdma, \
+ const struct rpc_rdma_cid *cid \
+ ), \
+ TP_ARGS(rdma, cid) \
+ )
-DEFINE_SQ_EVENT(full);
-DEFINE_SQ_EVENT(retry);
+DEFINE_SQ_EVENT(svcrdma_sq_full);
+DEFINE_SQ_EVENT(svcrdma_sq_retry);
TRACE_EVENT(svcrdma_sq_post_err,
TP_PROTO(
const struct svcxprt_rdma *rdma,
+ const struct rpc_rdma_cid *cid,
int status
),
- TP_ARGS(rdma, status),
+ TP_ARGS(rdma, cid, status),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(int, avail)
__field(int, depth)
__field(int, status)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->avail = atomic_read(&rdma->sc_sq_avail);
__entry->depth = rdma->sc_sq_depth;
__entry->status = status;
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
- __get_str(addr), __entry->avail, __entry->depth,
- __entry->status
+ TP_printk("cq.id=%u cid=%d sc_sq_avail=%d/%d status=%d",
+ __entry->cq_id, __entry->completion_id,
+ __entry->avail, __entry->depth, __entry->status
)
);
+DECLARE_EVENT_CLASS(rpcrdma_client_device_class,
+ TP_PROTO(
+ const struct ib_device *device
+ ),
+
+ TP_ARGS(device),
+
+ TP_STRUCT__entry(
+ __string(name, device->name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ ),
+
+ TP_printk("device=%s",
+ __get_str(name)
+ )
+);
+
+#define DEFINE_CLIENT_DEVICE_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_client_device_class, name, \
+ TP_PROTO( \
+ const struct ib_device *device \
+ ), \
+ TP_ARGS(device) \
+ )
+
+DEFINE_CLIENT_DEVICE_EVENT(rpcrdma_client_completion);
+DEFINE_CLIENT_DEVICE_EVENT(rpcrdma_client_add_one);
+DEFINE_CLIENT_DEVICE_EVENT(rpcrdma_client_remove_one);
+DEFINE_CLIENT_DEVICE_EVENT(rpcrdma_client_wait_on);
+DEFINE_CLIENT_DEVICE_EVENT(rpcrdma_client_remove_one_done);
+
+DECLARE_EVENT_CLASS(rpcrdma_client_register_class,
+ TP_PROTO(
+ const struct ib_device *device,
+ const struct rpcrdma_notification *rn
+ ),
+
+ TP_ARGS(device, rn),
+
+ TP_STRUCT__entry(
+ __string(name, device->name)
+ __field(void *, callback)
+ __field(u32, index)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ __entry->callback = rn->rn_done;
+ __entry->index = rn->rn_index;
+ ),
+
+ TP_printk("device=%s index=%u done callback=%pS\n",
+ __get_str(name), __entry->index, __entry->callback
+ )
+);
+
+#define DEFINE_CLIENT_REGISTER_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_client_register_class, name, \
+ TP_PROTO( \
+ const struct ib_device *device, \
+ const struct rpcrdma_notification *rn \
+ ), \
+ TP_ARGS(device, rn))
+
+DEFINE_CLIENT_REGISTER_EVENT(rpcrdma_client_register);
+DEFINE_CLIENT_REGISTER_EVENT(rpcrdma_client_unregister);
+
#endif /* _TRACE_RPCRDMA_H */
#include <trace/define_trace.h>
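[Editor's note] The rpcrdma_client_device_class and rpcrdma_client_register_class additions above, like the reworked svcrdma_sendqueue_class earlier in this file, all rely on the DECLARE_EVENT_CLASS + DEFINE_EVENT split so that several events share one entry layout and one format string. A minimal sketch of that pattern follows, using a hypothetical foo_device structure and event names that are not part of this patch:

/* Sketch only: a hypothetical foo_device and one shared event class. */
struct foo_device {
	const char	*name;
	u32		id;
};

DECLARE_EVENT_CLASS(foo_device_class,
	TP_PROTO(const struct foo_device *dev),
	TP_ARGS(dev),
	TP_STRUCT__entry(
		__string(name, dev->name)	/* variable-length string field */
		__field(u32, id)
	),
	TP_fast_assign(
		__assign_str(name);		/* source comes from __string() above */
		__entry->id = dev->id;
	),
	TP_printk("dev=%s id=%u", __get_str(name), __entry->id)
);

/* Each DEFINE_EVENT shares the class's entry layout and format string;
 * only the event name (and thus its static tracepoint key) differs.
 */
#define DEFINE_FOO_DEVICE_EVENT(name)			\
	DEFINE_EVENT(foo_device_class, name,		\
		TP_PROTO(const struct foo_device *dev),	\
		TP_ARGS(dev))

DEFINE_FOO_DEVICE_EVENT(foo_device_add);
DEFINE_FOO_DEVICE_EVENT(foo_device_remove);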
diff --git a/include/trace/events/rpm.h b/include/trace/events/rpm.h
index 3c716214dab1..2b0b4b6ef862 100644
--- a/include/trace/events/rpm.h
+++ b/include/trace/events/rpm.h
@@ -33,7 +33,7 @@ DECLARE_EVENT_CLASS(rpm_internal,
),
TP_fast_assign(
- __assign_str(name, dev_name(dev));
+ __assign_str(name);
__entry->flags = flags;
__entry->usage_count = atomic_read(
&dev->power.usage_count);
@@ -92,7 +92,7 @@ TRACE_EVENT(rpm_return_int,
),
TP_fast_assign(
- __assign_str(name, dev_name(dev));
+ __assign_str(name);
__entry->ip = ip;
__entry->ret = ret;
),
@@ -101,6 +101,48 @@ TRACE_EVENT(rpm_return_int,
__entry->ret)
);
+#define RPM_STATUS_STRINGS \
+ EM(RPM_INVALID, "RPM_INVALID") \
+ EM(RPM_ACTIVE, "RPM_ACTIVE") \
+ EM(RPM_RESUMING, "RPM_RESUMING") \
+ EM(RPM_SUSPENDED, "RPM_SUSPENDED") \
+ EMe(RPM_SUSPENDING, "RPM_SUSPENDING")
+
+/* Enums require being exported to userspace, for user tool parsing. */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+RPM_STATUS_STRINGS
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings that
+ * will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) { a, b },
+#define EMe(a, b) { a, b }
+
+TRACE_EVENT(rpm_status,
+ TP_PROTO(struct device *dev, enum rpm_status status),
+ TP_ARGS(dev, status),
+
+ TP_STRUCT__entry(
+ __string(name, dev_name(dev))
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ __entry->status = status;
+ ),
+
+ TP_printk("%s status=%s", __get_str(name),
+ __print_symbolic(__entry->status, RPM_STATUS_STRINGS))
+);
+
#endif /* _TRACE_RUNTIME_POWER_H */
/* This part must be outside protection */
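[Editor's note] The EM()/EMe() comments in the rpm.h hunk above describe the usual two-pass trick: the same list is first expanded into TRACE_DEFINE_ENUM() statements so user-space parsers can resolve the enum values, then re-expanded into a { value, string } table for __print_symbolic(). A minimal sketch of that pattern follows, using a hypothetical widget_state enum that is not part of this patch:

/* Sketch only: a hypothetical enum run through the same two-pass dance. */
enum widget_state { WIDGET_OFF, WIDGET_STANDBY, WIDGET_ON };

#define WIDGET_STATE_STRINGS			\
	EM(WIDGET_OFF,     "WIDGET_OFF")	\
	EM(WIDGET_STANDBY, "WIDGET_STANDBY")	\
	EMe(WIDGET_ON,     "WIDGET_ON")

/* Pass 1: export each enum value so user-space tools can resolve it. */
#undef EM
#undef EMe
#define EM(a, b)	TRACE_DEFINE_ENUM(a);
#define EMe(a, b)	TRACE_DEFINE_ENUM(a);

WIDGET_STATE_STRINGS

/* Pass 2: reuse the same list as a { value, "string" } print table. */
#undef EM
#undef EMe
#define EM(a, b)	{ a, b },
#define EMe(a, b)	{ a, b }

TRACE_EVENT(widget_state_change,
	TP_PROTO(int id, enum widget_state state),
	TP_ARGS(id, state),
	TP_STRUCT__entry(
		__field(int, id)
		__field(int, state)
	),
	TP_fast_assign(
		__entry->id = id;
		__entry->state = state;
	),
	TP_printk("widget=%d state=%s", __entry->id,
		  __print_symbolic(__entry->state, WIDGET_STATE_STRINGS))
);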
diff --git a/include/trace/events/rseq.h b/include/trace/events/rseq.h
index a04a64bc1a00..ce85d650bf4b 100644
--- a/include/trace/events/rseq.h
+++ b/include/trace/events/rseq.h
@@ -16,13 +16,18 @@ TRACE_EVENT(rseq_update,
TP_STRUCT__entry(
__field(s32, cpu_id)
+ __field(s32, node_id)
+ __field(s32, mm_cid)
),
TP_fast_assign(
- __entry->cpu_id = raw_smp_processor_id();
+ __entry->cpu_id = t->rseq.ids.cpu_id;
+ __entry->node_id = cpu_to_node(__entry->cpu_id);
+ __entry->mm_cid = t->rseq.ids.mm_cid;
),
- TP_printk("cpu_id=%d", __entry->cpu_id)
+ TP_printk("cpu_id=%d node_id=%d mm_cid=%d", __entry->cpu_id,
+ __entry->node_id, __entry->mm_cid)
);
TRACE_EVENT(rseq_ip_fixup,
diff --git a/include/trace/events/rust_sample.h b/include/trace/events/rust_sample.h
new file mode 100644
index 000000000000..dbc80ca2e465
--- /dev/null
+++ b/include/trace/events/rust_sample.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Tracepoints for `samples/rust/rust_print.rs`.
+ *
+ * Copyright (C) 2024 Google, Inc.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rust_sample
+
+#if !defined(_RUST_SAMPLE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _RUST_SAMPLE_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(rust_sample_loaded,
+ TP_PROTO(int magic_number),
+ TP_ARGS(magic_number),
+ TP_STRUCT__entry(
+ __field(int, magic_number)
+ ),
+ TP_fast_assign(
+ __entry->magic_number = magic_number;
+ ),
+ TP_printk("magic=%d", __entry->magic_number)
+);
+
+#endif /* _RUST_SAMPLE_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
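[Editor's note] The new rust_sample.h above is a conventional TRACE_EVENT header. As a reminder of how such a header is normally instantiated and fired, here is a small C sketch; the demo module below is hypothetical and not part of this patch, and the Rust sample itself is expected to reach this tracepoint through the kernel's Rust tracepoint support rather than the C call shown here:

/* Sketch only: instantiate the tracepoints once, then fire the event. */
#include <linux/module.h>

#define CREATE_TRACE_POINTS
#include <trace/events/rust_sample.h>

static int __init rust_trace_demo_init(void)
{
	trace_rust_sample_loaded(42);	/* prints "magic=42" when the event is enabled */
	return 0;
}
module_init(rust_trace_demo_init);

MODULE_LICENSE("GPL");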
diff --git a/include/trace/events/rv.h b/include/trace/events/rv.h
deleted file mode 100644
index 56592da9301c..000000000000
--- a/include/trace/events/rv.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM rv
-
-#if !defined(_TRACE_RV_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_RV_H
-
-#include <linux/rv.h>
-#include <linux/tracepoint.h>
-
-#ifdef CONFIG_DA_MON_EVENTS_IMPLICIT
-DECLARE_EVENT_CLASS(event_da_monitor,
-
- TP_PROTO(char *state, char *event, char *next_state, bool final_state),
-
- TP_ARGS(state, event, next_state, final_state),
-
- TP_STRUCT__entry(
- __array( char, state, MAX_DA_NAME_LEN )
- __array( char, event, MAX_DA_NAME_LEN )
- __array( char, next_state, MAX_DA_NAME_LEN )
- __field( bool, final_state )
- ),
-
- TP_fast_assign(
- memcpy(__entry->state, state, MAX_DA_NAME_LEN);
- memcpy(__entry->event, event, MAX_DA_NAME_LEN);
- memcpy(__entry->next_state, next_state, MAX_DA_NAME_LEN);
- __entry->final_state = final_state;
- ),
-
- TP_printk("%s x %s -> %s %s",
- __entry->state,
- __entry->event,
- __entry->next_state,
- __entry->final_state ? "(final)" : "")
-);
-
-DECLARE_EVENT_CLASS(error_da_monitor,
-
- TP_PROTO(char *state, char *event),
-
- TP_ARGS(state, event),
-
- TP_STRUCT__entry(
- __array( char, state, MAX_DA_NAME_LEN )
- __array( char, event, MAX_DA_NAME_LEN )
- ),
-
- TP_fast_assign(
- memcpy(__entry->state, state, MAX_DA_NAME_LEN);
- memcpy(__entry->event, event, MAX_DA_NAME_LEN);
- ),
-
- TP_printk("event %s not expected in the state %s",
- __entry->event,
- __entry->state)
-);
-
-#ifdef CONFIG_RV_MON_WIP
-DEFINE_EVENT(event_da_monitor, event_wip,
- TP_PROTO(char *state, char *event, char *next_state, bool final_state),
- TP_ARGS(state, event, next_state, final_state));
-
-DEFINE_EVENT(error_da_monitor, error_wip,
- TP_PROTO(char *state, char *event),
- TP_ARGS(state, event));
-#endif /* CONFIG_RV_MON_WIP */
-#endif /* CONFIG_DA_MON_EVENTS_IMPLICIT */
-
-#ifdef CONFIG_DA_MON_EVENTS_ID
-DECLARE_EVENT_CLASS(event_da_monitor_id,
-
- TP_PROTO(int id, char *state, char *event, char *next_state, bool final_state),
-
- TP_ARGS(id, state, event, next_state, final_state),
-
- TP_STRUCT__entry(
- __field( int, id )
- __array( char, state, MAX_DA_NAME_LEN )
- __array( char, event, MAX_DA_NAME_LEN )
- __array( char, next_state, MAX_DA_NAME_LEN )
- __field( bool, final_state )
- ),
-
- TP_fast_assign(
- memcpy(__entry->state, state, MAX_DA_NAME_LEN);
- memcpy(__entry->event, event, MAX_DA_NAME_LEN);
- memcpy(__entry->next_state, next_state, MAX_DA_NAME_LEN);
- __entry->id = id;
- __entry->final_state = final_state;
- ),
-
- TP_printk("%d: %s x %s -> %s %s",
- __entry->id,
- __entry->state,
- __entry->event,
- __entry->next_state,
- __entry->final_state ? "(final)" : "")
-);
-
-DECLARE_EVENT_CLASS(error_da_monitor_id,
-
- TP_PROTO(int id, char *state, char *event),
-
- TP_ARGS(id, state, event),
-
- TP_STRUCT__entry(
- __field( int, id )
- __array( char, state, MAX_DA_NAME_LEN )
- __array( char, event, MAX_DA_NAME_LEN )
- ),
-
- TP_fast_assign(
- memcpy(__entry->state, state, MAX_DA_NAME_LEN);
- memcpy(__entry->event, event, MAX_DA_NAME_LEN);
- __entry->id = id;
- ),
-
- TP_printk("%d: event %s not expected in the state %s",
- __entry->id,
- __entry->event,
- __entry->state)
-);
-
-#ifdef CONFIG_RV_MON_WWNR
-/* id is the pid of the task */
-DEFINE_EVENT(event_da_monitor_id, event_wwnr,
- TP_PROTO(int id, char *state, char *event, char *next_state, bool final_state),
- TP_ARGS(id, state, event, next_state, final_state));
-
-DEFINE_EVENT(error_da_monitor_id, error_wwnr,
- TP_PROTO(int id, char *state, char *event),
- TP_ARGS(id, state, event));
-#endif /* CONFIG_RV_MON_WWNR */
-
-#endif /* CONFIG_DA_MON_EVENTS_ID */
-#endif /* _TRACE_RV_H */
-
-/* This part ust be outside protection */
-#undef TRACE_INCLUDE_PATH
-#include <trace/define_trace.h>
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 283db0ea3db4..de6f6d25767c 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -25,6 +25,7 @@
EM(afs_abort_probeuuid_negative, "afs-probeuuid-neg") \
EM(afs_abort_send_data_error, "afs-send-data") \
EM(afs_abort_unmarshal_error, "afs-unmarshal") \
+ EM(afs_abort_unsupported_sec_class, "afs-unsup-sec-class") \
/* rxperf errors */ \
EM(rxperf_abort_general_error, "rxperf-error") \
EM(rxperf_abort_oom, "rxperf-oom") \
@@ -68,6 +69,38 @@
EM(rxkad_abort_resp_tkt_sname, "rxkad-resp-tk-sname") \
EM(rxkad_abort_resp_unknown_tkt, "rxkad-resp-unknown-tkt") \
EM(rxkad_abort_resp_version, "rxkad-resp-version") \
+ /* RxGK security errors */ \
+ EM(rxgk_abort_1_verify_mic_eproto, "rxgk1-vfy-mic-eproto") \
+ EM(rxgk_abort_2_decrypt_eproto, "rxgk2-dec-eproto") \
+ EM(rxgk_abort_2_short_data, "rxgk2-short-data") \
+ EM(rxgk_abort_2_short_encdata, "rxgk2-short-encdata") \
+ EM(rxgk_abort_2_short_header, "rxgk2-short-hdr") \
+ EM(rxgk_abort_bad_key_number, "rxgk-bad-key-num") \
+ EM(rxgk_abort_chall_key_expired, "rxgk-chall-key-exp") \
+ EM(rxgk_abort_chall_no_key, "rxgk-chall-nokey") \
+ EM(rxgk_abort_chall_short, "rxgk-chall-short") \
+ EM(rxgk_abort_resp_auth_dec, "rxgk-resp-auth-dec") \
+ EM(rxgk_abort_resp_bad_callid, "rxgk-resp-bad-callid") \
+ EM(rxgk_abort_resp_bad_nonce, "rxgk-resp-bad-nonce") \
+ EM(rxgk_abort_resp_bad_param, "rxgk-resp-bad-param") \
+ EM(rxgk_abort_resp_call_ctr, "rxgk-resp-call-ctr") \
+ EM(rxgk_abort_resp_call_state, "rxgk-resp-call-state") \
+ EM(rxgk_abort_resp_internal_error, "rxgk-resp-int-error") \
+ EM(rxgk_abort_resp_nopkg, "rxgk-resp-nopkg") \
+ EM(rxgk_abort_resp_short_applen, "rxgk-resp-short-applen") \
+ EM(rxgk_abort_resp_short_auth, "rxgk-resp-short-auth") \
+ EM(rxgk_abort_resp_short_call_list, "rxgk-resp-short-callls") \
+ EM(rxgk_abort_resp_short_packet, "rxgk-resp-short-packet") \
+ EM(rxgk_abort_resp_short_yfs_klen, "rxgk-resp-short-yfs-klen") \
+ EM(rxgk_abort_resp_short_yfs_key, "rxgk-resp-short-yfs-key") \
+ EM(rxgk_abort_resp_short_yfs_tkt, "rxgk-resp-short-yfs-tkt") \
+ EM(rxgk_abort_resp_tok_dec, "rxgk-resp-tok-dec") \
+ EM(rxgk_abort_resp_tok_internal_error, "rxgk-resp-tok-int-err") \
+ EM(rxgk_abort_resp_tok_keyerr, "rxgk-resp-tok-keyerr") \
+ EM(rxgk_abort_resp_tok_nokey, "rxgk-resp-tok-nokey") \
+ EM(rxgk_abort_resp_tok_nopkg, "rxgk-resp-tok-nopkg") \
+ EM(rxgk_abort_resp_tok_short, "rxgk-resp-tok-short") \
+ EM(rxgk_abort_resp_xdr_align, "rxgk-resp-xdr-align") \
/* rxrpc errors */ \
EM(rxrpc_abort_call_improper_term, "call-improper-term") \
EM(rxrpc_abort_call_reset, "call-reset") \
@@ -77,13 +110,14 @@
EM(rxrpc_abort_call_timeout, "call-timeout") \
EM(rxrpc_abort_no_service_key, "no-serv-key") \
EM(rxrpc_abort_nomem, "nomem") \
+ EM(rxrpc_abort_response_sendmsg, "resp-sendmsg") \
EM(rxrpc_abort_service_not_offered, "serv-not-offered") \
EM(rxrpc_abort_shut_down, "shut-down") \
EM(rxrpc_abort_unsupported_security, "unsup-sec") \
EM(rxrpc_badmsg_bad_abort, "bad-abort") \
EM(rxrpc_badmsg_bad_jumbo, "bad-jumbo") \
EM(rxrpc_badmsg_short_ack, "short-ack") \
- EM(rxrpc_badmsg_short_ack_info, "short-ack-info") \
+ EM(rxrpc_badmsg_short_ack_trailer, "short-ack-trailer") \
EM(rxrpc_badmsg_short_hdr, "short-hdr") \
EM(rxrpc_badmsg_unsupported_packet, "unsup-pkt") \
EM(rxrpc_badmsg_zero_call, "zero-call") \
@@ -117,8 +151,11 @@
#define rxrpc_call_poke_traces \
EM(rxrpc_call_poke_abort, "Abort") \
EM(rxrpc_call_poke_complete, "Compl") \
+ EM(rxrpc_call_poke_conn_abort, "Conn-abort") \
EM(rxrpc_call_poke_error, "Error") \
EM(rxrpc_call_poke_idle, "Idle") \
+ EM(rxrpc_call_poke_rx_packet, "Rx-packet") \
+ EM(rxrpc_call_poke_set_timeout, "Set-timo") \
EM(rxrpc_call_poke_start, "Start") \
EM(rxrpc_call_poke_timer, "Timer") \
E_(rxrpc_call_poke_timer_now, "Timer-now")
@@ -126,26 +163,37 @@
#define rxrpc_skb_traces \
EM(rxrpc_skb_eaten_by_unshare, "ETN unshare ") \
EM(rxrpc_skb_eaten_by_unshare_nomem, "ETN unshar-nm") \
+ EM(rxrpc_skb_get_call_rx, "GET call-rx ") \
EM(rxrpc_skb_get_conn_secured, "GET conn-secd") \
EM(rxrpc_skb_get_conn_work, "GET conn-work") \
EM(rxrpc_skb_get_local_work, "GET locl-work") \
+ EM(rxrpc_skb_get_post_oob, "GET post-oob ") \
EM(rxrpc_skb_get_reject_work, "GET rej-work ") \
EM(rxrpc_skb_get_to_recvmsg, "GET to-recv ") \
EM(rxrpc_skb_get_to_recvmsg_oos, "GET to-recv-o") \
EM(rxrpc_skb_new_encap_rcv, "NEW encap-rcv") \
EM(rxrpc_skb_new_error_report, "NEW error-rpt") \
EM(rxrpc_skb_new_jumbo_subpacket, "NEW jumbo-sub") \
+ EM(rxrpc_skb_new_response_rxgk, "NEW resp-rxgk") \
+ EM(rxrpc_skb_new_response_rxkad, "NEW resp-rxkd") \
EM(rxrpc_skb_new_unshared, "NEW unshared ") \
+ EM(rxrpc_skb_put_call_rx, "PUT call-rx ") \
+ EM(rxrpc_skb_put_challenge, "PUT challenge") \
EM(rxrpc_skb_put_conn_secured, "PUT conn-secd") \
EM(rxrpc_skb_put_conn_work, "PUT conn-work") \
EM(rxrpc_skb_put_error_report, "PUT error-rep") \
EM(rxrpc_skb_put_input, "PUT input ") \
EM(rxrpc_skb_put_jumbo_subpacket, "PUT jumbo-sub") \
+ EM(rxrpc_skb_put_oob, "PUT oob ") \
EM(rxrpc_skb_put_purge, "PUT purge ") \
+ EM(rxrpc_skb_put_purge_oob, "PUT purge-oob") \
+ EM(rxrpc_skb_put_response, "PUT response ") \
EM(rxrpc_skb_put_rotate, "PUT rotate ") \
EM(rxrpc_skb_put_unknown, "PUT unknown ") \
EM(rxrpc_skb_see_conn_work, "SEE conn-work") \
+ EM(rxrpc_skb_see_oob_challenge, "SEE oob-chall") \
EM(rxrpc_skb_see_recvmsg, "SEE recvmsg ") \
+ EM(rxrpc_skb_see_recvmsg_oob, "SEE recvm-oob") \
EM(rxrpc_skb_see_reject, "SEE reject ") \
EM(rxrpc_skb_see_rotate, "SEE rotate ") \
E_(rxrpc_skb_see_version, "SEE version ")
@@ -163,7 +211,7 @@
EM(rxrpc_local_put_for_use, "PUT for-use ") \
EM(rxrpc_local_put_kill_conn, "PUT conn-kil") \
EM(rxrpc_local_put_peer, "PUT peer ") \
- EM(rxrpc_local_put_prealloc_conn, "PUT conn-pre") \
+ EM(rxrpc_local_put_prealloc_peer, "PUT peer-pre") \
EM(rxrpc_local_put_release_sock, "PUT rel-sock") \
EM(rxrpc_local_stop, "STOP ") \
EM(rxrpc_local_stopped, "STOPPED ") \
@@ -178,7 +226,9 @@
#define rxrpc_peer_traces \
EM(rxrpc_peer_free, "FREE ") \
EM(rxrpc_peer_get_accept, "GET accept ") \
+ EM(rxrpc_peer_get_application, "GET app ") \
EM(rxrpc_peer_get_bundle, "GET bundle ") \
+ EM(rxrpc_peer_get_call, "GET call ") \
EM(rxrpc_peer_get_client_conn, "GET cln-conn") \
EM(rxrpc_peer_get_input, "GET input ") \
EM(rxrpc_peer_get_input_error, "GET inpt-err") \
@@ -187,6 +237,7 @@
EM(rxrpc_peer_get_service_conn, "GET srv-conn") \
EM(rxrpc_peer_new_client, "NEW client ") \
EM(rxrpc_peer_new_prealloc, "NEW prealloc") \
+ EM(rxrpc_peer_put_application, "PUT app ") \
EM(rxrpc_peer_put_bundle, "PUT bundle ") \
EM(rxrpc_peer_put_call, "PUT call ") \
EM(rxrpc_peer_put_conn, "PUT conn ") \
@@ -208,19 +259,24 @@
EM(rxrpc_conn_free, "FREE ") \
EM(rxrpc_conn_get_activate_call, "GET act-call") \
EM(rxrpc_conn_get_call_input, "GET inp-call") \
+ EM(rxrpc_conn_get_challenge_input, "GET inp-chal") \
EM(rxrpc_conn_get_conn_input, "GET inp-conn") \
EM(rxrpc_conn_get_idle, "GET idle ") \
EM(rxrpc_conn_get_poke_abort, "GET pk-abort") \
+ EM(rxrpc_conn_get_poke_response, "GET response") \
+ EM(rxrpc_conn_get_poke_secured, "GET secured ") \
EM(rxrpc_conn_get_poke_timer, "GET poke ") \
EM(rxrpc_conn_get_service_conn, "GET svc-conn") \
EM(rxrpc_conn_new_client, "NEW client ") \
EM(rxrpc_conn_new_service, "NEW service ") \
EM(rxrpc_conn_put_call, "PUT call ") \
EM(rxrpc_conn_put_call_input, "PUT inp-call") \
+ EM(rxrpc_conn_put_challenge_input, "PUT inp-chal") \
EM(rxrpc_conn_put_conn_input, "PUT inp-conn") \
EM(rxrpc_conn_put_discard_idle, "PUT disc-idl") \
EM(rxrpc_conn_put_local_dead, "PUT loc-dead") \
EM(rxrpc_conn_put_noreuse, "PUT noreuse ") \
+ EM(rxrpc_conn_put_oob, "PUT oob ") \
EM(rxrpc_conn_put_poke, "PUT poke ") \
EM(rxrpc_conn_put_service_reaped, "PUT svc-reap") \
EM(rxrpc_conn_put_unbundle, "PUT unbundle") \
@@ -266,26 +322,31 @@
EM(rxrpc_call_put_kernel, "PUT kernel ") \
EM(rxrpc_call_put_poke, "PUT poke ") \
EM(rxrpc_call_put_recvmsg, "PUT recvmsg ") \
+ EM(rxrpc_call_put_release_recvmsg_q, "PUT rls-rcmq") \
EM(rxrpc_call_put_release_sock, "PUT rls-sock") \
EM(rxrpc_call_put_release_sock_tba, "PUT rls-sk-a") \
EM(rxrpc_call_put_sendmsg, "PUT sendmsg ") \
- EM(rxrpc_call_put_unnotify, "PUT unnotify") \
EM(rxrpc_call_put_userid_exists, "PUT u-exists") \
EM(rxrpc_call_put_userid, "PUT user-id ") \
EM(rxrpc_call_see_accept, "SEE accept ") \
EM(rxrpc_call_see_activate_client, "SEE act-clnt") \
+ EM(rxrpc_call_see_already_released, "SEE alrdy-rl") \
EM(rxrpc_call_see_connect_failed, "SEE con-fail") \
EM(rxrpc_call_see_connected, "SEE connect ") \
+ EM(rxrpc_call_see_conn_abort, "SEE conn-abt") \
+ EM(rxrpc_call_see_discard, "SEE discard ") \
EM(rxrpc_call_see_disconnected, "SEE disconn ") \
EM(rxrpc_call_see_distribute_error, "SEE dist-err") \
EM(rxrpc_call_see_input, "SEE input ") \
+ EM(rxrpc_call_see_notify_released, "SEE nfy-rlsd") \
+ EM(rxrpc_call_see_recvmsg, "SEE recvmsg ") \
EM(rxrpc_call_see_release, "SEE release ") \
EM(rxrpc_call_see_userid_exists, "SEE u-exists") \
+ EM(rxrpc_call_see_waiting_call, "SEE q-conn ") \
E_(rxrpc_call_see_zap, "SEE zap ")
#define rxrpc_txqueue_traces \
EM(rxrpc_txqueue_await_reply, "AWR") \
- EM(rxrpc_txqueue_dequeue, "DEQ") \
EM(rxrpc_txqueue_end, "END") \
EM(rxrpc_txqueue_queue, "QUE") \
EM(rxrpc_txqueue_queue_last, "QLS") \
@@ -293,6 +354,13 @@
EM(rxrpc_txqueue_rotate_last, "RLS") \
E_(rxrpc_txqueue_wait, "WAI")
+#define rxrpc_txdata_traces \
+ EM(rxrpc_txdata_inject_loss, " *INJ-LOSS*") \
+ EM(rxrpc_txdata_new_data, " ") \
+ EM(rxrpc_txdata_retransmit, " *RETRANS*") \
+ EM(rxrpc_txdata_tlp_new_data, " *TLP-NEW*") \
+ E_(rxrpc_txdata_tlp_retransmit, " *TLP-RETRANS*")
+
#define rxrpc_receive_traces \
EM(rxrpc_receive_end, "END") \
EM(rxrpc_receive_front, "FRN") \
@@ -314,10 +382,12 @@
EM(rxrpc_recvmsg_full, "FULL") \
EM(rxrpc_recvmsg_hole, "HOLE") \
EM(rxrpc_recvmsg_next, "NEXT") \
+ EM(rxrpc_recvmsg_oobq, "OOBQ") \
EM(rxrpc_recvmsg_requeue, "REQU") \
EM(rxrpc_recvmsg_return, "RETN") \
EM(rxrpc_recvmsg_terminal, "TERM") \
EM(rxrpc_recvmsg_to_be_accepted, "TBAC") \
+ EM(rxrpc_recvmsg_unqueue, "UNQU") \
E_(rxrpc_recvmsg_wait, "WAIT")
#define rxrpc_rtt_tx_traces \
@@ -327,57 +397,51 @@
E_(rxrpc_rtt_tx_ping, "PING")
#define rxrpc_rtt_rx_traces \
- EM(rxrpc_rtt_rx_cancel, "CNCL") \
+ EM(rxrpc_rtt_rx_data_ack, "DACK") \
EM(rxrpc_rtt_rx_obsolete, "OBSL") \
EM(rxrpc_rtt_rx_lost, "LOST") \
- EM(rxrpc_rtt_rx_ping_response, "PONG") \
- E_(rxrpc_rtt_rx_requested_ack, "RACK")
+ E_(rxrpc_rtt_rx_ping_response, "PONG")
#define rxrpc_timer_traces \
- EM(rxrpc_timer_begin, "Begin ") \
- EM(rxrpc_timer_exp_ack, "ExpAck") \
- EM(rxrpc_timer_exp_hard, "ExpHrd") \
- EM(rxrpc_timer_exp_idle, "ExpIdl") \
- EM(rxrpc_timer_exp_keepalive, "ExpKA ") \
- EM(rxrpc_timer_exp_lost_ack, "ExpLoA") \
- EM(rxrpc_timer_exp_normal, "ExpNml") \
- EM(rxrpc_timer_exp_ping, "ExpPng") \
- EM(rxrpc_timer_exp_resend, "ExpRsn") \
- EM(rxrpc_timer_init_for_reply, "IniRpl") \
- EM(rxrpc_timer_init_for_send_reply, "SndRpl") \
- EM(rxrpc_timer_restart, "Restrt") \
- EM(rxrpc_timer_set_for_ack, "SetAck") \
- EM(rxrpc_timer_set_for_hard, "SetHrd") \
- EM(rxrpc_timer_set_for_idle, "SetIdl") \
- EM(rxrpc_timer_set_for_keepalive, "KeepAl") \
- EM(rxrpc_timer_set_for_lost_ack, "SetLoA") \
- EM(rxrpc_timer_set_for_normal, "SetNml") \
- EM(rxrpc_timer_set_for_ping, "SetPng") \
- EM(rxrpc_timer_set_for_resend, "SetRTx") \
- E_(rxrpc_timer_set_for_send, "SetSnd")
+ EM(rxrpc_timer_trace_delayed_ack, "DelayAck ") \
+ EM(rxrpc_timer_trace_expect_rx, "ExpectRx ") \
+ EM(rxrpc_timer_trace_hard, "HardLimit") \
+ EM(rxrpc_timer_trace_idle, "IdleLimit") \
+ EM(rxrpc_timer_trace_keepalive, "KeepAlive") \
+ EM(rxrpc_timer_trace_ping, "DelayPing") \
+ EM(rxrpc_timer_trace_rack_off, "RACK-OFF ") \
+ EM(rxrpc_timer_trace_rack_zwp, "RACK-ZWP ") \
+ EM(rxrpc_timer_trace_rack_reo, "RACK-Reo ") \
+ EM(rxrpc_timer_trace_rack_tlp_pto, "TLP-PTO ") \
+ E_(rxrpc_timer_trace_rack_rto, "RTO ")
#define rxrpc_propose_ack_traces \
EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \
+ EM(rxrpc_propose_ack_delayed_ack, "DlydAck") \
EM(rxrpc_propose_ack_input_data, "DataIn ") \
EM(rxrpc_propose_ack_input_data_hole, "DataInH") \
- EM(rxrpc_propose_ack_ping_for_check_life, "ChkLife") \
EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \
EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
+ EM(rxrpc_propose_ack_ping_for_0_retrans, "0-Retrn") \
+ EM(rxrpc_propose_ack_ping_for_mtu_probe, "MTUProb") \
+ EM(rxrpc_propose_ack_ping_for_old_rtt, "OldRtt ") \
EM(rxrpc_propose_ack_ping_for_params, "Params ") \
+ EM(rxrpc_propose_ack_ping_for_rtt, "Rtt ") \
EM(rxrpc_propose_ack_processing_op, "ProcOp ") \
EM(rxrpc_propose_ack_respond_to_ack, "Rsp2Ack") \
EM(rxrpc_propose_ack_respond_to_ping, "Rsp2Png") \
+ EM(rxrpc_propose_ack_retransmit, "Retrans") \
EM(rxrpc_propose_ack_retry_tx, "RetryTx") \
EM(rxrpc_propose_ack_rotate_rx, "RxAck ") \
EM(rxrpc_propose_ack_rx_idle, "RxIdle ") \
E_(rxrpc_propose_ack_terminal_ack, "ClTerm ")
-#define rxrpc_congest_modes \
- EM(RXRPC_CALL_CONGEST_AVOIDANCE, "CongAvoid") \
- EM(RXRPC_CALL_FAST_RETRANSMIT, "FastReTx ") \
- EM(RXRPC_CALL_PACKET_LOSS, "PktLoss ") \
- E_(RXRPC_CALL_SLOW_START, "SlowStart")
+#define rxrpc_ca_states \
+ EM(RXRPC_CA_CONGEST_AVOIDANCE, "CongAvoid") \
+ EM(RXRPC_CA_FAST_RETRANSMIT, "FastReTx ") \
+ EM(RXRPC_CA_PACKET_LOSS, "PktLoss ") \
+ E_(RXRPC_CA_SLOW_START, "SlowStart")
#define rxrpc_congest_changes \
EM(rxrpc_cong_begin_retransmission, " Retrans") \
@@ -421,6 +485,13 @@
EM(RXRPC_ACK_IDLE, "IDL") \
E_(RXRPC_ACK__INVALID, "-?-")
+#define rxrpc_sack_traces \
+ EM(rxrpc_sack_advance, "ADV") \
+ EM(rxrpc_sack_fill, "FIL") \
+ EM(rxrpc_sack_nack, "NAK") \
+ EM(rxrpc_sack_none, "---") \
+ E_(rxrpc_sack_oos, "OOS")
+
#define rxrpc_completions \
EM(RXRPC_CALL_SUCCEEDED, "Succeeded") \
EM(RXRPC_CALL_REMOTELY_ABORTED, "RemoteAbort") \
@@ -436,14 +507,15 @@
EM(rxrpc_tx_point_call_final_resend, "CallFinalResend") \
EM(rxrpc_tx_point_conn_abort, "ConnAbort") \
EM(rxrpc_tx_point_reject, "Reject") \
+ EM(rxrpc_tx_point_rxgk_challenge, "RxGKChall") \
EM(rxrpc_tx_point_rxkad_challenge, "RxkadChall") \
- EM(rxrpc_tx_point_rxkad_response, "RxkadResp") \
+ EM(rxrpc_tx_point_response, "Response") \
EM(rxrpc_tx_point_version_keepalive, "VerKeepalive") \
E_(rxrpc_tx_point_version_reply, "VerReply")
#define rxrpc_req_ack_traces \
EM(rxrpc_reqack_ack_lost, "ACK-LOST ") \
- EM(rxrpc_reqack_already_on, "ALREADY-ON") \
+ EM(rxrpc_reqack_app_stall, "APP-STALL ") \
EM(rxrpc_reqack_more_rtt, "MORE-RTT ") \
EM(rxrpc_reqack_no_srv_last, "NO-SRVLAST") \
EM(rxrpc_reqack_old_rtt, "OLD-RTT ") \
@@ -453,21 +525,62 @@
/* ---- Must update size of stat_why_req_ack[] if more are added! */
#define rxrpc_txbuf_traces \
- EM(rxrpc_txbuf_alloc_ack, "ALLOC ACK ") \
EM(rxrpc_txbuf_alloc_data, "ALLOC DATA ") \
+ EM(rxrpc_txbuf_alloc_response, "ALLOC RESP ") \
EM(rxrpc_txbuf_free, "FREE ") \
EM(rxrpc_txbuf_get_buffer, "GET BUFFER ") \
EM(rxrpc_txbuf_get_trans, "GET TRANS ") \
EM(rxrpc_txbuf_get_retrans, "GET RETRANS") \
- EM(rxrpc_txbuf_put_ack_tx, "PUT ACK TX ") \
EM(rxrpc_txbuf_put_cleaned, "PUT CLEANED") \
EM(rxrpc_txbuf_put_nomem, "PUT NOMEM ") \
EM(rxrpc_txbuf_put_rotated, "PUT ROTATED") \
+ EM(rxrpc_txbuf_put_response_tx, "PUT RESP TX") \
EM(rxrpc_txbuf_put_send_aborted, "PUT SEND-X ") \
EM(rxrpc_txbuf_put_trans, "PUT TRANS ") \
+ EM(rxrpc_txbuf_see_lost, "SEE LOST ") \
EM(rxrpc_txbuf_see_out_of_step, "OUT-OF-STEP") \
- EM(rxrpc_txbuf_see_send_more, "SEE SEND+ ") \
- E_(rxrpc_txbuf_see_unacked, "SEE UNACKED")
+ E_(rxrpc_txbuf_see_send_more, "SEE SEND+ ")
+
+#define rxrpc_tq_traces \
+ EM(rxrpc_tq_alloc, "ALLOC") \
+ EM(rxrpc_tq_cleaned, "CLEAN") \
+ EM(rxrpc_tq_decant, "DCNT ") \
+ EM(rxrpc_tq_decant_advance, "DCNT>") \
+ EM(rxrpc_tq_queue, "QUEUE") \
+ EM(rxrpc_tq_queue_dup, "QUE!!") \
+ EM(rxrpc_tq_rotate, "ROT ") \
+ EM(rxrpc_tq_rotate_and_free, "ROT-F") \
+ EM(rxrpc_tq_rotate_and_keep, "ROT-K") \
+ EM(rxrpc_tq_transmit, "XMIT ") \
+ E_(rxrpc_tq_transmit_advance, "XMIT>")
+
+#define rxrpc_pmtud_reduce_traces \
+ EM(rxrpc_pmtud_reduce_ack, "Ack ") \
+ EM(rxrpc_pmtud_reduce_icmp, "Icmp ") \
+ E_(rxrpc_pmtud_reduce_route, "Route")
+
+#define rxrpc_rotate_traces \
+ EM(rxrpc_rotate_trace_hack, "hard-ack") \
+ EM(rxrpc_rotate_trace_sack, "soft-ack") \
+ E_(rxrpc_rotate_trace_snak, "soft-nack")
+
+#define rxrpc_rack_timer_modes \
+ EM(RXRPC_CALL_RACKTIMER_OFF, "---") \
+ EM(RXRPC_CALL_RACKTIMER_RACK_REORDER, "REO") \
+ EM(RXRPC_CALL_RACKTIMER_TLP_PTO, "TLP") \
+ E_(RXRPC_CALL_RACKTIMER_RTO, "RTO")
+
+#define rxrpc_tlp_probe_traces \
+ EM(rxrpc_tlp_probe_trace_busy, "busy") \
+ EM(rxrpc_tlp_probe_trace_transmit_new, "transmit-new") \
+ E_(rxrpc_tlp_probe_trace_retransmit, "retransmit")
+
+#define rxrpc_tlp_ack_traces \
+ EM(rxrpc_tlp_ack_trace_acked, "acked") \
+ EM(rxrpc_tlp_ack_trace_dup_acked, "dup-acked") \
+ EM(rxrpc_tlp_ack_trace_hard_beyond, "hard-beyond") \
+ EM(rxrpc_tlp_ack_trace_incomplete, "incomplete") \
+ E_(rxrpc_tlp_ack_trace_new_data, "new-data")
/*
* Generate enums for tracing information.
@@ -489,17 +602,24 @@ enum rxrpc_congest_change { rxrpc_congest_changes } __mode(byte);
enum rxrpc_conn_trace { rxrpc_conn_traces } __mode(byte);
enum rxrpc_local_trace { rxrpc_local_traces } __mode(byte);
enum rxrpc_peer_trace { rxrpc_peer_traces } __mode(byte);
+enum rxrpc_pmtud_reduce_trace { rxrpc_pmtud_reduce_traces } __mode(byte);
enum rxrpc_propose_ack_outcome { rxrpc_propose_ack_outcomes } __mode(byte);
enum rxrpc_propose_ack_trace { rxrpc_propose_ack_traces } __mode(byte);
enum rxrpc_receive_trace { rxrpc_receive_traces } __mode(byte);
enum rxrpc_recvmsg_trace { rxrpc_recvmsg_traces } __mode(byte);
enum rxrpc_req_ack_trace { rxrpc_req_ack_traces } __mode(byte);
+enum rxrpc_rotate_trace { rxrpc_rotate_traces } __mode(byte);
enum rxrpc_rtt_rx_trace { rxrpc_rtt_rx_traces } __mode(byte);
enum rxrpc_rtt_tx_trace { rxrpc_rtt_tx_traces } __mode(byte);
+enum rxrpc_sack_trace { rxrpc_sack_traces } __mode(byte);
enum rxrpc_skb_trace { rxrpc_skb_traces } __mode(byte);
enum rxrpc_timer_trace { rxrpc_timer_traces } __mode(byte);
+enum rxrpc_tlp_ack_trace { rxrpc_tlp_ack_traces } __mode(byte);
+enum rxrpc_tlp_probe_trace { rxrpc_tlp_probe_traces } __mode(byte);
+enum rxrpc_tq_trace { rxrpc_tq_traces } __mode(byte);
enum rxrpc_tx_point { rxrpc_tx_points } __mode(byte);
enum rxrpc_txbuf_trace { rxrpc_txbuf_traces } __mode(byte);
+enum rxrpc_txdata_trace { rxrpc_txdata_traces } __mode(byte);
enum rxrpc_txqueue_trace { rxrpc_txqueue_traces } __mode(byte);
#endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */
@@ -517,23 +637,31 @@ enum rxrpc_txqueue_trace { rxrpc_txqueue_traces } __mode(byte);
rxrpc_abort_reasons;
rxrpc_bundle_traces;
+rxrpc_ca_states;
rxrpc_call_poke_traces;
rxrpc_call_traces;
rxrpc_client_traces;
rxrpc_congest_changes;
-rxrpc_congest_modes;
rxrpc_conn_traces;
rxrpc_local_traces;
+rxrpc_pmtud_reduce_traces;
rxrpc_propose_ack_traces;
+rxrpc_rack_timer_modes;
rxrpc_receive_traces;
rxrpc_recvmsg_traces;
rxrpc_req_ack_traces;
+rxrpc_rotate_traces;
rxrpc_rtt_rx_traces;
rxrpc_rtt_tx_traces;
+rxrpc_sack_traces;
rxrpc_skb_traces;
rxrpc_timer_traces;
+rxrpc_tlp_ack_traces;
+rxrpc_tlp_probe_traces;
+rxrpc_tq_traces;
rxrpc_tx_points;
rxrpc_txbuf_traces;
+rxrpc_txdata_traces;
rxrpc_txqueue_traces;
/*
@@ -552,10 +680,10 @@ TRACE_EVENT(rxrpc_local,
TP_ARGS(local_debug_id, op, ref, usage),
TP_STRUCT__entry(
- __field(unsigned int, local )
- __field(int, op )
- __field(int, ref )
- __field(int, usage )
+ __field(unsigned int, local)
+ __field(int, op)
+ __field(int, ref)
+ __field(int, usage)
),
TP_fast_assign(
@@ -572,15 +700,29 @@ TRACE_EVENT(rxrpc_local,
__entry->usage)
);
+TRACE_EVENT(rxrpc_iothread_rx,
+ TP_PROTO(struct rxrpc_local *local, unsigned int nr_rx),
+ TP_ARGS(local, nr_rx),
+ TP_STRUCT__entry(
+ __field(unsigned int, local)
+ __field(unsigned int, nr_rx)
+ ),
+ TP_fast_assign(
+ __entry->local = local->debug_id;
+ __entry->nr_rx = nr_rx;
+ ),
+ TP_printk("L=%08x nrx=%u", __entry->local, __entry->nr_rx)
+ );
+
TRACE_EVENT(rxrpc_peer,
TP_PROTO(unsigned int peer_debug_id, int ref, enum rxrpc_peer_trace why),
TP_ARGS(peer_debug_id, ref, why),
TP_STRUCT__entry(
- __field(unsigned int, peer )
- __field(int, ref )
- __field(enum rxrpc_peer_trace, why )
+ __field(unsigned int, peer)
+ __field(int, ref)
+ __field(enum rxrpc_peer_trace, why)
),
TP_fast_assign(
@@ -601,9 +743,9 @@ TRACE_EVENT(rxrpc_bundle,
TP_ARGS(bundle_debug_id, ref, why),
TP_STRUCT__entry(
- __field(unsigned int, bundle )
- __field(int, ref )
- __field(int, why )
+ __field(unsigned int, bundle)
+ __field(int, ref)
+ __field(int, why)
),
TP_fast_assign(
@@ -624,9 +766,9 @@ TRACE_EVENT(rxrpc_conn,
TP_ARGS(conn_debug_id, ref, why),
TP_STRUCT__entry(
- __field(unsigned int, conn )
- __field(int, ref )
- __field(int, why )
+ __field(unsigned int, conn)
+ __field(int, ref)
+ __field(int, why)
),
TP_fast_assign(
@@ -648,11 +790,11 @@ TRACE_EVENT(rxrpc_client,
TP_ARGS(conn, channel, op),
TP_STRUCT__entry(
- __field(unsigned int, conn )
- __field(u32, cid )
- __field(int, channel )
- __field(int, usage )
- __field(enum rxrpc_client_trace, op )
+ __field(unsigned int, conn)
+ __field(u32, cid)
+ __field(int, channel)
+ __field(int, usage)
+ __field(enum rxrpc_client_trace, op)
),
TP_fast_assign(
@@ -678,10 +820,10 @@ TRACE_EVENT(rxrpc_call,
TP_ARGS(call_debug_id, ref, aux, why),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(int, ref )
- __field(int, why )
- __field(unsigned long, aux )
+ __field(unsigned int, call)
+ __field(int, ref)
+ __field(int, why)
+ __field(unsigned long, aux)
),
TP_fast_assign(
@@ -705,10 +847,10 @@ TRACE_EVENT(rxrpc_skb,
TP_ARGS(skb, usage, mod_count, why),
TP_STRUCT__entry(
- __field(struct sk_buff *, skb )
- __field(int, usage )
- __field(int, mod_count )
- __field(enum rxrpc_skb_trace, why )
+ __field(struct sk_buff *, skb)
+ __field(int, usage)
+ __field(int, mod_count)
+ __field(enum rxrpc_skb_trace, why)
),
TP_fast_assign(
@@ -731,7 +873,7 @@ TRACE_EVENT(rxrpc_rx_packet,
TP_ARGS(sp),
TP_STRUCT__entry(
- __field_struct(struct rxrpc_host_header, hdr )
+ __field_struct(struct rxrpc_host_header, hdr)
),
TP_fast_assign(
@@ -742,9 +884,8 @@ TRACE_EVENT(rxrpc_rx_packet,
__entry->hdr.epoch, __entry->hdr.cid,
__entry->hdr.callNumber, __entry->hdr.serviceId,
__entry->hdr.serial, __entry->hdr.seq,
- __entry->hdr.type, __entry->hdr.flags,
- __entry->hdr.type <= 15 ?
- __print_symbolic(__entry->hdr.type, rxrpc_pkts) : "?UNK")
+ __entry->hdr.securityIndex, __entry->hdr.flags,
+ __print_symbolic(__entry->hdr.type, rxrpc_pkts))
);
TRACE_EVENT(rxrpc_rx_done,
@@ -753,8 +894,8 @@ TRACE_EVENT(rxrpc_rx_done,
TP_ARGS(result, abort_code),
TP_STRUCT__entry(
- __field(int, result )
- __field(int, abort_code )
+ __field(int, result)
+ __field(int, abort_code)
),
TP_fast_assign(
@@ -765,6 +906,31 @@ TRACE_EVENT(rxrpc_rx_done,
TP_printk("r=%d a=%d", __entry->result, __entry->abort_code)
);
+TRACE_EVENT(rxrpc_abort_call,
+ TP_PROTO(const struct rxrpc_call *call, int abort_code),
+
+ TP_ARGS(call, abort_code),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call_nr)
+ __field(enum rxrpc_abort_reason, why)
+ __field(int, abort_code)
+ __field(int, error)
+ ),
+
+ TP_fast_assign(
+ __entry->call_nr = call->debug_id;
+ __entry->why = call->send_abort_why;
+ __entry->abort_code = abort_code;
+ __entry->error = call->send_abort_err;
+ ),
+
+ TP_printk("c=%08x a=%d e=%d %s",
+ __entry->call_nr,
+ __entry->abort_code, __entry->error,
+ __print_symbolic(__entry->why, rxrpc_abort_reasons))
+ );
+
TRACE_EVENT(rxrpc_abort,
TP_PROTO(unsigned int call_nr, enum rxrpc_abort_reason why,
u32 cid, u32 call_id, rxrpc_seq_t seq, int abort_code, int error),
@@ -772,13 +938,13 @@ TRACE_EVENT(rxrpc_abort,
TP_ARGS(call_nr, why, cid, call_id, seq, abort_code, error),
TP_STRUCT__entry(
- __field(unsigned int, call_nr )
- __field(enum rxrpc_abort_reason, why )
- __field(u32, cid )
- __field(u32, call_id )
- __field(rxrpc_seq_t, seq )
- __field(int, abort_code )
- __field(int, error )
+ __field(unsigned int, call_nr)
+ __field(enum rxrpc_abort_reason, why)
+ __field(u32, cid)
+ __field(u32, call_id)
+ __field(rxrpc_seq_t, seq)
+ __field(int, abort_code)
+ __field(int, error)
),
TP_fast_assign(
@@ -804,10 +970,10 @@ TRACE_EVENT(rxrpc_call_complete,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_call_completion, compl )
- __field(int, error )
- __field(u32, abort_code )
+ __field(unsigned int, call)
+ __field(enum rxrpc_call_completion, compl)
+ __field(int, error)
+ __field(u32, abort_code)
),
TP_fast_assign(
@@ -830,36 +996,103 @@ TRACE_EVENT(rxrpc_txqueue,
TP_ARGS(call, why),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_txqueue_trace, why )
- __field(rxrpc_seq_t, acks_hard_ack )
- __field(rxrpc_seq_t, tx_bottom )
- __field(rxrpc_seq_t, tx_top )
- __field(rxrpc_seq_t, tx_prepared )
- __field(int, tx_winsize )
+ __field(unsigned int, call)
+ __field(enum rxrpc_txqueue_trace, why)
+ __field(rxrpc_seq_t, tx_bottom)
+ __field(rxrpc_seq_t, acks_hard_ack)
+ __field(rxrpc_seq_t, tx_top)
+ __field(rxrpc_seq_t, send_top)
+ __field(int, tx_winsize)
),
TP_fast_assign(
__entry->call = call->debug_id;
__entry->why = why;
- __entry->acks_hard_ack = call->acks_hard_ack;
__entry->tx_bottom = call->tx_bottom;
+ __entry->acks_hard_ack = call->acks_hard_ack;
__entry->tx_top = call->tx_top;
- __entry->tx_prepared = call->tx_prepared;
+ __entry->send_top = call->send_top;
__entry->tx_winsize = call->tx_winsize;
),
- TP_printk("c=%08x %s f=%08x h=%08x n=%u/%u/%u/%u",
+ TP_printk("c=%08x %s b=%08x h=%08x n=%u/%u/%u/%u",
__entry->call,
__print_symbolic(__entry->why, rxrpc_txqueue_traces),
__entry->tx_bottom,
__entry->acks_hard_ack,
- __entry->tx_top - __entry->tx_bottom,
+ __entry->acks_hard_ack - __entry->tx_bottom,
__entry->tx_top - __entry->acks_hard_ack,
- __entry->tx_prepared - __entry->tx_bottom,
+ __entry->send_top - __entry->tx_top,
__entry->tx_winsize)
);
+TRACE_EVENT(rxrpc_transmit,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t send_top, int space),
+
+ TP_ARGS(call, send_top, space),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, seq)
+ __field(u16, space)
+ __field(u16, tx_winsize)
+ __field(u16, cong_cwnd)
+ __field(u16, cong_extra)
+ __field(u16, in_flight)
+ __field(u16, prepared)
+ __field(u16, pmtud_jumbo)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->seq = call->tx_top + 1;
+ __entry->space = space;
+ __entry->tx_winsize = call->tx_winsize;
+ __entry->cong_cwnd = call->cong_cwnd;
+ __entry->cong_extra = call->cong_extra;
+ __entry->prepared = send_top - call->tx_bottom;
+ __entry->in_flight = call->tx_top - call->tx_bottom;
+ __entry->pmtud_jumbo = call->peer->pmtud_jumbo;
+ ),
+
+ TP_printk("c=%08x q=%08x sp=%u tw=%u cw=%u+%u pr=%u if=%u pj=%u",
+ __entry->call,
+ __entry->seq,
+ __entry->space,
+ __entry->tx_winsize,
+ __entry->cong_cwnd,
+ __entry->cong_extra,
+ __entry->prepared,
+ __entry->in_flight,
+ __entry->pmtud_jumbo)
+ );
+
+TRACE_EVENT(rxrpc_tx_rotate,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, rxrpc_seq_t to),
+
+ TP_ARGS(call, seq, to),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, seq)
+ __field(rxrpc_seq_t, to)
+ __field(rxrpc_seq_t, top)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->seq = seq;
+ __entry->to = to;
+ __entry->top = call->tx_top;
+ ),
+
+ TP_printk("c=%08x q=%08x-%08x-%08x",
+ __entry->call,
+ __entry->seq,
+ __entry->to,
+ __entry->top)
+ );
+
TRACE_EVENT(rxrpc_rx_data,
TP_PROTO(unsigned int call, rxrpc_seq_t seq,
rxrpc_serial_t serial, u8 flags),
@@ -867,10 +1100,10 @@ TRACE_EVENT(rxrpc_rx_data,
TP_ARGS(call, seq, serial, flags),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_seq_t, seq )
- __field(rxrpc_serial_t, serial )
- __field(u8, flags )
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, seq)
+ __field(rxrpc_serial_t, serial)
+ __field(u8, flags)
),
TP_fast_assign(
@@ -888,37 +1121,38 @@ TRACE_EVENT(rxrpc_rx_data,
);
TRACE_EVENT(rxrpc_rx_ack,
- TP_PROTO(struct rxrpc_call *call,
- rxrpc_serial_t serial, rxrpc_serial_t ack_serial,
- rxrpc_seq_t first, rxrpc_seq_t prev, u8 reason, u8 n_acks),
+ TP_PROTO(struct rxrpc_call *call, struct rxrpc_skb_priv *sp),
- TP_ARGS(call, serial, ack_serial, first, prev, reason, n_acks),
+ TP_ARGS(call, sp),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_serial_t, serial )
- __field(rxrpc_serial_t, ack_serial )
- __field(rxrpc_seq_t, first )
- __field(rxrpc_seq_t, prev )
- __field(u8, reason )
- __field(u8, n_acks )
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, serial)
+ __field(rxrpc_serial_t, ack_serial)
+ __field(rxrpc_seq_t, first)
+ __field(rxrpc_seq_t, prev)
+ __field(u8, reason)
+ __field(u8, n_acks)
+ __field(u8, user_status)
),
TP_fast_assign(
- __entry->call = call->debug_id;
- __entry->serial = serial;
- __entry->ack_serial = ack_serial;
- __entry->first = first;
- __entry->prev = prev;
- __entry->reason = reason;
- __entry->n_acks = n_acks;
+ __entry->call = call->debug_id;
+ __entry->serial = sp->hdr.serial;
+ __entry->user_status = sp->hdr.userStatus;
+ __entry->ack_serial = sp->ack.acked_serial;
+ __entry->first = sp->ack.first_ack;
+ __entry->prev = sp->ack.prev_ack;
+ __entry->reason = sp->ack.reason;
+ __entry->n_acks = sp->ack.nr_acks;
),
- TP_printk("c=%08x %08x %s r=%08x f=%08x p=%08x n=%u",
+ TP_printk("c=%08x %08x %s r=%08x us=%02x f=%08x p=%08x n=%u",
__entry->call,
__entry->serial,
__print_symbolic(__entry->reason, rxrpc_ack_names),
__entry->ack_serial,
+ __entry->user_status,
__entry->first,
__entry->prev,
__entry->n_acks)
@@ -931,9 +1165,9 @@ TRACE_EVENT(rxrpc_rx_abort,
TP_ARGS(call, serial, abort_code),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_serial_t, serial )
- __field(u32, abort_code )
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, serial)
+ __field(u32, abort_code)
),
TP_fast_assign(
@@ -948,6 +1182,62 @@ TRACE_EVENT(rxrpc_rx_abort,
__entry->abort_code)
);
+TRACE_EVENT(rxrpc_rx_conn_abort,
+ TP_PROTO(const struct rxrpc_connection *conn, const struct sk_buff *skb),
+
+ TP_ARGS(conn, skb),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, conn)
+ __field(rxrpc_serial_t, serial)
+ __field(u32, abort_code)
+ ),
+
+ TP_fast_assign(
+ __entry->conn = conn->debug_id;
+ __entry->serial = rxrpc_skb(skb)->hdr.serial;
+ __entry->abort_code = skb->priority;
+ ),
+
+ TP_printk("C=%08x ABORT %08x ac=%d",
+ __entry->conn,
+ __entry->serial,
+ __entry->abort_code)
+ );
+
+TRACE_EVENT(rxrpc_tx_challenge,
+ TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t serial,
+ u32 version, u32 nonce),
+
+ TP_ARGS(conn, serial, version, nonce),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, conn)
+ __field(rxrpc_serial_t, serial)
+ __field(u32, version)
+ __field(u32, nonce)
+ __field(u16, service_id)
+ __field(u8, security_ix)
+ ),
+
+ TP_fast_assign(
+ __entry->conn = conn->debug_id;
+ __entry->serial = serial;
+ __entry->version = version;
+ __entry->nonce = nonce;
+ __entry->service_id = conn->service_id;
+ __entry->security_ix = conn->security_ix;
+ ),
+
+ TP_printk("C=%08x CHALLENGE r=%08x sv=%u+%u v=%x n=%x",
+ __entry->conn,
+ __entry->serial,
+ __entry->service_id,
+ __entry->security_ix,
+ __entry->version,
+ __entry->nonce)
+ );
+
TRACE_EVENT(rxrpc_rx_challenge,
TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t serial,
u32 version, u32 nonce, u32 min_level),
@@ -955,11 +1245,13 @@ TRACE_EVENT(rxrpc_rx_challenge,
TP_ARGS(conn, serial, version, nonce, min_level),
TP_STRUCT__entry(
- __field(unsigned int, conn )
- __field(rxrpc_serial_t, serial )
- __field(u32, version )
- __field(u32, nonce )
- __field(u32, min_level )
+ __field(unsigned int, conn)
+ __field(rxrpc_serial_t, serial)
+ __field(u32, version)
+ __field(u32, nonce)
+ __field(u32, min_level)
+ __field(u16, service_id)
+ __field(u8, security_ix)
),
TP_fast_assign(
@@ -968,16 +1260,60 @@ TRACE_EVENT(rxrpc_rx_challenge,
__entry->version = version;
__entry->nonce = nonce;
__entry->min_level = min_level;
+ __entry->service_id = conn->service_id;
+ __entry->security_ix = conn->security_ix;
),
- TP_printk("C=%08x CHALLENGE %08x v=%x n=%x ml=%x",
+ TP_printk("C=%08x CHALLENGE r=%08x sv=%u+%u v=%x n=%x ml=%x",
__entry->conn,
__entry->serial,
+ __entry->service_id,
+ __entry->security_ix,
__entry->version,
__entry->nonce,
__entry->min_level)
);
+TRACE_EVENT(rxrpc_tx_response,
+ TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t serial,
+ struct rxrpc_skb_priv *rsp),
+
+ TP_ARGS(conn, serial, rsp),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, conn)
+ __field(rxrpc_serial_t, serial)
+ __field(rxrpc_serial_t, challenge)
+ __field(u32, version)
+ __field(u32, kvno)
+ __field(u16, ticket_len)
+ __field(u16, appdata_len)
+ __field(u16, service_id)
+ __field(u8, security_ix)
+ ),
+
+ TP_fast_assign(
+ __entry->conn = conn->debug_id;
+ __entry->serial = serial;
+ __entry->challenge = rsp->resp.challenge_serial;
+ __entry->version = rsp->resp.version;
+ __entry->kvno = rsp->resp.kvno;
+ __entry->ticket_len = rsp->resp.ticket_len;
+ __entry->service_id = conn->service_id;
+ __entry->security_ix = conn->security_ix;
+ ),
+
+ TP_printk("C=%08x RESPONSE r=%08x cr=%08x sv=%u+%u v=%x kv=%x tl=%u",
+ __entry->conn,
+ __entry->serial,
+ __entry->challenge,
+ __entry->service_id,
+ __entry->security_ix,
+ __entry->version,
+ __entry->kvno,
+ __entry->ticket_len)
+ );
+
TRACE_EVENT(rxrpc_rx_response,
TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t serial,
u32 version, u32 kvno, u32 ticket_len),
@@ -985,11 +1321,12 @@ TRACE_EVENT(rxrpc_rx_response,
TP_ARGS(conn, serial, version, kvno, ticket_len),
TP_STRUCT__entry(
- __field(unsigned int, conn )
- __field(rxrpc_serial_t, serial )
- __field(u32, version )
- __field(u32, kvno )
- __field(u32, ticket_len )
+ __field(unsigned int, conn)
+ __field(rxrpc_serial_t, serial)
+ __field(u32, version)
+ __field(u32, kvno)
+ __field(u32, ticket_len)
+ __field(u8, security_ix)
),
TP_fast_assign(
@@ -998,11 +1335,13 @@ TRACE_EVENT(rxrpc_rx_response,
__entry->version = version;
__entry->kvno = kvno;
__entry->ticket_len = ticket_len;
+ __entry->security_ix = conn->security_ix;
),
- TP_printk("C=%08x RESPONSE %08x v=%x kvno=%x tl=%x",
+ TP_printk("C=%08x RESPONSE r=%08x sx=%u v=%x kvno=%x tl=%x",
__entry->conn,
__entry->serial,
+ __entry->security_ix,
__entry->version,
__entry->kvno,
__entry->ticket_len)
@@ -1015,10 +1354,10 @@ TRACE_EVENT(rxrpc_rx_rwind_change,
TP_ARGS(call, serial, rwind, wake),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_serial_t, serial )
- __field(u32, rwind )
- __field(bool, wake )
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, serial)
+ __field(u32, rwind)
+ __field(bool, wake)
),
TP_fast_assign(
@@ -1042,9 +1381,9 @@ TRACE_EVENT(rxrpc_tx_packet,
TP_ARGS(call_id, whdr, where),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_tx_point, where )
- __field_struct(struct rxrpc_wire_header, whdr )
+ __field(unsigned int, call)
+ __field(enum rxrpc_tx_point, where)
+ __field_struct(struct rxrpc_wire_header, whdr)
),
TP_fast_assign(
@@ -1069,19 +1408,19 @@ TRACE_EVENT(rxrpc_tx_packet,
TRACE_EVENT(rxrpc_tx_data,
TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
- rxrpc_serial_t serial, u8 flags, bool retrans, bool lose),
+ rxrpc_serial_t serial, unsigned int flags,
+ enum rxrpc_txdata_trace trace),
- TP_ARGS(call, seq, serial, flags, retrans, lose),
+ TP_ARGS(call, seq, serial, flags, trace),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_seq_t, seq )
- __field(rxrpc_serial_t, serial )
- __field(u32, cid )
- __field(u32, call_id )
- __field(u8, flags )
- __field(bool, retrans )
- __field(bool, lose )
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, seq)
+ __field(rxrpc_serial_t, serial)
+ __field(u32, cid)
+ __field(u32, call_id)
+ __field(u16, flags)
+ __field(enum rxrpc_txdata_trace, trace)
),
TP_fast_assign(
@@ -1091,35 +1430,36 @@ TRACE_EVENT(rxrpc_tx_data,
__entry->seq = seq;
__entry->serial = serial;
__entry->flags = flags;
- __entry->retrans = retrans;
- __entry->lose = lose;
+ __entry->trace = trace;
),
- TP_printk("c=%08x DATA %08x:%08x %08x q=%08x fl=%02x%s%s",
+ TP_printk("c=%08x DATA %08x:%08x %08x q=%08x fl=%02x%s",
__entry->call,
__entry->cid,
__entry->call_id,
__entry->serial,
__entry->seq,
- __entry->flags,
- __entry->retrans ? " *RETRANS*" : "",
- __entry->lose ? " *LOSE*" : "")
+ __entry->flags & RXRPC_TXBUF_WIRE_FLAGS,
+ __print_symbolic(__entry->trace, rxrpc_txdata_traces))
);
TRACE_EVENT(rxrpc_tx_ack,
TP_PROTO(unsigned int call, rxrpc_serial_t serial,
rxrpc_seq_t ack_first, rxrpc_serial_t ack_serial,
- u8 reason, u8 n_acks),
+ u8 reason, u8 n_acks, u16 rwind,
+ enum rxrpc_propose_ack_trace trace),
- TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks),
+ TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks, rwind, trace),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_serial_t, serial )
- __field(rxrpc_seq_t, ack_first )
- __field(rxrpc_serial_t, ack_serial )
- __field(u8, reason )
- __field(u8, n_acks )
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, serial)
+ __field(rxrpc_seq_t, ack_first)
+ __field(rxrpc_serial_t, ack_serial)
+ __field(u8, reason)
+ __field(u8, n_acks)
+ __field(u16, rwind)
+ __field(enum rxrpc_propose_ack_trace, trace)
),
TP_fast_assign(
@@ -1129,15 +1469,19 @@ TRACE_EVENT(rxrpc_tx_ack,
__entry->ack_serial = ack_serial;
__entry->reason = reason;
__entry->n_acks = n_acks;
+ __entry->rwind = rwind;
+ __entry->trace = trace;
),
- TP_printk(" c=%08x ACK %08x %s f=%08x r=%08x n=%u",
+ TP_printk(" c=%08x ACK %08x %s f=%08x r=%08x n=%u rw=%u %s",
__entry->call,
__entry->serial,
__print_symbolic(__entry->reason, rxrpc_ack_names),
__entry->ack_first,
__entry->ack_serial,
- __entry->n_acks)
+ __entry->n_acks,
+ __entry->rwind,
+ __print_symbolic(__entry->trace, rxrpc_propose_ack_traces))
);
TRACE_EVENT(rxrpc_receive,
@@ -1147,11 +1491,12 @@ TRACE_EVENT(rxrpc_receive,
TP_ARGS(call, why, serial, seq),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_receive_trace, why )
- __field(rxrpc_serial_t, serial )
- __field(rxrpc_seq_t, seq )
- __field(u64, window )
+ __field(unsigned int, call)
+ __field(enum rxrpc_receive_trace, why)
+ __field(rxrpc_serial_t, serial)
+ __field(rxrpc_seq_t, seq)
+ __field(rxrpc_seq_t, window)
+ __field(rxrpc_seq_t, wtop)
),
TP_fast_assign(
@@ -1159,7 +1504,8 @@ TRACE_EVENT(rxrpc_receive,
__entry->why = why;
__entry->serial = serial;
__entry->seq = seq;
- __entry->window = atomic64_read(&call->ackr_window);
+ __entry->window = call->ackr_window;
+ __entry->wtop = call->ackr_wtop;
),
TP_printk("c=%08x %s r=%08x q=%08x w=%08x-%08x",
@@ -1167,8 +1513,8 @@ TRACE_EVENT(rxrpc_receive,
__print_symbolic(__entry->why, rxrpc_receive_traces),
__entry->serial,
__entry->seq,
- lower_32_bits(__entry->window),
- upper_32_bits(__entry->window))
+ __entry->window,
+ __entry->wtop)
);
TRACE_EVENT(rxrpc_recvmsg,
@@ -1178,9 +1524,9 @@ TRACE_EVENT(rxrpc_recvmsg,
TP_ARGS(call_debug_id, why, ret),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_recvmsg_trace, why )
- __field(int, ret )
+ __field(unsigned int, call)
+ __field(enum rxrpc_recvmsg_trace, why)
+ __field(int, ret)
),
TP_fast_assign(
@@ -1203,12 +1549,12 @@ TRACE_EVENT(rxrpc_recvdata,
TP_ARGS(call, why, seq, offset, len, ret),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_recvmsg_trace, why )
- __field(rxrpc_seq_t, seq )
- __field(unsigned int, offset )
- __field(unsigned int, len )
- __field(int, ret )
+ __field(unsigned int, call)
+ __field(enum rxrpc_recvmsg_trace, why)
+ __field(rxrpc_seq_t, seq)
+ __field(unsigned int, offset)
+ __field(unsigned int, len)
+ __field(int, ret)
),
TP_fast_assign(
@@ -1236,10 +1582,10 @@ TRACE_EVENT(rxrpc_rtt_tx,
TP_ARGS(call, why, slot, send_serial),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_rtt_tx_trace, why )
- __field(int, slot )
- __field(rxrpc_serial_t, send_serial )
+ __field(unsigned int, call)
+ __field(enum rxrpc_rtt_tx_trace, why)
+ __field(int, slot)
+ __field(rxrpc_serial_t, send_serial)
),
TP_fast_assign(
@@ -1260,18 +1606,20 @@ TRACE_EVENT(rxrpc_rtt_rx,
TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
int slot,
rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
- u32 rtt, u32 rto),
+ u32 rtt, u32 srtt, u32 rto),
- TP_ARGS(call, why, slot, send_serial, resp_serial, rtt, rto),
+ TP_ARGS(call, why, slot, send_serial, resp_serial, rtt, srtt, rto),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_rtt_rx_trace, why )
- __field(int, slot )
- __field(rxrpc_serial_t, send_serial )
- __field(rxrpc_serial_t, resp_serial )
- __field(u32, rtt )
- __field(u32, rto )
+ __field(unsigned int, call)
+ __field(enum rxrpc_rtt_rx_trace, why)
+ __field(int, slot)
+ __field(rxrpc_serial_t, send_serial)
+ __field(rxrpc_serial_t, resp_serial)
+ __field(u32, rtt)
+ __field(u32, srtt)
+ __field(u32, rto)
+ __field(u32, min_rtt)
),
TP_fast_assign(
@@ -1281,103 +1629,129 @@ TRACE_EVENT(rxrpc_rtt_rx,
__entry->send_serial = send_serial;
__entry->resp_serial = resp_serial;
__entry->rtt = rtt;
+ __entry->srtt = srtt;
__entry->rto = rto;
+ __entry->min_rtt = minmax_get(&call->min_rtt)
),
- TP_printk("c=%08x [%d] %s sr=%08x rr=%08x rtt=%u rto=%u",
+ TP_printk("c=%08x [%d] %s sr=%08x rr=%08x rtt=%u srtt=%u rto=%u min=%u",
__entry->call,
__entry->slot,
__print_symbolic(__entry->why, rxrpc_rtt_rx_traces),
__entry->send_serial,
__entry->resp_serial,
__entry->rtt,
- __entry->rto)
+ __entry->srtt / 8,
+ __entry->rto,
+ __entry->min_rtt)
);
-TRACE_EVENT(rxrpc_timer,
- TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why,
- unsigned long now),
+TRACE_EVENT(rxrpc_timer_set,
+ TP_PROTO(struct rxrpc_call *call, ktime_t delay,
+ enum rxrpc_timer_trace why),
- TP_ARGS(call, why, now),
+ TP_ARGS(call, delay, why),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_timer_trace, why )
- __field(long, now )
- __field(long, ack_at )
- __field(long, ack_lost_at )
- __field(long, resend_at )
- __field(long, ping_at )
- __field(long, expect_rx_by )
- __field(long, expect_req_by )
- __field(long, expect_term_by )
- __field(long, timer )
+ __field(unsigned int, call)
+ __field(enum rxrpc_timer_trace, why)
+ __field(ktime_t, delay)
),
TP_fast_assign(
__entry->call = call->debug_id;
__entry->why = why;
- __entry->now = now;
- __entry->ack_at = call->delay_ack_at;
- __entry->ack_lost_at = call->ack_lost_at;
- __entry->resend_at = call->resend_at;
- __entry->expect_rx_by = call->expect_rx_by;
- __entry->expect_req_by = call->expect_req_by;
- __entry->expect_term_by = call->expect_term_by;
- __entry->timer = call->timer.expires;
+ __entry->delay = delay;
),
- TP_printk("c=%08x %s a=%ld la=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld",
+ TP_printk("c=%08x %s to=%lld",
__entry->call,
__print_symbolic(__entry->why, rxrpc_timer_traces),
- __entry->ack_at - __entry->now,
- __entry->ack_lost_at - __entry->now,
- __entry->resend_at - __entry->now,
- __entry->expect_rx_by - __entry->now,
- __entry->expect_req_by - __entry->now,
- __entry->expect_term_by - __entry->now,
- __entry->timer - __entry->now)
+ ktime_to_us(__entry->delay))
);
-TRACE_EVENT(rxrpc_timer_expired,
- TP_PROTO(struct rxrpc_call *call, unsigned long now),
+TRACE_EVENT(rxrpc_timer_exp,
+ TP_PROTO(struct rxrpc_call *call, ktime_t delay,
+ enum rxrpc_timer_trace why),
+
+ TP_ARGS(call, delay, why),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(enum rxrpc_timer_trace, why)
+ __field(ktime_t, delay)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->why = why;
+ __entry->delay = delay;
+ ),
+
+ TP_printk("c=%08x %s to=%lld",
+ __entry->call,
+ __print_symbolic(__entry->why, rxrpc_timer_traces),
+ ktime_to_us(__entry->delay))
+ );
+
+TRACE_EVENT(rxrpc_timer_can,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why),
- TP_ARGS(call, now),
+ TP_ARGS(call, why),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(long, now )
- __field(long, ack_at )
- __field(long, ack_lost_at )
- __field(long, resend_at )
- __field(long, ping_at )
- __field(long, expect_rx_by )
- __field(long, expect_req_by )
- __field(long, expect_term_by )
- __field(long, timer )
+ __field(unsigned int, call)
+ __field(enum rxrpc_timer_trace, why)
),
TP_fast_assign(
__entry->call = call->debug_id;
- __entry->now = now;
- __entry->ack_at = call->delay_ack_at;
- __entry->ack_lost_at = call->ack_lost_at;
- __entry->resend_at = call->resend_at;
- __entry->expect_rx_by = call->expect_rx_by;
- __entry->expect_req_by = call->expect_req_by;
- __entry->expect_term_by = call->expect_term_by;
- __entry->timer = call->timer.expires;
+ __entry->why = why;
),
- TP_printk("c=%08x EXPIRED a=%ld la=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld",
+ TP_printk("c=%08x %s",
__entry->call,
- __entry->ack_at - __entry->now,
- __entry->ack_lost_at - __entry->now,
- __entry->resend_at - __entry->now,
- __entry->expect_rx_by - __entry->now,
- __entry->expect_req_by - __entry->now,
- __entry->expect_term_by - __entry->now,
- __entry->timer - __entry->now)
+ __print_symbolic(__entry->why, rxrpc_timer_traces))
+ );
+
+TRACE_EVENT(rxrpc_timer_restart,
+ TP_PROTO(struct rxrpc_call *call, ktime_t delay, unsigned long delayj),
+
+ TP_ARGS(call, delay, delayj),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(unsigned long, delayj)
+ __field(ktime_t, delay)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->delayj = delayj;
+ __entry->delay = delay;
+ ),
+
+ TP_printk("c=%08x to=%lld j=%ld",
+ __entry->call,
+ ktime_to_us(__entry->delay),
+ __entry->delayj)
+ );
+
+TRACE_EVENT(rxrpc_timer_expired,
+ TP_PROTO(struct rxrpc_call *call),
+
+ TP_ARGS(call),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ ),
+
+ TP_printk("c=%08x EXPIRED",
+ __entry->call)
);
TRACE_EVENT(rxrpc_rx_lose,
@@ -1386,7 +1760,7 @@ TRACE_EVENT(rxrpc_rx_lose,
TP_ARGS(sp),
TP_STRUCT__entry(
- __field_struct(struct rxrpc_host_header, hdr )
+ __field_struct(struct rxrpc_host_header, hdr)
),
TP_fast_assign(
@@ -1409,10 +1783,10 @@ TRACE_EVENT(rxrpc_propose_ack,
TP_ARGS(call, why, ack_reason, serial),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_propose_ack_trace, why )
- __field(rxrpc_serial_t, serial )
- __field(u8, ack_reason )
+ __field(unsigned int, call)
+ __field(enum rxrpc_propose_ack_trace, why)
+ __field(rxrpc_serial_t, serial)
+ __field(u8, ack_reason)
),
TP_fast_assign(
@@ -1436,10 +1810,10 @@ TRACE_EVENT(rxrpc_send_ack,
TP_ARGS(call, why, ack_reason, serial),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_propose_ack_trace, why )
- __field(rxrpc_serial_t, serial )
- __field(u8, ack_reason )
+ __field(unsigned int, call)
+ __field(enum rxrpc_propose_ack_trace, why)
+ __field(rxrpc_serial_t, serial)
+ __field(u8, ack_reason)
),
TP_fast_assign(
@@ -1463,11 +1837,11 @@ TRACE_EVENT(rxrpc_drop_ack,
TP_ARGS(call, why, ack_reason, serial, nobuf),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_propose_ack_trace, why )
- __field(rxrpc_serial_t, serial )
- __field(u8, ack_reason )
- __field(bool, nobuf )
+ __field(unsigned int, call)
+ __field(enum rxrpc_propose_ack_trace, why)
+ __field(rxrpc_serial_t, serial)
+ __field(u8, ack_reason)
+ __field(bool, nobuf)
),
TP_fast_assign(
@@ -1486,108 +1860,125 @@ TRACE_EVENT(rxrpc_drop_ack,
);
TRACE_EVENT(rxrpc_retransmit,
- TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, s64 expiry),
+ TP_PROTO(struct rxrpc_call *call,
+ struct rxrpc_send_data_req *req,
+ struct rxrpc_txbuf *txb),
- TP_ARGS(call, seq, expiry),
+ TP_ARGS(call, req, txb),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_seq_t, seq )
- __field(s64, expiry )
+ __field(unsigned int, call)
+ __field(unsigned int, qbase)
+ __field(rxrpc_seq_t, seq)
+ __field(rxrpc_serial_t, serial)
),
TP_fast_assign(
__entry->call = call->debug_id;
- __entry->seq = seq;
- __entry->expiry = expiry;
+ __entry->qbase = req->tq->qbase;
+ __entry->seq = req->seq;
+ __entry->serial = txb->serial;
),
- TP_printk("c=%08x q=%x xp=%lld",
+ TP_printk("c=%08x tq=%x q=%x r=%x",
__entry->call,
+ __entry->qbase,
__entry->seq,
- __entry->expiry)
+ __entry->serial)
);
TRACE_EVENT(rxrpc_congest,
- TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary,
- rxrpc_serial_t ack_serial, enum rxrpc_congest_change change),
+ TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary),
- TP_ARGS(call, summary, ack_serial, change),
+ TP_ARGS(call, summary),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_congest_change, change )
- __field(rxrpc_seq_t, hard_ack )
- __field(rxrpc_seq_t, top )
- __field(rxrpc_seq_t, lowest_nak )
- __field(rxrpc_serial_t, ack_serial )
- __field_struct(struct rxrpc_ack_summary, sum )
+ __field(unsigned int, call)
+ __field(enum rxrpc_ca_state, ca_state)
+ __field(rxrpc_seq_t, hard_ack)
+ __field(rxrpc_seq_t, top)
+ __field(rxrpc_seq_t, lowest_nak)
+ __field(u16, nr_sacks)
+ __field(u16, nr_snacks)
+ __field(u16, cwnd)
+ __field(u16, ssthresh)
+ __field(u16, cumul_acks)
+ __field(u16, dup_acks)
+ __field_struct(struct rxrpc_ack_summary, sum)
),
TP_fast_assign(
__entry->call = call->debug_id;
- __entry->change = change;
+ __entry->ca_state = call->cong_ca_state;
__entry->hard_ack = call->acks_hard_ack;
__entry->top = call->tx_top;
__entry->lowest_nak = call->acks_lowest_nak;
- __entry->ack_serial = ack_serial;
+ __entry->nr_sacks = call->acks_nr_sacks;
+ __entry->nr_snacks = call->acks_nr_snacks;
+ __entry->cwnd = call->cong_cwnd;
+ __entry->ssthresh = call->cong_ssthresh;
+ __entry->cumul_acks = call->cong_cumul_acks;
+ __entry->dup_acks = call->cong_dup_acks;
memcpy(&__entry->sum, summary, sizeof(__entry->sum));
),
- TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u nA=%u,%u+%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
+ TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u A=%u+%u/%u+%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
__entry->call,
- __entry->ack_serial,
+ __entry->sum.acked_serial,
__print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names),
__entry->hard_ack,
- __print_symbolic(__entry->sum.mode, rxrpc_congest_modes),
- __entry->sum.cwnd,
- __entry->sum.ssthresh,
- __entry->sum.nr_acks, __entry->sum.saw_nacks,
- __entry->sum.nr_new_acks,
- __entry->sum.nr_rot_new_acks,
+ __print_symbolic(__entry->ca_state, rxrpc_ca_states),
+ __entry->cwnd,
+ __entry->ssthresh,
+ __entry->nr_sacks, __entry->sum.nr_new_sacks,
+ __entry->nr_snacks, __entry->sum.nr_new_snacks,
+ __entry->sum.nr_new_hacks,
__entry->top - __entry->hard_ack,
- __entry->sum.cumulative_acks,
- __entry->sum.dup_acks,
- __entry->lowest_nak, __entry->sum.new_low_nack ? "!" : "",
- __print_symbolic(__entry->change, rxrpc_congest_changes),
+ __entry->cumul_acks,
+ __entry->dup_acks,
+ __entry->lowest_nak, __entry->sum.new_low_snack ? "!" : "",
+ __print_symbolic(__entry->sum.change, rxrpc_congest_changes),
__entry->sum.retrans_timeo ? " rTxTo" : "")
);
TRACE_EVENT(rxrpc_reset_cwnd,
- TP_PROTO(struct rxrpc_call *call, ktime_t now),
+ TP_PROTO(struct rxrpc_call *call, ktime_t since_last_tx, ktime_t rtt),
- TP_ARGS(call, now),
+ TP_ARGS(call, since_last_tx, rtt),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_congest_mode, mode )
- __field(unsigned short, cwnd )
- __field(unsigned short, extra )
- __field(rxrpc_seq_t, hard_ack )
- __field(rxrpc_seq_t, prepared )
- __field(ktime_t, since_last_tx )
- __field(bool, has_data )
+ __field(unsigned int, call)
+ __field(enum rxrpc_ca_state, ca_state)
+ __field(unsigned short, cwnd)
+ __field(unsigned short, extra)
+ __field(rxrpc_seq_t, hard_ack)
+ __field(rxrpc_seq_t, prepared)
+ __field(ktime_t, since_last_tx)
+ __field(ktime_t, rtt)
+ __field(bool, has_data)
),
TP_fast_assign(
__entry->call = call->debug_id;
- __entry->mode = call->cong_mode;
+ __entry->ca_state = call->cong_ca_state;
__entry->cwnd = call->cong_cwnd;
__entry->extra = call->cong_extra;
__entry->hard_ack = call->acks_hard_ack;
- __entry->prepared = call->tx_prepared - call->tx_bottom;
- __entry->since_last_tx = ktime_sub(now, call->tx_last_sent);
- __entry->has_data = !list_empty(&call->tx_sendmsg);
+ __entry->prepared = call->send_top - call->tx_bottom;
+ __entry->since_last_tx = since_last_tx;
+ __entry->rtt = rtt;
+ __entry->has_data = call->tx_bottom != call->tx_top;
),
- TP_printk("c=%08x q=%08x %s cw=%u+%u pr=%u tm=%llu d=%u",
+ TP_printk("c=%08x q=%08x %s cw=%u+%u pr=%u tm=%llu/%llu d=%u",
__entry->call,
__entry->hard_ack,
- __print_symbolic(__entry->mode, rxrpc_congest_modes),
+ __print_symbolic(__entry->ca_state, rxrpc_ca_states),
__entry->cwnd,
__entry->extra,
__entry->prepared,
- ktime_to_ns(__entry->since_last_tx),
+ ktime_to_us(__entry->since_last_tx),
+ ktime_to_us(__entry->rtt),
__entry->has_data)
);
@@ -1597,8 +1988,8 @@ TRACE_EVENT(rxrpc_disconnect_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(u32, abort_code )
+ __field(unsigned int, call)
+ __field(u32, abort_code)
),
TP_fast_assign(
@@ -1617,8 +2008,8 @@ TRACE_EVENT(rxrpc_improper_term,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(u32, abort_code )
+ __field(unsigned int, call)
+ __field(u32, abort_code)
),
TP_fast_assign(
@@ -1637,11 +2028,11 @@ TRACE_EVENT(rxrpc_connect_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned long, user_call_ID )
- __field(u32, cid )
- __field(u32, call_id )
- __field_struct(struct sockaddr_rxrpc, srx )
+ __field(unsigned int, call)
+ __field(unsigned long, user_call_ID)
+ __field(u32, cid)
+ __field(u32, call_id)
+ __field_struct(struct sockaddr_rxrpc, srx)
),
TP_fast_assign(
@@ -1660,24 +2051,49 @@ TRACE_EVENT(rxrpc_connect_call,
&__entry->srx.transport)
);
+TRACE_EVENT(rxrpc_apply_acks,
+ TP_PROTO(struct rxrpc_call *call, struct rxrpc_txqueue *tq),
+
+ TP_ARGS(call, tq),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(unsigned int, nr_rep)
+ __field(rxrpc_seq_t, qbase)
+ __field(unsigned long, acks)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->qbase = tq->qbase;
+ __entry->acks = tq->segment_acked;
+ __entry->nr_rep = tq->nr_reported_acks;
+ ),
+
+ TP_printk("c=%08x tq=%x acks=%016lx rep=%u",
+ __entry->call,
+ __entry->qbase,
+ __entry->acks,
+ __entry->nr_rep)
+ );
+
TRACE_EVENT(rxrpc_resend,
- TP_PROTO(struct rxrpc_call *call, struct sk_buff *ack),
+ TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t ack_serial),
- TP_ARGS(call, ack),
+ TP_ARGS(call, ack_serial),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_seq_t, seq )
- __field(rxrpc_seq_t, transmitted )
- __field(rxrpc_serial_t, ack_serial )
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, seq)
+ __field(rxrpc_seq_t, transmitted)
+ __field(rxrpc_serial_t, ack_serial)
),
TP_fast_assign(
- struct rxrpc_skb_priv *sp = ack ? rxrpc_skb(ack) : NULL;
__entry->call = call->debug_id;
__entry->seq = call->acks_hard_ack;
__entry->transmitted = call->tx_transmitted;
- __entry->ack_serial = sp ? sp->hdr.serial : 0;
+ __entry->ack_serial = ack_serial;
),
TP_printk("c=%08x r=%x q=%x tq=%x",
@@ -1687,6 +2103,63 @@ TRACE_EVENT(rxrpc_resend,
__entry->transmitted)
);
+TRACE_EVENT(rxrpc_resend_lost,
+ TP_PROTO(struct rxrpc_call *call, struct rxrpc_txqueue *tq, unsigned long lost),
+
+ TP_ARGS(call, tq, lost),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, qbase)
+ __field(u8, nr_rep)
+ __field(unsigned long, lost)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->qbase = tq->qbase;
+ __entry->nr_rep = tq->nr_reported_acks;
+ __entry->lost = lost;
+ ),
+
+ TP_printk("c=%08x tq=%x lost=%016lx nr=%u",
+ __entry->call,
+ __entry->qbase,
+ __entry->lost,
+ __entry->nr_rep)
+ );
+
+TRACE_EVENT(rxrpc_rotate,
+ TP_PROTO(struct rxrpc_call *call, struct rxrpc_txqueue *tq,
+ struct rxrpc_ack_summary *summary, rxrpc_seq_t seq,
+ enum rxrpc_rotate_trace trace),
+
+ TP_ARGS(call, tq, summary, seq, trace),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, qbase)
+ __field(rxrpc_seq_t, seq)
+ __field(unsigned int, nr_rep)
+ __field(enum rxrpc_rotate_trace, trace)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->qbase = tq->qbase;
+ __entry->seq = seq;
+ __entry->nr_rep = tq->nr_reported_acks;
+ __entry->trace = trace;
+ ),
+
+ TP_printk("c=%08x tq=%x q=%x nr=%x %s",
+ __entry->call,
+ __entry->qbase,
+ __entry->seq,
+ __entry->nr_rep,
+ __print_symbolic(__entry->trace, rxrpc_rotate_traces))
+ );
+
TRACE_EVENT(rxrpc_rx_icmp,
TP_PROTO(struct rxrpc_peer *peer, struct sock_extended_err *ee,
struct sockaddr_rxrpc *srx),
@@ -1694,9 +2167,9 @@ TRACE_EVENT(rxrpc_rx_icmp,
TP_ARGS(peer, ee, srx),
TP_STRUCT__entry(
- __field(unsigned int, peer )
- __field_struct(struct sock_extended_err, ee )
- __field_struct(struct sockaddr_rxrpc, srx )
+ __field(unsigned int, peer)
+ __field_struct(struct sock_extended_err, ee)
+ __field_struct(struct sockaddr_rxrpc, srx)
),
TP_fast_assign(
@@ -1723,10 +2196,10 @@ TRACE_EVENT(rxrpc_tx_fail,
TP_ARGS(debug_id, serial, ret, where),
TP_STRUCT__entry(
- __field(unsigned int, debug_id )
- __field(rxrpc_serial_t, serial )
- __field(int, ret )
- __field(enum rxrpc_tx_point, where )
+ __field(unsigned int, debug_id)
+ __field(rxrpc_serial_t, serial)
+ __field(int, ret)
+ __field(enum rxrpc_tx_point, where)
),
TP_fast_assign(
@@ -1749,13 +2222,13 @@ TRACE_EVENT(rxrpc_call_reset,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, debug_id )
- __field(u32, cid )
- __field(u32, call_id )
- __field(rxrpc_serial_t, call_serial )
- __field(rxrpc_serial_t, conn_serial )
- __field(rxrpc_seq_t, tx_seq )
- __field(rxrpc_seq_t, rx_seq )
+ __field(unsigned int, debug_id)
+ __field(u32, cid)
+ __field(u32, call_id)
+ __field(rxrpc_serial_t, call_serial)
+ __field(rxrpc_serial_t, conn_serial)
+ __field(rxrpc_seq_t, tx_seq)
+ __field(rxrpc_seq_t, rx_seq)
),
TP_fast_assign(
@@ -1781,8 +2254,8 @@ TRACE_EVENT(rxrpc_notify_socket,
TP_ARGS(debug_id, serial),
TP_STRUCT__entry(
- __field(unsigned int, debug_id )
- __field(rxrpc_serial_t, serial )
+ __field(unsigned int, debug_id)
+ __field(rxrpc_serial_t, serial)
),
TP_fast_assign(
@@ -1796,38 +2269,36 @@ TRACE_EVENT(rxrpc_notify_socket,
);
TRACE_EVENT(rxrpc_rx_discard_ack,
- TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial,
- rxrpc_seq_t first_soft_ack, rxrpc_seq_t call_ackr_first,
- rxrpc_seq_t prev_pkt, rxrpc_seq_t call_ackr_prev),
+ TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
+ rxrpc_seq_t hard_ack, rxrpc_seq_t prev_pkt),
- TP_ARGS(debug_id, serial, first_soft_ack, call_ackr_first,
- prev_pkt, call_ackr_prev),
+ TP_ARGS(call, serial, hard_ack, prev_pkt),
TP_STRUCT__entry(
- __field(unsigned int, debug_id )
- __field(rxrpc_serial_t, serial )
- __field(rxrpc_seq_t, first_soft_ack)
- __field(rxrpc_seq_t, call_ackr_first)
+ __field(unsigned int, debug_id)
+ __field(rxrpc_serial_t, serial)
+ __field(rxrpc_seq_t, hard_ack)
__field(rxrpc_seq_t, prev_pkt)
- __field(rxrpc_seq_t, call_ackr_prev)
+ __field(rxrpc_seq_t, acks_hard_ack)
+ __field(rxrpc_seq_t, acks_prev_seq)
),
TP_fast_assign(
- __entry->debug_id = debug_id;
+ __entry->debug_id = call->debug_id;
__entry->serial = serial;
- __entry->first_soft_ack = first_soft_ack;
- __entry->call_ackr_first = call_ackr_first;
+ __entry->hard_ack = hard_ack;
__entry->prev_pkt = prev_pkt;
- __entry->call_ackr_prev = call_ackr_prev;
+ __entry->acks_hard_ack = call->acks_hard_ack;
+ __entry->acks_prev_seq = call->acks_prev_seq;
),
TP_printk("c=%08x r=%08x %08x<%08x %08x<%08x",
__entry->debug_id,
__entry->serial,
- __entry->first_soft_ack,
- __entry->call_ackr_first,
+ __entry->hard_ack,
+ __entry->acks_hard_ack,
__entry->prev_pkt,
- __entry->call_ackr_prev)
+ __entry->acks_prev_seq)
);
TRACE_EVENT(rxrpc_req_ack,
@@ -1837,9 +2308,9 @@ TRACE_EVENT(rxrpc_req_ack,
TP_ARGS(call_debug_id, seq, why),
TP_STRUCT__entry(
- __field(unsigned int, call_debug_id )
- __field(rxrpc_seq_t, seq )
- __field(enum rxrpc_req_ack_trace, why )
+ __field(unsigned int, call_debug_id)
+ __field(rxrpc_seq_t, seq)
+ __field(enum rxrpc_req_ack_trace, why)
),
TP_fast_assign(
@@ -1862,11 +2333,11 @@ TRACE_EVENT(rxrpc_txbuf,
TP_ARGS(debug_id, call_debug_id, seq, ref, what),
TP_STRUCT__entry(
- __field(unsigned int, debug_id )
- __field(unsigned int, call_debug_id )
- __field(rxrpc_seq_t, seq )
- __field(int, ref )
- __field(enum rxrpc_txbuf_trace, what )
+ __field(unsigned int, debug_id)
+ __field(unsigned int, call_debug_id)
+ __field(rxrpc_seq_t, seq)
+ __field(int, ref)
+ __field(enum rxrpc_txbuf_trace, what)
),
TP_fast_assign(
@@ -1885,6 +2356,33 @@ TRACE_EVENT(rxrpc_txbuf,
__entry->ref)
);
+TRACE_EVENT(rxrpc_tq,
+ TP_PROTO(struct rxrpc_call *call, struct rxrpc_txqueue *tq,
+ rxrpc_seq_t seq, enum rxrpc_tq_trace trace),
+
+ TP_ARGS(call, tq, seq, trace),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call_debug_id)
+ __field(rxrpc_seq_t, qbase)
+ __field(rxrpc_seq_t, seq)
+ __field(enum rxrpc_tq_trace, trace)
+ ),
+
+ TP_fast_assign(
+ __entry->call_debug_id = call->debug_id;
+ __entry->qbase = tq ? tq->qbase : call->tx_qbase;
+ __entry->seq = seq;
+ __entry->trace = trace;
+ ),
+
+ TP_printk("c=%08x bq=%08x q=%08x %s",
+ __entry->call_debug_id,
+ __entry->qbase,
+ __entry->seq,
+ __print_symbolic(__entry->trace, rxrpc_tq_traces))
+ );
+
TRACE_EVENT(rxrpc_poke_call,
TP_PROTO(struct rxrpc_call *call, bool busy,
enum rxrpc_call_poke_trace what),
@@ -1892,9 +2390,9 @@ TRACE_EVENT(rxrpc_poke_call,
TP_ARGS(call, busy, what),
TP_STRUCT__entry(
- __field(unsigned int, call_debug_id )
- __field(bool, busy )
- __field(enum rxrpc_call_poke_trace, what )
+ __field(unsigned int, call_debug_id)
+ __field(bool, busy)
+ __field(enum rxrpc_call_poke_trace, what)
),
TP_fast_assign(
@@ -1915,7 +2413,7 @@ TRACE_EVENT(rxrpc_call_poked,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call_debug_id )
+ __field(unsigned int, call_debug_id)
),
TP_fast_assign(
@@ -1926,6 +2424,411 @@ TRACE_EVENT(rxrpc_call_poked,
__entry->call_debug_id)
);
+TRACE_EVENT(rxrpc_sack,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
+ unsigned int sack, enum rxrpc_sack_trace what),
+
+ TP_ARGS(call, seq, sack, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call_debug_id)
+ __field(rxrpc_seq_t, seq)
+ __field(unsigned int, sack)
+ __field(enum rxrpc_sack_trace, what)
+ ),
+
+ TP_fast_assign(
+ __entry->call_debug_id = call->debug_id;
+ __entry->seq = seq;
+ __entry->sack = sack;
+ __entry->what = what;
+ ),
+
+ TP_printk("c=%08x q=%08x %s k=%x",
+ __entry->call_debug_id,
+ __entry->seq,
+ __print_symbolic(__entry->what, rxrpc_sack_traces),
+ __entry->sack)
+ );
+
+TRACE_EVENT(rxrpc_pmtud_tx,
+ TP_PROTO(struct rxrpc_call *call),
+
+ TP_ARGS(call),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, peer_debug_id)
+ __field(unsigned int, call_debug_id)
+ __field(rxrpc_serial_t, ping_serial)
+ __field(unsigned short, pmtud_trial)
+ __field(unsigned short, pmtud_good)
+ __field(unsigned short, pmtud_bad)
+ ),
+
+ TP_fast_assign(
+ __entry->peer_debug_id = call->peer->debug_id;
+ __entry->call_debug_id = call->debug_id;
+ __entry->ping_serial = call->conn->pmtud_probe;
+ __entry->pmtud_trial = call->peer->pmtud_trial;
+ __entry->pmtud_good = call->peer->pmtud_good;
+ __entry->pmtud_bad = call->peer->pmtud_bad;
+ ),
+
+ TP_printk("P=%08x c=%08x pr=%08x %u-%u-%u",
+ __entry->peer_debug_id,
+ __entry->call_debug_id,
+ __entry->ping_serial,
+ __entry->pmtud_good,
+ __entry->pmtud_trial,
+ __entry->pmtud_bad)
+ );
+
+TRACE_EVENT(rxrpc_pmtud_rx,
+ TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t resp_serial),
+
+ TP_ARGS(conn, resp_serial),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, peer_debug_id)
+ __field(unsigned int, call_debug_id)
+ __field(rxrpc_serial_t, ping_serial)
+ __field(rxrpc_serial_t, resp_serial)
+ __field(unsigned short, max_data)
+ __field(u8, jumbo_max)
+ ),
+
+ TP_fast_assign(
+ __entry->peer_debug_id = conn->peer->debug_id;
+ __entry->call_debug_id = conn->pmtud_call;
+ __entry->ping_serial = conn->pmtud_probe;
+ __entry->resp_serial = resp_serial;
+ __entry->max_data = conn->peer->max_data;
+ __entry->jumbo_max = conn->peer->pmtud_jumbo;
+ ),
+
+ TP_printk("P=%08x c=%08x pr=%08x rr=%08x max=%u jm=%u",
+ __entry->peer_debug_id,
+ __entry->call_debug_id,
+ __entry->ping_serial,
+ __entry->resp_serial,
+ __entry->max_data,
+ __entry->jumbo_max)
+ );
+
+TRACE_EVENT(rxrpc_pmtud_lost,
+ TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t resp_serial),
+
+ TP_ARGS(conn, resp_serial),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, peer_debug_id)
+ __field(unsigned int, call_debug_id)
+ __field(rxrpc_serial_t, ping_serial)
+ __field(rxrpc_serial_t, resp_serial)
+ ),
+
+ TP_fast_assign(
+ __entry->peer_debug_id = conn->peer->debug_id;
+ __entry->call_debug_id = conn->pmtud_call;
+ __entry->ping_serial = conn->pmtud_probe;
+ __entry->resp_serial = resp_serial;
+ ),
+
+ TP_printk("P=%08x c=%08x pr=%08x rr=%08x",
+ __entry->peer_debug_id,
+ __entry->call_debug_id,
+ __entry->ping_serial,
+ __entry->resp_serial)
+ );
+
+TRACE_EVENT(rxrpc_pmtud_reduce,
+ TP_PROTO(struct rxrpc_peer *peer, rxrpc_serial_t serial,
+ unsigned int max_data, enum rxrpc_pmtud_reduce_trace reason),
+
+ TP_ARGS(peer, serial, max_data, reason),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, peer_debug_id)
+ __field(rxrpc_serial_t, serial)
+ __field(unsigned int, max_data)
+ __field(enum rxrpc_pmtud_reduce_trace, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->peer_debug_id = peer->debug_id;
+ __entry->serial = serial;
+ __entry->max_data = max_data;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("P=%08x %s r=%08x m=%u",
+ __entry->peer_debug_id,
+ __print_symbolic(__entry->reason, rxrpc_pmtud_reduce_traces),
+ __entry->serial, __entry->max_data)
+ );
+
+TRACE_EVENT(rxrpc_rack,
+ TP_PROTO(struct rxrpc_call *call, ktime_t timo),
+
+ TP_ARGS(call, timo),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, ack_serial)
+ __field(rxrpc_seq_t, seq)
+ __field(enum rxrpc_rack_timer_mode, mode)
+ __field(unsigned short, nr_sent)
+ __field(unsigned short, nr_lost)
+ __field(unsigned short, nr_resent)
+ __field(unsigned short, nr_sacked)
+ __field(ktime_t, timo)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->ack_serial = call->rx_serial;
+ __entry->seq = call->rack_end_seq;
+ __entry->mode = call->rack_timer_mode;
+ __entry->nr_sent = call->tx_nr_sent;
+ __entry->nr_lost = call->tx_nr_lost;
+ __entry->nr_resent = call->tx_nr_resent;
+ __entry->nr_sacked = call->acks_nr_sacks;
+ __entry->timo = timo;
+ ),
+
+ TP_printk("c=%08x r=%08x q=%08x %s slrs=%u,%u,%u,%u t=%lld",
+ __entry->call, __entry->ack_serial, __entry->seq,
+ __print_symbolic(__entry->mode, rxrpc_rack_timer_modes),
+ __entry->nr_sent, __entry->nr_lost,
+ __entry->nr_resent, __entry->nr_sacked,
+ ktime_to_us(__entry->timo))
+ );
+
+TRACE_EVENT(rxrpc_rack_update,
+ TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary),
+
+ TP_ARGS(call, summary),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, ack_serial)
+ __field(rxrpc_seq_t, seq)
+ __field(int, xmit_ts)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->ack_serial = call->rx_serial;
+ __entry->seq = call->rack_end_seq;
+ __entry->xmit_ts = ktime_sub(call->acks_latest_ts, call->rack_xmit_ts);
+ ),
+
+ TP_printk("c=%08x r=%08x q=%08x xt=%lld",
+ __entry->call, __entry->ack_serial, __entry->seq,
+ ktime_to_us(__entry->xmit_ts))
+ );
+
+TRACE_EVENT(rxrpc_rack_scan_loss,
+ TP_PROTO(struct rxrpc_call *call),
+
+ TP_ARGS(call),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(ktime_t, rack_rtt)
+ __field(ktime_t, rack_reo_wnd)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->rack_rtt = call->rack_rtt;
+ __entry->rack_reo_wnd = call->rack_reo_wnd;
+ ),
+
+ TP_printk("c=%08x rtt=%lld reow=%lld",
+ __entry->call, ktime_to_us(__entry->rack_rtt),
+ ktime_to_us(__entry->rack_reo_wnd))
+ );
+
+TRACE_EVENT(rxrpc_rack_scan_loss_tq,
+ TP_PROTO(struct rxrpc_call *call, const struct rxrpc_txqueue *tq,
+ unsigned long nacks),
+
+ TP_ARGS(call, tq, nacks),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, qbase)
+ __field(unsigned long, nacks)
+ __field(unsigned long, lost)
+ __field(unsigned long, retrans)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->qbase = tq->qbase;
+ __entry->nacks = nacks;
+ __entry->lost = tq->segment_lost;
+ __entry->retrans = tq->segment_retransmitted;
+ ),
+
+ TP_printk("c=%08x q=%08x n=%lx l=%lx r=%lx",
+ __entry->call, __entry->qbase,
+ __entry->nacks, __entry->lost, __entry->retrans)
+ );
+
+TRACE_EVENT(rxrpc_rack_detect_loss,
+ TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary,
+ rxrpc_seq_t seq),
+
+ TP_ARGS(call, summary, seq),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, ack_serial)
+ __field(rxrpc_seq_t, seq)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->ack_serial = call->rx_serial;
+ __entry->seq = seq;
+ ),
+
+ TP_printk("c=%08x r=%08x q=%08x",
+ __entry->call, __entry->ack_serial, __entry->seq)
+ );
+
+TRACE_EVENT(rxrpc_rack_mark_loss_tq,
+ TP_PROTO(struct rxrpc_call *call, const struct rxrpc_txqueue *tq),
+
+ TP_ARGS(call, tq),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, qbase)
+ __field(rxrpc_seq_t, trans)
+ __field(unsigned long, acked)
+ __field(unsigned long, lost)
+ __field(unsigned long, retrans)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->qbase = tq->qbase;
+ __entry->trans = call->tx_transmitted;
+ __entry->acked = tq->segment_acked;
+ __entry->lost = tq->segment_lost;
+ __entry->retrans = tq->segment_retransmitted;
+ ),
+
+ TP_printk("c=%08x tq=%08x txq=%08x a=%lx l=%lx r=%lx",
+ __entry->call, __entry->qbase, __entry->trans,
+ __entry->acked, __entry->lost, __entry->retrans)
+ );
+
+TRACE_EVENT(rxrpc_tlp_probe,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_tlp_probe_trace trace),
+
+ TP_ARGS(call, trace),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, serial)
+ __field(rxrpc_seq_t, seq)
+ __field(enum rxrpc_tlp_probe_trace, trace)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->serial = call->tlp_serial;
+ __entry->seq = call->tlp_seq;
+ __entry->trace = trace;
+ ),
+
+ TP_printk("c=%08x r=%08x pq=%08x %s",
+ __entry->call, __entry->serial, __entry->seq,
+ __print_symbolic(__entry->trace, rxrpc_tlp_probe_traces))
+ );
+
+TRACE_EVENT(rxrpc_tlp_ack,
+ TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary,
+ enum rxrpc_tlp_ack_trace trace),
+
+ TP_ARGS(call, summary, trace),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, serial)
+ __field(rxrpc_seq_t, tlp_seq)
+ __field(rxrpc_seq_t, hard_ack)
+ __field(enum rxrpc_tlp_ack_trace, trace)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->serial = call->tlp_serial;
+ __entry->tlp_seq = call->tlp_seq;
+ __entry->hard_ack = call->acks_hard_ack;
+ __entry->trace = trace;
+ ),
+
+ TP_printk("c=%08x r=%08x pq=%08x hq=%08x %s",
+ __entry->call, __entry->serial,
+ __entry->tlp_seq, __entry->hard_ack,
+ __print_symbolic(__entry->trace, rxrpc_tlp_ack_traces))
+ );
+
+TRACE_EVENT(rxrpc_rack_timer,
+ TP_PROTO(struct rxrpc_call *call, ktime_t delay, bool exp),
+
+ TP_ARGS(call, delay, exp),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(bool, exp)
+ __field(enum rxrpc_rack_timer_mode, mode)
+ __field(ktime_t, delay)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->exp = exp;
+ __entry->mode = call->rack_timer_mode;
+ __entry->delay = delay;
+ ),
+
+ TP_printk("c=%08x %s %s to=%lld",
+ __entry->call,
+ __entry->exp ? "Exp" : "Set",
+ __print_symbolic(__entry->mode, rxrpc_rack_timer_modes),
+ ktime_to_us(__entry->delay))
+ );
+
+TRACE_EVENT(rxrpc_rxgk_rekey,
+ TP_PROTO(struct rxrpc_connection *conn,
+ unsigned int current_key, unsigned int requested_key),
+
+ TP_ARGS(conn, current_key, requested_key),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, conn)
+ __field(unsigned int, current_key)
+ __field(unsigned int, requested_key)
+ ),
+
+ TP_fast_assign(
+ __entry->conn = conn->debug_id;
+ __entry->current_key = current_key;
+ __entry->requested_key = requested_key;
+ ),
+
+ TP_printk("C=%08x cur=%x req=%x",
+ __entry->conn,
+ __entry->current_key,
+ __entry->requested_key)
+ );
+
#undef EM
#undef E_
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index fbb99a61f714..7b2645b50e78 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -20,16 +20,16 @@ TRACE_EVENT(sched_kthread_stop,
TP_ARGS(t),
TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
+ __string( comm, t->comm )
+ __field( pid_t, pid )
),
TP_fast_assign(
- memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
+ __assign_str(comm);
__entry->pid = t->pid;
),
- TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
+ TP_printk("comm=%s pid=%d", __get_str(comm), __entry->pid)
);
/*
@@ -193,9 +193,7 @@ static inline long __trace_sched_switch_state(bool preempt,
{
unsigned int state;
-#ifdef CONFIG_SCHED_DEBUG
BUG_ON(p != current);
-#endif /* CONFIG_SCHED_DEBUG */
/*
* Preemption ignores task state, therefore preempted tasks are always
@@ -239,11 +237,11 @@ TRACE_EVENT(sched_switch,
),
TP_fast_assign(
- memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
+ memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
__entry->prev_pid = prev->pid;
__entry->prev_prio = prev->prio;
__entry->prev_state = __trace_sched_switch_state(preempt, prev_state, prev);
- memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
+ memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
__entry->next_pid = next->pid;
__entry->next_prio = next->prio;
/* XXX SCHED_DEADLINE */
@@ -278,15 +276,15 @@ TRACE_EVENT(sched_migrate_task,
TP_ARGS(p, dest_cpu),
TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( int, prio )
- __field( int, orig_cpu )
- __field( int, dest_cpu )
+ __string( comm, p->comm )
+ __field( pid_t, pid )
+ __field( int, prio )
+ __field( int, orig_cpu )
+ __field( int, dest_cpu )
),
TP_fast_assign(
- memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __assign_str(comm);
__entry->pid = p->pid;
__entry->prio = p->prio; /* XXX SCHED_DEADLINE */
__entry->orig_cpu = task_cpu(p);
@@ -294,7 +292,7 @@ TRACE_EVENT(sched_migrate_task,
),
TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
- __entry->comm, __entry->pid, __entry->prio,
+ __get_str(comm), __entry->pid, __entry->prio,
__entry->orig_cpu, __entry->dest_cpu)
);
@@ -305,19 +303,19 @@ DECLARE_EVENT_CLASS(sched_process_template,
TP_ARGS(p),
TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( int, prio )
+ __string( comm, p->comm )
+ __field( pid_t, pid )
+ __field( int, prio )
),
TP_fast_assign(
- memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __assign_str(comm);
__entry->pid = p->pid;
__entry->prio = p->prio; /* XXX SCHED_DEADLINE */
),
TP_printk("comm=%s pid=%d prio=%d",
- __entry->comm, __entry->pid, __entry->prio)
+ __get_str(comm), __entry->pid, __entry->prio)
);
/*
@@ -328,11 +326,37 @@ DEFINE_EVENT(sched_process_template, sched_process_free,
TP_ARGS(p));
/*
- * Tracepoint for a task exiting:
+ * Tracepoint for a task exiting.
+ * Note, it's a superset of sched_process_template and should be kept
+ * compatible as much as possible. sched_process_exit has an extra
+ * `group_dead` argument, so sched_process_template can't be used,
+ * unfortunately, just like sched_migrate_task above.
*/
-DEFINE_EVENT(sched_process_template, sched_process_exit,
- TP_PROTO(struct task_struct *p),
- TP_ARGS(p));
+TRACE_EVENT(sched_process_exit,
+
+ TP_PROTO(struct task_struct *p, bool group_dead),
+
+ TP_ARGS(p, group_dead),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, prio )
+ __field( bool, group_dead )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->prio = p->prio; /* XXX SCHED_DEADLINE */
+ __entry->group_dead = group_dead;
+ ),
+
+ TP_printk("comm=%s pid=%d prio=%d group_dead=%s",
+ __entry->comm, __entry->pid, __entry->prio,
+ __entry->group_dead ? "true" : "false"
+ )
+);
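/*
 * Illustrative sketch only -- not part of this patch.  It shows what a
 * consumer of the reworked tracepoint sees: TRACE_EVENT() generates
 * register_/unregister_trace_sched_process_exit() helpers matching the new
 * prototype, so a probe now also receives group_dead.  (Attaching from a
 * module would additionally require the tracepoint symbol to be exported,
 * which is not implied here.)
 */
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <trace/events/sched.h>

static void probe_process_exit(void *data, struct task_struct *p,
			       bool group_dead)
{
	/* group_dead is the extra argument introduced by this patch. */
	pr_debug("exit: comm=%s pid=%d group_dead=%d\n",
		 p->comm, p->pid, group_dead);
}

static int __init exit_probe_init(void)
{
	return register_trace_sched_process_exit(probe_process_exit, NULL);
}
late_initcall(exit_probe_init);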
/*
* Tracepoint for waiting on task to unschedule:
@@ -351,19 +375,19 @@ TRACE_EVENT(sched_process_wait,
TP_ARGS(pid),
TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
+ __string( comm, current->comm )
__field( pid_t, pid )
__field( int, prio )
),
TP_fast_assign(
- memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ __assign_str(comm);
__entry->pid = pid_nr(pid);
__entry->prio = current->prio; /* XXX SCHED_DEADLINE */
),
TP_printk("comm=%s pid=%d prio=%d",
- __entry->comm, __entry->pid, __entry->prio)
+ __get_str(comm), __entry->pid, __entry->prio)
);
/*
@@ -376,22 +400,22 @@ TRACE_EVENT(sched_process_fork,
TP_ARGS(parent, child),
TP_STRUCT__entry(
- __array( char, parent_comm, TASK_COMM_LEN )
- __field( pid_t, parent_pid )
- __array( char, child_comm, TASK_COMM_LEN )
- __field( pid_t, child_pid )
+ __string( parent_comm, parent->comm )
+ __field( pid_t, parent_pid )
+ __string( child_comm, child->comm )
+ __field( pid_t, child_pid )
),
TP_fast_assign(
- memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
+ __assign_str(parent_comm);
__entry->parent_pid = parent->pid;
- memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
+ __assign_str(child_comm);
__entry->child_pid = child->pid;
),
TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
- __entry->parent_comm, __entry->parent_pid,
- __entry->child_comm, __entry->child_pid)
+ __get_str(parent_comm), __entry->parent_pid,
+ __get_str(child_comm), __entry->child_pid)
);
/*
@@ -411,7 +435,7 @@ TRACE_EVENT(sched_process_exec,
),
TP_fast_assign(
- __assign_str(filename, bprm->filename);
+ __assign_str(filename);
__entry->pid = p->pid;
__entry->old_pid = old_pid;
),
@@ -420,6 +444,41 @@ TRACE_EVENT(sched_process_exec,
__entry->pid, __entry->old_pid)
);
+/**
+ * sched_prepare_exec - called before setting up new exec
+ * @task: pointer to the current task
+ * @bprm: pointer to linux_binprm used for new exec
+ *
+ * Called before flushing the old exec, where @task is still unchanged, but at
+ * the point of no return during switching to the new exec. At the point it is
+ * called the exec will either succeed, or on failure terminate the task. Also
+ * see the "sched_process_exec" tracepoint, which is called right after @task
+ * has successfully switched to the new exec.
+ */
+TRACE_EVENT(sched_prepare_exec,
+
+ TP_PROTO(struct task_struct *task, struct linux_binprm *bprm),
+
+ TP_ARGS(task, bprm),
+
+ TP_STRUCT__entry(
+ __string( interp, bprm->interp )
+ __string( filename, bprm->filename )
+ __field( pid_t, pid )
+ __string( comm, task->comm )
+ ),
+
+ TP_fast_assign(
+ __assign_str(interp);
+ __assign_str(filename);
+ __entry->pid = task->pid;
+ __assign_str(comm);
+ ),
+
+ TP_printk("interp=%s filename=%s pid=%d comm=%s",
+ __get_str(interp), __get_str(filename),
+ __entry->pid, __get_str(comm))
+);
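/*
 * Illustrative sketch only -- the call site is assumed, not part of this
 * header diff.  TRACE_EVENT() above generates trace_sched_prepare_exec(),
 * which the exec path is expected to invoke once it has passed the point
 * of no return, while current still carries the old image:
 */
#include <linux/binfmts.h>
#include <linux/sched.h>
#include <trace/events/sched.h>

/* Hypothetical helper name; the real caller lives in fs/exec.c. */
static void emit_prepare_exec(struct linux_binprm *bprm)
{
	/* Past the point of no return: failure from here terminates the task. */
	trace_sched_prepare_exec(current, bprm);
}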
#ifdef CONFIG_SCHEDSTATS
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT
@@ -440,19 +499,19 @@ DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template,
TP_ARGS(__perf_task(tsk), __perf_count(delay)),
TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( u64, delay )
+ __string( comm, tsk->comm )
+ __field( pid_t, pid )
+ __field( u64, delay )
),
TP_fast_assign(
- memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __assign_str(comm);
__entry->pid = tsk->pid;
__entry->delay = delay;
),
TP_printk("comm=%s pid=%d delay=%Lu [ns]",
- __entry->comm, __entry->pid,
+ __get_str(comm), __entry->pid,
(unsigned long long)__entry->delay)
);
@@ -493,33 +552,30 @@ DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
*/
DECLARE_EVENT_CLASS(sched_stat_runtime,
- TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
+ TP_PROTO(struct task_struct *tsk, u64 runtime),
- TP_ARGS(tsk, __perf_count(runtime), vruntime),
+ TP_ARGS(tsk, __perf_count(runtime)),
TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( u64, runtime )
- __field( u64, vruntime )
+ __string( comm, tsk->comm )
+ __field( pid_t, pid )
+ __field( u64, runtime )
),
TP_fast_assign(
- memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __assign_str(comm);
__entry->pid = tsk->pid;
__entry->runtime = runtime;
- __entry->vruntime = vruntime;
),
- TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
- __entry->comm, __entry->pid,
- (unsigned long long)__entry->runtime,
- (unsigned long long)__entry->vruntime)
+ TP_printk("comm=%s pid=%d runtime=%Lu [ns]",
+ __get_str(comm), __entry->pid,
+ (unsigned long long)__entry->runtime)
);
DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
- TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
- TP_ARGS(tsk, runtime, vruntime));
+ TP_PROTO(struct task_struct *tsk, u64 runtime),
+ TP_ARGS(tsk, runtime));
/*
* Tracepoint for showing priority inheritance modifying a tasks
@@ -532,14 +588,14 @@ TRACE_EVENT(sched_pi_setprio,
TP_ARGS(tsk, pi_task),
TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( int, oldprio )
- __field( int, newprio )
+ __string( comm, tsk->comm )
+ __field( pid_t, pid )
+ __field( int, oldprio )
+ __field( int, newprio )
),
TP_fast_assign(
- memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __assign_str(comm);
__entry->pid = tsk->pid;
__entry->oldprio = tsk->prio;
__entry->newprio = pi_task ?
@@ -549,7 +605,7 @@ TRACE_EVENT(sched_pi_setprio,
),
TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
- __entry->comm, __entry->pid,
+ __get_str(comm), __entry->pid,
__entry->oldprio, __entry->newprio)
);
@@ -559,19 +615,20 @@ TRACE_EVENT(sched_process_hang,
TP_ARGS(tsk),
TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
+ __string( comm, tsk->comm )
+ __field( pid_t, pid )
),
TP_fast_assign(
- memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __assign_str(comm);
__entry->pid = tsk->pid;
),
- TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
+ TP_printk("comm=%s pid=%d", __get_str(comm), __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */
+#ifdef CONFIG_NUMA_BALANCING
/*
* Tracks migration of tasks from one runqueue to another. Can be used to
* detect if automatic NUMA balancing is bouncing between nodes.
@@ -664,6 +721,90 @@ DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,
TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);
+#define NUMAB_SKIP_REASON \
+ EM( NUMAB_SKIP_UNSUITABLE, "unsuitable" ) \
+ EM( NUMAB_SKIP_SHARED_RO, "shared_ro" ) \
+ EM( NUMAB_SKIP_INACCESSIBLE, "inaccessible" ) \
+ EM( NUMAB_SKIP_SCAN_DELAY, "scan_delay" ) \
+ EM( NUMAB_SKIP_PID_INACTIVE, "pid_inactive" ) \
+ EM( NUMAB_SKIP_IGNORE_PID, "ignore_pid_inactive" ) \
+ EMe(NUMAB_SKIP_SEQ_COMPLETED, "seq_completed" )
+
+/* Redefine for export. */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+NUMAB_SKIP_REASON
+
+/* Redefine for symbolic printing. */
+#undef EM
+#undef EMe
+#define EM(a, b) { a, b },
+#define EMe(a, b) { a, b }
+
+TRACE_EVENT(sched_skip_vma_numa,
+
+ TP_PROTO(struct mm_struct *mm, struct vm_area_struct *vma,
+ enum numa_vmaskip_reason reason),
+
+ TP_ARGS(mm, vma, reason),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, numa_scan_offset)
+ __field(unsigned long, vm_start)
+ __field(unsigned long, vm_end)
+ __field(enum numa_vmaskip_reason, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->numa_scan_offset = mm->numa_scan_offset;
+ __entry->vm_start = vma->vm_start;
+ __entry->vm_end = vma->vm_end;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("numa_scan_offset=%lX vm_start=%lX vm_end=%lX reason=%s",
+ __entry->numa_scan_offset,
+ __entry->vm_start,
+ __entry->vm_end,
+ __print_symbolic(__entry->reason, NUMAB_SKIP_REASON))
+);
+
+TRACE_EVENT(sched_skip_cpuset_numa,
+
+ TP_PROTO(struct task_struct *tsk, nodemask_t *mem_allowed_ptr),
+
+ TP_ARGS(tsk, mem_allowed_ptr),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( pid_t, tgid )
+ __field( pid_t, ngid )
+ __array( unsigned long, mem_allowed, BITS_TO_LONGS(MAX_NUMNODES))
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = task_pid_nr(tsk);
+ __entry->tgid = task_tgid_nr(tsk);
+ __entry->ngid = task_numa_group_id(tsk);
+ BUILD_BUG_ON(sizeof(nodemask_t) != \
+ BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long));
+ memcpy(__entry->mem_allowed, mem_allowed_ptr->bits,
+ sizeof(__entry->mem_allowed));
+ ),
+
+ TP_printk("comm=%s pid=%d tgid=%d ngid=%d mem_nodes_allowed=%*pbl",
+ __entry->comm,
+ __entry->pid,
+ __entry->tgid,
+ __entry->ngid,
+ MAX_NUMNODES, __entry->mem_allowed)
+);
+#endif /* CONFIG_NUMA_BALANCING */
/*
* Tracepoint for waking a polling cpu without an IPI.
@@ -688,53 +829,73 @@ TRACE_EVENT(sched_wake_idle_without_ipi,
/*
* Following tracepoints are not exported in tracefs and provide hooking
* mechanisms only for testing and debugging purposes.
- *
- * Postfixed with _tp to make them easily identifiable in the code.
*/
-DECLARE_TRACE(pelt_cfs_tp,
+DECLARE_TRACE(pelt_cfs,
TP_PROTO(struct cfs_rq *cfs_rq),
TP_ARGS(cfs_rq));
-DECLARE_TRACE(pelt_rt_tp,
+DECLARE_TRACE(pelt_rt,
TP_PROTO(struct rq *rq),
TP_ARGS(rq));
-DECLARE_TRACE(pelt_dl_tp,
+DECLARE_TRACE(pelt_dl,
TP_PROTO(struct rq *rq),
TP_ARGS(rq));
-DECLARE_TRACE(pelt_thermal_tp,
+DECLARE_TRACE(pelt_hw,
TP_PROTO(struct rq *rq),
TP_ARGS(rq));
-DECLARE_TRACE(pelt_irq_tp,
+DECLARE_TRACE(pelt_irq,
TP_PROTO(struct rq *rq),
TP_ARGS(rq));
-DECLARE_TRACE(pelt_se_tp,
+DECLARE_TRACE(pelt_se,
TP_PROTO(struct sched_entity *se),
TP_ARGS(se));
-DECLARE_TRACE(sched_cpu_capacity_tp,
+DECLARE_TRACE(sched_cpu_capacity,
TP_PROTO(struct rq *rq),
TP_ARGS(rq));
-DECLARE_TRACE(sched_overutilized_tp,
+DECLARE_TRACE(sched_overutilized,
TP_PROTO(struct root_domain *rd, bool overutilized),
TP_ARGS(rd, overutilized));
-DECLARE_TRACE(sched_util_est_cfs_tp,
+DECLARE_TRACE(sched_util_est_cfs,
TP_PROTO(struct cfs_rq *cfs_rq),
TP_ARGS(cfs_rq));
-DECLARE_TRACE(sched_util_est_se_tp,
+DECLARE_TRACE(sched_util_est_se,
TP_PROTO(struct sched_entity *se),
TP_ARGS(se));
-DECLARE_TRACE(sched_update_nr_running_tp,
+DECLARE_TRACE(sched_update_nr_running,
TP_PROTO(struct rq *rq, int change),
TP_ARGS(rq, change));
+DECLARE_TRACE(sched_compute_energy,
+ TP_PROTO(struct task_struct *p, int dst_cpu, unsigned long energy,
+ unsigned long max_util, unsigned long busy_time),
+ TP_ARGS(p, dst_cpu, energy, max_util, busy_time));
+
+DECLARE_TRACE(sched_entry,
+ TP_PROTO(bool preempt),
+ TP_ARGS(preempt));
+
+DECLARE_TRACE(sched_exit,
+ TP_PROTO(bool is_switch),
+ TP_ARGS(is_switch));
+
+DECLARE_TRACE_CONDITION(sched_set_state,
+ TP_PROTO(struct task_struct *tsk, int state),
+ TP_ARGS(tsk, state),
+ TP_CONDITION(!!(tsk->__state) != !!state));
+
+DECLARE_TRACE(sched_set_need_resched,
+ TP_PROTO(struct task_struct *tsk, int cpu, int tif),
+ TP_ARGS(tsk, cpu, tif));
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
diff --git a/include/trace/events/sched_ext.h b/include/trace/events/sched_ext.h
new file mode 100644
index 000000000000..d1bf5acd59c5
--- /dev/null
+++ b/include/trace/events/sched_ext.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sched_ext
+
+#if !defined(_TRACE_SCHED_EXT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SCHED_EXT_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(sched_ext_dump,
+
+ TP_PROTO(const char *line),
+
+ TP_ARGS(line),
+
+ TP_STRUCT__entry(
+ __string(line, line)
+ ),
+
+ TP_fast_assign(
+ __assign_str(line);
+ ),
+
+ TP_printk("%s",
+ __get_str(line)
+ )
+);
+
+TRACE_EVENT(sched_ext_event,
+ TP_PROTO(const char *name, __s64 delta),
+ TP_ARGS(name, delta),
+
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field( __s64, delta )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ __entry->delta = delta;
+ ),
+
+ TP_printk("name %s delta %lld",
+ __get_str(name), __entry->delta
+ )
+);
+
+TRACE_EVENT(sched_ext_bypass_lb,
+
+ TP_PROTO(__u32 node, __u32 nr_cpus, __u32 nr_tasks, __u32 nr_balanced,
+ __u32 before_min, __u32 before_max,
+ __u32 after_min, __u32 after_max),
+
+ TP_ARGS(node, nr_cpus, nr_tasks, nr_balanced,
+ before_min, before_max, after_min, after_max),
+
+ TP_STRUCT__entry(
+ __field( __u32, node )
+ __field( __u32, nr_cpus )
+ __field( __u32, nr_tasks )
+ __field( __u32, nr_balanced )
+ __field( __u32, before_min )
+ __field( __u32, before_max )
+ __field( __u32, after_min )
+ __field( __u32, after_max )
+ ),
+
+ TP_fast_assign(
+ __entry->node = node;
+ __entry->nr_cpus = nr_cpus;
+ __entry->nr_tasks = nr_tasks;
+ __entry->nr_balanced = nr_balanced;
+ __entry->before_min = before_min;
+ __entry->before_max = before_max;
+ __entry->after_min = after_min;
+ __entry->after_max = after_max;
+ ),
+
+ TP_printk("node %u: nr_cpus=%u nr_tasks=%u nr_balanced=%u min=%u->%u max=%u->%u",
+ __entry->node, __entry->nr_cpus,
+ __entry->nr_tasks, __entry->nr_balanced,
+ __entry->before_min, __entry->after_min,
+ __entry->before_max, __entry->after_max
+ )
+);
+
+#endif /* _TRACE_SCHED_EXT_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/scmi.h b/include/trace/events/scmi.h
index f160d68f961d..703b7bb68e44 100644
--- a/include/trace/events/scmi.h
+++ b/include/trace/events/scmi.h
@@ -7,6 +7,8 @@
#include <linux/tracepoint.h>
+#define TRACE_SCMI_MAX_TAG_LEN 6
+
TRACE_EVENT(scmi_fc_call,
TP_PROTO(u8 protocol_id, u8 msg_id, u32 res_id, u32 val1, u32 val2),
TP_ARGS(protocol_id, msg_id, res_id, val1, val2),
@@ -34,8 +36,8 @@ TRACE_EVENT(scmi_fc_call,
TRACE_EVENT(scmi_xfer_begin,
TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
- bool poll),
- TP_ARGS(transfer_id, msg_id, protocol_id, seq, poll),
+ bool poll, int inflight),
+ TP_ARGS(transfer_id, msg_id, protocol_id, seq, poll, inflight),
TP_STRUCT__entry(
__field(int, transfer_id)
@@ -43,6 +45,7 @@ TRACE_EVENT(scmi_xfer_begin,
__field(u8, protocol_id)
__field(u16, seq)
__field(bool, poll)
+ __field(int, inflight)
),
TP_fast_assign(
@@ -51,11 +54,12 @@ TRACE_EVENT(scmi_xfer_begin,
__entry->protocol_id = protocol_id;
__entry->seq = seq;
__entry->poll = poll;
+ __entry->inflight = inflight;
),
- TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X poll=%u",
- __entry->protocol_id, __entry->msg_id, __entry->seq,
- __entry->transfer_id, __entry->poll)
+ TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X poll=%u inflight=%d",
+ __entry->protocol_id, __entry->msg_id, __entry->seq,
+ __entry->transfer_id, __entry->poll, __entry->inflight)
);
TRACE_EVENT(scmi_xfer_response_wait,
@@ -88,8 +92,8 @@ TRACE_EVENT(scmi_xfer_response_wait,
TRACE_EVENT(scmi_xfer_end,
TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
- int status),
- TP_ARGS(transfer_id, msg_id, protocol_id, seq, status),
+ int status, int inflight),
+ TP_ARGS(transfer_id, msg_id, protocol_id, seq, status, inflight),
TP_STRUCT__entry(
__field(int, transfer_id)
@@ -97,6 +101,7 @@ TRACE_EVENT(scmi_xfer_end,
__field(u8, protocol_id)
__field(u16, seq)
__field(int, status)
+ __field(int, inflight)
),
TP_fast_assign(
@@ -105,11 +110,12 @@ TRACE_EVENT(scmi_xfer_end,
__entry->protocol_id = protocol_id;
__entry->seq = seq;
__entry->status = status;
+ __entry->inflight = inflight;
),
- TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X s=%d",
- __entry->protocol_id, __entry->msg_id, __entry->seq,
- __entry->transfer_id, __entry->status)
+ TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X s=%d inflight=%d",
+ __entry->protocol_id, __entry->msg_id, __entry->seq,
+ __entry->transfer_id, __entry->status, __entry->inflight)
);
TRACE_EVENT(scmi_rx_done,
@@ -139,14 +145,18 @@ TRACE_EVENT(scmi_rx_done,
);
TRACE_EVENT(scmi_msg_dump,
- TP_PROTO(u8 protocol_id, u8 msg_id, unsigned char *tag, u16 seq,
- int status, void *buf, size_t len),
- TP_ARGS(protocol_id, msg_id, tag, seq, status, buf, len),
+ TP_PROTO(int id, u8 channel_id, u8 protocol_id, u8 msg_id,
+ unsigned char *tag, u16 seq, int status,
+ void *buf, size_t len),
+ TP_ARGS(id, channel_id, protocol_id, msg_id, tag, seq, status,
+ buf, len),
TP_STRUCT__entry(
+ __field(int, id)
+ __field(u8, channel_id)
__field(u8, protocol_id)
__field(u8, msg_id)
- __array(char, tag, 5)
+ __array(char, tag, TRACE_SCMI_MAX_TAG_LEN)
__field(u16, seq)
__field(int, status)
__field(size_t, len)
@@ -154,18 +164,20 @@ TRACE_EVENT(scmi_msg_dump,
),
TP_fast_assign(
+ __entry->id = id;
+ __entry->channel_id = channel_id;
__entry->protocol_id = protocol_id;
__entry->msg_id = msg_id;
- strscpy(__entry->tag, tag, 5);
+ strscpy(__entry->tag, tag, TRACE_SCMI_MAX_TAG_LEN);
__entry->seq = seq;
__entry->status = status;
__entry->len = len;
memcpy(__get_dynamic_array(cmd), buf, __entry->len);
),
- TP_printk("pt=%02X t=%s msg_id=%02X seq=%04X s=%d pyld=%s",
- __entry->protocol_id, __entry->tag, __entry->msg_id,
- __entry->seq, __entry->status,
+ TP_printk("id=%d ch=%02X pt=%02X t=%s msg_id=%02X seq=%04X s=%d pyld=%s",
+ __entry->id, __entry->channel_id, __entry->protocol_id,
+ __entry->tag, __entry->msg_id, __entry->seq, __entry->status,
__print_hex_str(__get_dynamic_array(cmd), __entry->len))
);
#endif /* _TRACE_SCMI_H */
diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h
index a2c7befd451a..c36c72ab7f2b 100644
--- a/include/trace/events/scsi.h
+++ b/include/trace/events/scsi.h
@@ -29,8 +29,8 @@
scsi_opcode_name(INQUIRY), \
scsi_opcode_name(RECOVER_BUFFERED_DATA), \
scsi_opcode_name(MODE_SELECT), \
- scsi_opcode_name(RESERVE), \
- scsi_opcode_name(RELEASE), \
+ scsi_opcode_name(RESERVE_6), \
+ scsi_opcode_name(RELEASE_6), \
scsi_opcode_name(COPY), \
scsi_opcode_name(ERASE), \
scsi_opcode_name(MODE_SENSE), \
@@ -102,6 +102,7 @@
scsi_opcode_name(WRITE_32), \
scsi_opcode_name(WRITE_SAME_32), \
scsi_opcode_name(ATA_16), \
+ scsi_opcode_name(WRITE_ATOMIC_16), \
scsi_opcode_name(ATA_12))
#define scsi_hostbyte_name(result) { result, #result }
@@ -199,6 +200,14 @@ TRACE_EVENT(scsi_dispatch_cmd_start,
__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len))
);
+#define scsi_rtn_name(result) { result, #result }
+#define show_rtn_name(val) \
+ __print_symbolic(val, \
+ scsi_rtn_name(SCSI_MLQUEUE_HOST_BUSY), \
+ scsi_rtn_name(SCSI_MLQUEUE_DEVICE_BUSY), \
+ scsi_rtn_name(SCSI_MLQUEUE_EH_RETRY), \
+ scsi_rtn_name(SCSI_MLQUEUE_TARGET_BUSY))
+
TRACE_EVENT(scsi_dispatch_cmd_error,
TP_PROTO(struct scsi_cmnd *cmd, int rtn),
@@ -239,14 +248,15 @@ TRACE_EVENT(scsi_dispatch_cmd_error,
TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
" prot_op=%s driver_tag=%d scheduler_tag=%d cmnd=(%s %s raw=%s)" \
- " rtn=%d",
+ " rtn=%s",
__entry->host_no, __entry->channel, __entry->id,
__entry->lun, __entry->data_sglen, __entry->prot_sglen,
show_prot_op_name(__entry->prot_op), __entry->driver_tag,
__entry->scheduler_tag, show_opcode_name(__entry->opcode),
__parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len),
- __entry->rtn)
+ show_rtn_name(__entry->rtn)
+ )
);
DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,
@@ -269,9 +279,14 @@ DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,
__field( unsigned int, prot_sglen )
__field( unsigned char, prot_op )
__dynamic_array(unsigned char, cmnd, cmd->cmd_len)
+ __field( u8, sense_key )
+ __field( u8, asc )
+ __field( u8, ascq )
),
TP_fast_assign(
+ struct scsi_sense_hdr sshdr;
+
__entry->host_no = cmd->device->host->host_no;
__entry->channel = cmd->device->channel;
__entry->id = cmd->device->id;
@@ -285,11 +300,22 @@ DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,
__entry->prot_sglen = scsi_prot_sg_count(cmd);
__entry->prot_op = scsi_get_prot_op(cmd);
memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);
+ if (cmd->sense_buffer && SCSI_SENSE_VALID(cmd) &&
+ scsi_command_normalize_sense(cmd, &sshdr)) {
+ __entry->sense_key = sshdr.sense_key;
+ __entry->asc = sshdr.asc;
+ __entry->ascq = sshdr.ascq;
+ } else {
+ __entry->sense_key = 0;
+ __entry->asc = 0;
+ __entry->ascq = 0;
+ }
),
TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u " \
"prot_op=%s driver_tag=%d scheduler_tag=%d cmnd=(%s %s raw=%s) " \
- "result=(driver=%s host=%s message=%s status=%s)",
+ "result=(driver=%s host=%s message=%s status=%s) "
+ "sense=(key=%#x asc=%#x ascq=%#x)",
__entry->host_no, __entry->channel, __entry->id,
__entry->lun, __entry->data_sglen, __entry->prot_sglen,
show_prot_op_name(__entry->prot_op), __entry->driver_tag,
@@ -299,7 +325,8 @@ DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,
"DRIVER_OK",
show_hostbyte_name(((__entry->result) >> 16) & 0xff),
"COMMAND_COMPLETE",
- show_statusbyte_name(__entry->result & 0xff))
+ show_statusbyte_name(__entry->result & 0xff),
+ __entry->sense_key, __entry->asc, __entry->ascq)
);
DEFINE_EVENT(scsi_cmd_done_timeout_template, scsi_dispatch_cmd_done,
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
index 25ab1ff9423d..b877133cd93a 100644
--- a/include/trace/events/skb.h
+++ b/include/trace/events/skb.h
@@ -24,13 +24,14 @@ DEFINE_DROP_REASON(FN, FN)
TRACE_EVENT(kfree_skb,
TP_PROTO(struct sk_buff *skb, void *location,
- enum skb_drop_reason reason),
+ enum skb_drop_reason reason, struct sock *rx_sk),
- TP_ARGS(skb, location, reason),
+ TP_ARGS(skb, location, reason, rx_sk),
TP_STRUCT__entry(
__field(void *, skbaddr)
__field(void *, location)
+ __field(void *, rx_sk)
__field(unsigned short, protocol)
__field(enum skb_drop_reason, reason)
),
@@ -38,12 +39,14 @@ TRACE_EVENT(kfree_skb,
TP_fast_assign(
__entry->skbaddr = skb;
__entry->location = location;
+ __entry->rx_sk = rx_sk;
__entry->protocol = ntohs(skb->protocol);
__entry->reason = reason;
),
- TP_printk("skbaddr=%p protocol=%u location=%pS reason: %s",
- __entry->skbaddr, __entry->protocol, __entry->location,
+ TP_printk("skbaddr=%p rx_sk=%p protocol=%u location=%pS reason: %s",
+ __entry->skbaddr, __entry->rx_sk, __entry->protocol,
+ __entry->location,
__print_symbolic(__entry->reason,
DEFINE_DROP_REASON(FN, FNe)))
);
@@ -53,19 +56,21 @@ TRACE_EVENT(kfree_skb,
TRACE_EVENT(consume_skb,
- TP_PROTO(struct sk_buff *skb),
+ TP_PROTO(struct sk_buff *skb, void *location),
- TP_ARGS(skb),
+ TP_ARGS(skb, location),
TP_STRUCT__entry(
- __field( void *, skbaddr )
+ __field( void *, skbaddr)
+ __field( void *, location)
),
TP_fast_assign(
__entry->skbaddr = skb;
+ __entry->location = location;
),
- TP_printk("skbaddr=%p", __entry->skbaddr)
+ TP_printk("skbaddr=%p location=%pS", __entry->skbaddr, __entry->location)
);
TRACE_EVENT(skb_copy_datagram_iovec,
diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h
index 777ee6cbe933..b5310439536e 100644
--- a/include/trace/events/sock.h
+++ b/include/trace/events/sock.h
@@ -10,6 +10,7 @@
#include <linux/tracepoint.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
+#include <trace/events/net_probe_common.h>
#define family_names \
EM(AF_INET) \
@@ -18,7 +19,6 @@
/* The protocol traced by inet_sock_set_state */
#define inet_protocol_names \
EM(IPPROTO_TCP) \
- EM(IPPROTO_DCCP) \
EM(IPPROTO_SCTP) \
EMe(IPPROTO_MPTCP)
@@ -109,7 +109,7 @@ TRACE_EVENT(sock_exceed_buf_limit,
),
TP_fast_assign(
- strncpy(__entry->name, prot->name, 32);
+ strscpy(__entry->name, prot->name, 32);
__entry->sysctl_mem[0] = READ_ONCE(prot->sysctl_mem[0]);
__entry->sysctl_mem[1] = READ_ONCE(prot->sysctl_mem[1]);
__entry->sysctl_mem[2] = READ_ONCE(prot->sysctl_mem[2]);
@@ -158,8 +158,7 @@ TRACE_EVENT(inet_sock_set_state,
),
TP_fast_assign(
- struct inet_sock *inet = inet_sk(sk);
- struct in6_addr *pin6;
+ const struct inet_sock *inet = inet_sk(sk);
__be32 *p32;
__entry->skaddr = sk;
@@ -177,20 +176,8 @@ TRACE_EVENT(inet_sock_set_state,
p32 = (__be32 *) __entry->daddr;
*p32 = inet->inet_daddr;
-#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family == AF_INET6) {
- pin6 = (struct in6_addr *)__entry->saddr_v6;
- *pin6 = sk->sk_v6_rcv_saddr;
- pin6 = (struct in6_addr *)__entry->daddr_v6;
- *pin6 = sk->sk_v6_daddr;
- } else
-#endif
- {
- pin6 = (struct in6_addr *)__entry->saddr_v6;
- ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
- pin6 = (struct in6_addr *)__entry->daddr_v6;
- ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
- }
+ TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+ sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
),
TP_printk("family=%s protocol=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s",
@@ -222,8 +209,7 @@ TRACE_EVENT(inet_sk_error_report,
),
TP_fast_assign(
- struct inet_sock *inet = inet_sk(sk);
- struct in6_addr *pin6;
+ const struct inet_sock *inet = inet_sk(sk);
__be32 *p32;
__entry->error = sk->sk_err;
@@ -238,20 +224,8 @@ TRACE_EVENT(inet_sk_error_report,
p32 = (__be32 *) __entry->daddr;
*p32 = inet->inet_daddr;
-#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family == AF_INET6) {
- pin6 = (struct in6_addr *)__entry->saddr_v6;
- *pin6 = sk->sk_v6_rcv_saddr;
- pin6 = (struct in6_addr *)__entry->daddr_v6;
- *pin6 = sk->sk_v6_daddr;
- } else
-#endif
- {
- pin6 = (struct in6_addr *)__entry->saddr_v6;
- ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
- pin6 = (struct in6_addr *)__entry->daddr_v6;
- ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
- }
+ TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+ sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
),
TP_printk("family=%s protocol=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c error=%d",
@@ -263,6 +237,75 @@ TRACE_EVENT(inet_sk_error_report,
__entry->error)
);
+TRACE_EVENT(sk_data_ready,
+
+ TP_PROTO(const struct sock *sk),
+
+ TP_ARGS(sk),
+
+ TP_STRUCT__entry(
+ __field(const void *, skaddr)
+ __field(__u16, family)
+ __field(__u16, protocol)
+ __field(unsigned long, ip)
+ ),
+
+ TP_fast_assign(
+ __entry->skaddr = sk;
+ __entry->family = sk->sk_family;
+ __entry->protocol = sk->sk_protocol;
+ __entry->ip = _RET_IP_;
+ ),
+
+ TP_printk("family=%u protocol=%u func=%ps",
+ __entry->family, __entry->protocol, (void *)__entry->ip)
+);
+
+/*
+ * sock send/recv msg length
+ */
+DECLARE_EVENT_CLASS(sock_msg_length,
+
+ TP_PROTO(struct sock *sk, int ret, int flags),
+
+ TP_ARGS(sk, ret, flags),
+
+ TP_STRUCT__entry(
+ __field(void *, sk)
+ __field(__u16, family)
+ __field(__u16, protocol)
+ __field(int, ret)
+ __field(int, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->sk = sk;
+ __entry->family = sk->sk_family;
+ __entry->protocol = sk->sk_protocol;
+ __entry->ret = ret;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("sk address = %p, family = %s protocol = %s, length = %d, error = %d, flags = 0x%x",
+ __entry->sk, show_family_name(__entry->family),
+ show_inet_protocol_name(__entry->protocol),
+ !(__entry->flags & MSG_PEEK) ?
+ (__entry->ret > 0 ? __entry->ret : 0) : 0,
+ __entry->ret < 0 ? __entry->ret : 0,
+ __entry->flags)
+);
+
+DEFINE_EVENT(sock_msg_length, sock_send_length,
+ TP_PROTO(struct sock *sk, int ret, int flags),
+
+ TP_ARGS(sk, ret, flags)
+);
+
+DEFINE_EVENT(sock_msg_length, sock_recv_length,
+ TP_PROTO(struct sock *sk, int ret, int flags),
+
+ TP_ARGS(sk, ret, flags)
+);
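/*
 * Illustrative sketch only -- the call site is assumed, not part of this
 * diff.  The single `ret` argument doubles as byte count (>= 0) and error
 * code (< 0), which is why the TP_printk() above splits it; a send path
 * would be expected to report the raw return value, roughly like this:
 */
#include <linux/net.h>
#include <linux/socket.h>
#include <net/sock.h>
#include <trace/events/sock.h>

/* Hypothetical wrapper; the real callers live in net/socket.c. */
static int traced_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	int ret = sock->ops->sendmsg(sock, msg, size);

	trace_sock_send_length(sock->sk, ret, msg->msg_flags);
	return ret;
}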
#endif /* _TRACE_SOCK_H */
/* This part must be outside protection */
diff --git a/include/trace/events/sof.h b/include/trace/events/sof.h
index 21c2a1efb9f6..3061423c0667 100644
--- a/include/trace/events/sof.h
+++ b/include/trace/events/sof.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright(c) 2022 Intel Corporation. All rights reserved.
+ * Copyright(c) 2022 Intel Corporation
*
* Author: Noah Klayman <noah.klayman@intel.com>
*/
@@ -23,7 +23,7 @@ DECLARE_EVENT_CLASS(sof_widget_template,
__field(int, use_count)
),
TP_fast_assign(
- __assign_str(name, swidget->widget->name);
+ __assign_str(name);
__entry->use_count = swidget->use_count;
),
TP_printk("name=%s use_count=%d", __get_str(name), __entry->use_count)
@@ -49,7 +49,7 @@ TRACE_EVENT(sof_ipc3_period_elapsed_position,
__field(u64, wallclock)
),
TP_fast_assign(
- __assign_str(device_name, dev_name(sdev->dev));
+ __assign_str(device_name);
__entry->host_posn = posn->host_posn;
__entry->dai_posn = posn->dai_posn;
__entry->wallclock = posn->wallclock;
@@ -75,7 +75,7 @@ TRACE_EVENT(sof_pcm_pointer_position,
__field(unsigned long, dai_posn)
),
TP_fast_assign(
- __assign_str(device_name, dev_name(sdev->dev));
+ __assign_str(device_name);
__entry->pcm_id = le32_to_cpu(spcm->pcm.pcm_id);
__entry->stream = substream->stream;
__entry->dma_posn = dma_posn;
@@ -93,7 +93,7 @@ TRACE_EVENT(sof_stream_position_ipc_rx,
__string(device_name, dev_name(dev))
),
TP_fast_assign(
- __assign_str(device_name, dev_name(dev));
+ __assign_str(device_name);
),
TP_printk("device_name=%s", __get_str(device_name))
);
@@ -107,8 +107,8 @@ TRACE_EVENT(sof_ipc4_fw_config,
__field(u32, value)
),
TP_fast_assign(
- __assign_str(device_name, dev_name(sdev->dev));
- __assign_str(key, key);
+ __assign_str(device_name);
+ __assign_str(key);
__entry->value = value;
),
TP_printk("device_name=%s key=%s value=%d",
diff --git a/include/trace/events/sof_intel.h b/include/trace/events/sof_intel.h
index 2a77f9d26c0b..9e579e57b15c 100644
--- a/include/trace/events/sof_intel.h
+++ b/include/trace/events/sof_intel.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright(c) 2022 Intel Corporation. All rights reserved.
+ * Copyright(c) 2022 Intel Corporation
*
* Author: Noah Klayman <noah.klayman@intel.com>
*/
@@ -22,8 +22,8 @@ TRACE_EVENT(sof_intel_hda_irq,
__string(source, source)
),
TP_fast_assign(
- __assign_str(device_name, dev_name(sdev->dev));
- __assign_str(source, source);
+ __assign_str(device_name);
+ __assign_str(source);
),
TP_printk("device_name=%s source=%s",
__get_str(device_name), __get_str(source))
@@ -38,7 +38,7 @@ DECLARE_EVENT_CLASS(sof_intel_ipc_firmware_template,
__field(u32, msg_ext)
),
TP_fast_assign(
- __assign_str(device_name, dev_name(sdev->dev));
+ __assign_str(device_name);
__entry->msg = msg;
__entry->msg_ext = msg_ext;
),
@@ -64,7 +64,7 @@ TRACE_EVENT(sof_intel_D0I3C_updated,
__field(u8, reg)
),
TP_fast_assign(
- __assign_str(device_name, dev_name(sdev->dev));
+ __assign_str(device_name);
__entry->reg = reg;
),
TP_printk("device_name=%s register=%#x",
@@ -79,7 +79,7 @@ TRACE_EVENT(sof_intel_hda_irq_ipc_check,
__field(u32, irq_status)
),
TP_fast_assign(
- __assign_str(device_name, dev_name(sdev->dev));
+ __assign_str(device_name);
__entry->irq_status = irq_status;
),
TP_printk("device_name=%s irq_status=%#x",
@@ -100,7 +100,7 @@ TRACE_EVENT(sof_intel_hda_dsp_pcm,
__field(unsigned long, pos)
),
TP_fast_assign(
- __assign_str(device_name, dev_name(sdev->dev));
+ __assign_str(device_name);
__entry->hstream_index = hstream->index;
__entry->substream = substream->stream;
__entry->pos = pos;
@@ -119,7 +119,7 @@ TRACE_EVENT(sof_intel_hda_dsp_stream_status,
__field(u32, status)
),
TP_fast_assign(
- __assign_str(device_name, dev_name(dev));
+ __assign_str(device_name);
__entry->stream = s->index;
__entry->status = status;
),
@@ -135,7 +135,7 @@ TRACE_EVENT(sof_intel_hda_dsp_check_stream_irq,
__field(u32, status)
),
TP_fast_assign(
- __assign_str(device_name, dev_name(sdev->dev));
+ __assign_str(device_name);
__entry->status = status;
),
TP_printk("device_name=%s status=%#x",
diff --git a/include/trace/events/spi-mem.h b/include/trace/events/spi-mem.h
new file mode 100644
index 000000000000..d13f0bcff5e7
--- /dev/null
+++ b/include/trace/events/spi-mem.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM spi-mem
+
+#undef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR spi_mem
+
+#if !defined(_TRACE_SPI_MEM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SPI_MEM_H
+
+#include <linux/tracepoint.h>
+#include <linux/spi/spi-mem.h>
+
+#define decode_dtr(dtr) \
+ __print_symbolic(dtr, \
+ { 0, "S" }, \
+ { 1, "D" })
+
+TRACE_EVENT(spi_mem_start_op,
+ TP_PROTO(struct spi_mem *mem, const struct spi_mem_op *op),
+ TP_ARGS(mem, op),
+
+ TP_STRUCT__entry(
+ __string(name, mem->name)
+ __dynamic_array(u8, op, 1 + op->addr.nbytes + op->dummy.nbytes)
+ __dynamic_array(u8, data, op->data.dir == SPI_MEM_DATA_OUT ?
+ min(op->data.nbytes, 64) : 0)
+ __field(u32, data_len)
+ __field(u32, max_freq)
+ __field(u8, cmd_buswidth)
+ __field(bool, cmd_dtr)
+ __field(u8, addr_buswidth)
+ __field(bool, addr_dtr)
+ __field(u8, dummy_nbytes)
+ __field(u8, data_buswidth)
+ __field(bool, data_dtr)
+ ),
+
+ TP_fast_assign(
+ int i;
+
+ __assign_str(name);
+ __entry->max_freq = op->max_freq ?: mem->spi->max_speed_hz;
+
+ __entry->cmd_buswidth = op->cmd.buswidth;
+ __entry->cmd_dtr = op->cmd.dtr;
+ *((u8 *)__get_dynamic_array(op)) = op->cmd.opcode;
+
+ __entry->addr_buswidth = op->addr.buswidth;
+ __entry->addr_dtr = op->addr.dtr;
+ for (i = 0; i < op->addr.nbytes; i++)
+ ((u8 *)__get_dynamic_array(op))[i + 1] =
+ op->addr.val >> (8 * (op->addr.nbytes - i - 1));
+
+ memset(((u8 *)__get_dynamic_array(op)) + op->addr.nbytes + 1,
+ 0xff, op->dummy.nbytes);
+
+ __entry->data_len = op->data.nbytes;
+ __entry->data_buswidth = op->data.buswidth;
+ __entry->data_dtr = op->data.dtr;
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ memcpy(__get_dynamic_array(data), op->data.buf.out,
+ __get_dynamic_array_len(data));
+ ),
+
+ TP_printk("%s %u%s-%u%s-%u%s @%u Hz op=[%*phD] len=%u tx=[%*phD]",
+ __get_str(name),
+ __entry->cmd_buswidth, decode_dtr(__entry->cmd_dtr),
+ __entry->addr_buswidth, decode_dtr(__entry->addr_dtr),
+ __entry->data_buswidth, decode_dtr(__entry->data_dtr),
+ __entry->max_freq,
+ __get_dynamic_array_len(op), __get_dynamic_array(op),
+ __entry->data_len,
+ __get_dynamic_array_len(data), __get_dynamic_array(data))
+);
+
+TRACE_EVENT(spi_mem_stop_op,
+ TP_PROTO(struct spi_mem *mem, const struct spi_mem_op *op),
+ TP_ARGS(mem, op),
+
+ TP_STRUCT__entry(
+ __string(name, mem->name)
+ __dynamic_array(u8, data, op->data.dir == SPI_MEM_DATA_IN ?
+ min(op->data.nbytes, 64) : 0)
+ __field(u32, data_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ __entry->data_len = op->data.nbytes;
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ memcpy(__get_dynamic_array(data), op->data.buf.in,
+ __get_dynamic_array_len(data));
+ ),
+
+ TP_printk("%s len=%u rx=[%*phD]",
+ __get_str(name),
+ __entry->data_len,
+ __get_dynamic_array_len(data), __get_dynamic_array(data))
+);
+
+
+#endif /* _TRACE_SPI_MEM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
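The two new spi-mem events take (struct spi_mem *mem, const struct spi_mem_op *op), so a controller path can bracket op execution with them. A minimal sketch (the surrounding function and do_transfer() are placeholders, not part of this patch):

#include <trace/events/spi-mem.h>

static int run_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	int ret;

	trace_spi_mem_start_op(mem, op);	/* records cmd/addr/dummy bytes and any TX data */
	ret = do_transfer(mem, op);		/* placeholder for the real execution path */
	trace_spi_mem_stop_op(mem, op);		/* records any RX data */

	return ret;
}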
diff --git a/include/trace/events/spi.h b/include/trace/events/spi.h
index c0d9844befd7..e63d4a24d879 100644
--- a/include/trace/events/spi.h
+++ b/include/trace/events/spi.h
@@ -57,7 +57,7 @@ TRACE_EVENT(spi_setup,
TP_fast_assign(
__entry->bus_num = spi->controller->bus_num;
- __entry->chip_select = spi->chip_select;
+ __entry->chip_select = spi_get_chipselect(spi, 0);
__entry->mode = spi->mode;
__entry->bits_per_word = spi->bits_per_word;
__entry->max_speed_hz = spi->max_speed_hz;
@@ -88,7 +88,7 @@ TRACE_EVENT(spi_set_cs,
TP_fast_assign(
__entry->bus_num = spi->controller->bus_num;
- __entry->chip_select = spi->chip_select;
+ __entry->chip_select = spi_get_chipselect(spi, 0);
__entry->mode = spi->mode;
__entry->enable = enable;
),
@@ -113,7 +113,7 @@ DECLARE_EVENT_CLASS(spi_message,
TP_fast_assign(
__entry->bus_num = msg->spi->controller->bus_num;
- __entry->chip_select = msg->spi->chip_select;
+ __entry->chip_select = spi_get_chipselect(msg->spi, 0);
__entry->msg = msg;
),
@@ -154,7 +154,7 @@ TRACE_EVENT(spi_message_done,
TP_fast_assign(
__entry->bus_num = msg->spi->controller->bus_num;
- __entry->chip_select = msg->spi->chip_select;
+ __entry->chip_select = spi_get_chipselect(msg->spi, 0);
__entry->msg = msg;
__entry->frame = msg->frame_length;
__entry->actual = msg->actual_length;
@@ -167,7 +167,7 @@ TRACE_EVENT(spi_message_done,
);
/*
- * consider a buffer valid if non-NULL and if it doesn't match the dummy buffer
+ * Consider a buffer valid if non-NULL and if it doesn't match the dummy buffer
* that only exist to work with controllers that have SPI_CONTROLLER_MUST_TX or
* SPI_CONTROLLER_MUST_RX.
*/
@@ -197,7 +197,7 @@ DECLARE_EVENT_CLASS(spi_transfer,
TP_fast_assign(
__entry->bus_num = msg->spi->controller->bus_num;
- __entry->chip_select = msg->spi->chip_select;
+ __entry->chip_select = spi_get_chipselect(msg->spi, 0);
__entry->xfer = xfer;
__entry->len = xfer->len;
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 37604e0e5963..750ecce56930 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -21,7 +21,6 @@ TRACE_DEFINE_ENUM(SOCK_DGRAM);
TRACE_DEFINE_ENUM(SOCK_RAW);
TRACE_DEFINE_ENUM(SOCK_RDM);
TRACE_DEFINE_ENUM(SOCK_SEQPACKET);
-TRACE_DEFINE_ENUM(SOCK_DCCP);
TRACE_DEFINE_ENUM(SOCK_PACKET);
#define show_socket_type(type) \
@@ -31,7 +30,6 @@ TRACE_DEFINE_ENUM(SOCK_PACKET);
{ SOCK_RAW, "RAW" }, \
{ SOCK_RDM, "RDM" }, \
{ SOCK_SEQPACKET, "SEQPACKET" }, \
- { SOCK_DCCP, "DCCP" }, \
{ SOCK_PACKET, "PACKET" })
/* This list is known to be incomplete, add new enums as needed. */
@@ -139,36 +137,68 @@ DEFINE_RPC_CLNT_EVENT(release);
DEFINE_RPC_CLNT_EVENT(replace_xprt);
DEFINE_RPC_CLNT_EVENT(replace_xprt_err);
+TRACE_DEFINE_ENUM(RPC_XPRTSEC_NONE);
+TRACE_DEFINE_ENUM(RPC_XPRTSEC_TLS_X509);
+
+#define rpc_show_xprtsec_policy(policy) \
+ __print_symbolic(policy, \
+ { RPC_XPRTSEC_NONE, "none" }, \
+ { RPC_XPRTSEC_TLS_ANON, "tls-anon" }, \
+ { RPC_XPRTSEC_TLS_X509, "tls-x509" })
+
+#define rpc_show_create_flags(flags) \
+ __print_flags(flags, "|", \
+ { RPC_CLNT_CREATE_HARDRTRY, "HARDRTRY" }, \
+ { RPC_CLNT_CREATE_AUTOBIND, "AUTOBIND" }, \
+ { RPC_CLNT_CREATE_NONPRIVPORT, "NONPRIVPORT" }, \
+ { RPC_CLNT_CREATE_NOPING, "NOPING" }, \
+ { RPC_CLNT_CREATE_DISCRTRY, "DISCRTRY" }, \
+ { RPC_CLNT_CREATE_QUIET, "QUIET" }, \
+ { RPC_CLNT_CREATE_INFINITE_SLOTS, \
+ "INFINITE_SLOTS" }, \
+ { RPC_CLNT_CREATE_NO_IDLE_TIMEOUT, \
+ "NO_IDLE_TIMEOUT" }, \
+ { RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT, \
+ "NO_RETRANS_TIMEOUT" }, \
+ { RPC_CLNT_CREATE_SOFTERR, "SOFTERR" }, \
+ { RPC_CLNT_CREATE_REUSEPORT, "REUSEPORT" })
+
TRACE_EVENT(rpc_clnt_new,
TP_PROTO(
const struct rpc_clnt *clnt,
const struct rpc_xprt *xprt,
- const char *program,
- const char *server
+ const struct rpc_create_args *args
),
- TP_ARGS(clnt, xprt, program, server),
+ TP_ARGS(clnt, xprt, args),
TP_STRUCT__entry(
__field(unsigned int, client_id)
+ __field(unsigned long, xprtsec)
+ __field(unsigned long, flags)
+ __string(program, clnt->cl_program->name)
+ __string(server, xprt->servername)
__string(addr, xprt->address_strings[RPC_DISPLAY_ADDR])
__string(port, xprt->address_strings[RPC_DISPLAY_PORT])
- __string(program, program)
- __string(server, server)
),
TP_fast_assign(
__entry->client_id = clnt->cl_clid;
- __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]);
- __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]);
- __assign_str(program, program);
- __assign_str(server, server);
+ __entry->xprtsec = args->xprtsec.policy;
+ __entry->flags = args->flags;
+ __assign_str(program);
+ __assign_str(server);
+ __assign_str(addr);
+ __assign_str(port);
),
- TP_printk("client=" SUNRPC_TRACE_CLID_SPECIFIER
- " peer=[%s]:%s program=%s server=%s",
+ TP_printk("client=" SUNRPC_TRACE_CLID_SPECIFIER " peer=[%s]:%s"
+ " program=%s server=%s xprtsec=%s flags=%s",
__entry->client_id, __get_str(addr), __get_str(port),
- __get_str(program), __get_str(server))
+ __get_str(program), __get_str(server),
+ rpc_show_xprtsec_policy(__entry->xprtsec),
+ rpc_show_create_flags(__entry->flags)
+ )
);
TRACE_EVENT(rpc_clnt_new_err,
@@ -188,8 +218,8 @@ TRACE_EVENT(rpc_clnt_new_err,
TP_fast_assign(
__entry->error = error;
- __assign_str(program, program);
- __assign_str(server, server);
+ __assign_str(program);
+ __assign_str(server);
),
TP_printk("program=%s server=%s error=%d",
@@ -293,8 +323,8 @@ TRACE_EVENT(rpc_request,
__entry->client_id = task->tk_client->cl_clid;
__entry->version = task->tk_client->cl_vers;
__entry->async = RPC_IS_ASYNC(task);
- __assign_str(progname, task->tk_client->cl_program->name);
- __assign_str(procname, rpc_proc_name(task));
+ __assign_str(progname);
+ __assign_str(procname);
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " %sv%d %s (%ssync)",
@@ -311,6 +341,7 @@ TRACE_EVENT(rpc_request,
{ RPC_TASK_MOVEABLE, "MOVEABLE" }, \
{ RPC_TASK_NULLCREDS, "NULLCREDS" }, \
{ RPC_CALL_MAJORSEEN, "MAJORSEEN" }, \
+ { RPC_TASK_NETUNREACH_FATAL, "NETUNREACH_FATAL"}, \
{ RPC_TASK_DYNAMIC, "DYNAMIC" }, \
{ RPC_TASK_NO_ROUND_ROBIN, "NO_ROUND_ROBIN" }, \
{ RPC_TASK_SOFT, "SOFT" }, \
@@ -328,8 +359,7 @@ TRACE_EVENT(rpc_request,
{ (1UL << RPC_TASK_ACTIVE), "ACTIVE" }, \
{ (1UL << RPC_TASK_NEED_XMIT), "NEED_XMIT" }, \
{ (1UL << RPC_TASK_NEED_RECV), "NEED_RECV" }, \
- { (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" }, \
- { (1UL << RPC_TASK_SIGNALLED), "SIGNALLED" })
+ { (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" })
DECLARE_EVENT_CLASS(rpc_task_running,
@@ -407,7 +437,7 @@ DECLARE_EVENT_CLASS(rpc_task_queued,
__entry->runstate = task->tk_runstate;
__entry->status = task->tk_status;
__entry->flags = task->tk_flags;
- __assign_str(q_name, rpc_qname(q));
+ __assign_str(q_name);
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
@@ -483,10 +513,10 @@ DECLARE_EVENT_CLASS(rpc_reply_event,
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
- __assign_str(progname, task->tk_client->cl_program->name);
+ __assign_str(progname);
__entry->version = task->tk_client->cl_vers;
- __assign_str(procname, rpc_proc_name(task));
- __assign_str(servername, task->tk_xprt->servername);
+ __assign_str(procname);
+ __assign_str(servername);
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
@@ -607,6 +637,7 @@ TRACE_EVENT(rpc_stats_latency,
__field(unsigned long, backlog)
__field(unsigned long, rtt)
__field(unsigned long, execute)
+ __field(u32, xprt_id)
),
TP_fast_assign(
@@ -614,18 +645,21 @@ TRACE_EVENT(rpc_stats_latency,
__entry->task_id = task->tk_pid;
__entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
__entry->version = task->tk_client->cl_vers;
- __assign_str(progname, task->tk_client->cl_program->name);
- __assign_str(procname, rpc_proc_name(task));
+ __assign_str(progname);
+ __assign_str(procname);
__entry->backlog = ktime_to_us(backlog);
__entry->rtt = ktime_to_us(rtt);
__entry->execute = ktime_to_us(execute);
+ __entry->xprt_id = task->tk_xprt->id;
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
- " xid=0x%08x %sv%d %s backlog=%lu rtt=%lu execute=%lu",
+ " xid=0x%08x %sv%d %s backlog=%lu rtt=%lu execute=%lu"
+ " xprt_id=%d",
__entry->task_id, __entry->client_id, __entry->xid,
__get_str(progname), __entry->version, __get_str(procname),
- __entry->backlog, __entry->rtt, __entry->execute)
+ __entry->backlog, __entry->rtt, __entry->execute,
+ __entry->xprt_id)
);
TRACE_EVENT(rpc_xdr_overflow,
@@ -661,16 +695,15 @@ TRACE_EVENT(rpc_xdr_overflow,
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
- __assign_str(progname,
- task->tk_client->cl_program->name);
+ __assign_str(progname);
__entry->version = task->tk_client->cl_vers;
- __assign_str(procedure, task->tk_msg.rpc_proc->p_name);
+ __assign_str(procedure);
} else {
__entry->task_id = -1;
__entry->client_id = -1;
- __assign_str(progname, "unknown");
+ __assign_str(progname);
__entry->version = 0;
- __assign_str(procedure, "unknown");
+ __assign_str(procedure);
}
__entry->requested = requested;
__entry->end = xdr->end;
@@ -684,7 +717,7 @@ TRACE_EVENT(rpc_xdr_overflow,
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
- " %sv%d %s requested=%zu p=%p end=%p xdr=[%p,%zu]/%u/[%p,%zu]/%u\n",
+ " %sv%d %s requested=%zu p=%p end=%p xdr=[%p,%zu]/%u/[%p,%zu]/%u",
__entry->task_id, __entry->client_id,
__get_str(progname), __entry->version, __get_str(procedure),
__entry->requested, __entry->p, __entry->end,
@@ -727,10 +760,9 @@ TRACE_EVENT(rpc_xdr_alignment,
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
- __assign_str(progname,
- task->tk_client->cl_program->name);
+ __assign_str(progname);
__entry->version = task->tk_client->cl_vers;
- __assign_str(procedure, task->tk_msg.rpc_proc->p_name);
+ __assign_str(procedure);
__entry->offset = offset;
__entry->copied = copied;
@@ -743,7 +775,7 @@ TRACE_EVENT(rpc_xdr_alignment,
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
- " %sv%d %s offset=%zu copied=%u xdr=[%p,%zu]/%u/[%p,%zu]/%u\n",
+ " %sv%d %s offset=%zu copied=%u xdr=[%p,%zu]/%u/[%p,%zu]/%u",
__entry->task_id, __entry->client_id,
__get_str(progname), __entry->version, __get_str(procedure),
__entry->offset, __entry->copied,
@@ -982,8 +1014,8 @@ DECLARE_EVENT_CLASS(rpc_xprt_lifetime_class,
TP_fast_assign(
__entry->state = xprt->state;
- __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]);
- __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]);
+ __assign_str(addr);
+ __assign_str(port);
),
TP_printk("peer=[%s]:%s state=%s",
@@ -1025,8 +1057,8 @@ DECLARE_EVENT_CLASS(rpc_xprt_event,
TP_fast_assign(
__entry->xid = be32_to_cpu(xid);
__entry->status = status;
- __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]);
- __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]);
+ __assign_str(addr);
+ __assign_str(port);
),
TP_printk("peer=[%s]:%s xid=0x%08x status=%d", __get_str(addr),
@@ -1066,7 +1098,7 @@ TRACE_EVENT(xprt_transmit,
__entry->client_id = rqst->rq_task->tk_client ?
rqst->rq_task->tk_client->cl_clid : -1;
__entry->xid = be32_to_cpu(rqst->rq_xid);
- __entry->seqno = rqst->rq_seqno;
+ __entry->seqno = *rqst->rq_seqnos;
__entry->status = status;
),
@@ -1104,10 +1136,9 @@ TRACE_EVENT(xprt_retransmit,
__entry->xid = be32_to_cpu(rqst->rq_xid);
__entry->ntrans = rqst->rq_ntrans;
__entry->timeout = task->tk_timeout;
- __assign_str(progname,
- task->tk_client->cl_program->name);
+ __assign_str(progname);
__entry->version = task->tk_client->cl_vers;
- __assign_str(procname, rpc_proc_name(task));
+ __assign_str(procname);
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
@@ -1131,8 +1162,8 @@ TRACE_EVENT(xprt_ping,
TP_fast_assign(
__entry->status = status;
- __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]);
- __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]);
+ __assign_str(addr);
+ __assign_str(port);
),
TP_printk("peer=[%s]:%s status=%d",
@@ -1279,8 +1310,8 @@ TRACE_EVENT(xs_data_ready,
),
TP_fast_assign(
- __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]);
- __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]);
+ __assign_str(addr);
+ __assign_str(port);
),
TP_printk("peer=[%s]:%s", __get_str(addr), __get_str(port))
@@ -1295,18 +1326,16 @@ TRACE_EVENT(xs_stream_read_data,
__field(ssize_t, err)
__field(size_t, total)
__string(addr, xprt ? xprt->address_strings[RPC_DISPLAY_ADDR] :
- "(null)")
+ EVENT_NULL_STR)
__string(port, xprt ? xprt->address_strings[RPC_DISPLAY_PORT] :
- "(null)")
+ EVENT_NULL_STR)
),
TP_fast_assign(
__entry->err = err;
__entry->total = total;
- __assign_str(addr, xprt ?
- xprt->address_strings[RPC_DISPLAY_ADDR] : "(null)");
- __assign_str(port, xprt ?
- xprt->address_strings[RPC_DISPLAY_PORT] : "(null)");
+ __assign_str(addr);
+ __assign_str(port);
),
TP_printk("peer=[%s]:%s err=%zd total=%zu", __get_str(addr),
@@ -1328,8 +1357,8 @@ TRACE_EVENT(xs_stream_read_request,
),
TP_fast_assign(
- __assign_str(addr, xs->xprt.address_strings[RPC_DISPLAY_ADDR]);
- __assign_str(port, xs->xprt.address_strings[RPC_DISPLAY_PORT]);
+ __assign_str(addr);
+ __assign_str(port);
__entry->xid = be32_to_cpu(xs->recv.xid);
__entry->copied = xs->recv.copied;
__entry->reclen = xs->recv.len;
@@ -1367,7 +1396,7 @@ TRACE_EVENT(rpcb_getport,
__entry->version = clnt->cl_vers;
__entry->protocol = task->tk_xprt->prot;
__entry->bind_version = bind_version;
- __assign_str(servername, task->tk_xprt->servername);
+ __assign_str(servername);
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
@@ -1457,8 +1486,8 @@ TRACE_EVENT(rpcb_register,
TP_fast_assign(
__entry->program = program;
__entry->version = version;
- __assign_str(addr, addr);
- __assign_str(netid, netid);
+ __assign_str(addr);
+ __assign_str(netid);
),
TP_printk("program=%u version=%u addr=%s netid=%s",
@@ -1485,7 +1514,7 @@ TRACE_EVENT(rpcb_unregister,
TP_fast_assign(
__entry->program = program;
__entry->version = version;
- __assign_str(netid, netid);
+ __assign_str(netid);
),
TP_printk("program=%u version=%u netid=%s",
@@ -1493,6 +1522,50 @@ TRACE_EVENT(rpcb_unregister,
)
);
+/**
+ ** RPC-over-TLS tracepoints
+ **/
+
+DECLARE_EVENT_CLASS(rpc_tls_class,
+ TP_PROTO(
+ const struct rpc_clnt *clnt,
+ const struct rpc_xprt *xprt
+ ),
+
+ TP_ARGS(clnt, xprt),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, requested_policy)
+ __field(u32, version)
+ __string(servername, xprt->servername)
+ __string(progname, clnt->cl_program->name)
+ ),
+
+ TP_fast_assign(
+ __entry->requested_policy = clnt->cl_xprtsec.policy;
+ __entry->version = clnt->cl_vers;
+ __assign_str(servername);
+ __assign_str(progname);
+ ),
+
+ TP_printk("server=%s %sv%u requested_policy=%s",
+ __get_str(servername), __get_str(progname), __entry->version,
+ rpc_show_xprtsec_policy(__entry->requested_policy)
+ )
+);
+
+#define DEFINE_RPC_TLS_EVENT(name) \
+ DEFINE_EVENT(rpc_tls_class, rpc_tls_##name, \
+ TP_PROTO( \
+ const struct rpc_clnt *clnt, \
+ const struct rpc_xprt *xprt \
+ ), \
+ TP_ARGS(clnt, xprt))
+
+DEFINE_RPC_TLS_EVENT(unavailable);
+DEFINE_RPC_TLS_EVENT(not_started);
+
+
/* Record an xdr_buf containing a fully-formed RPC message */
DECLARE_EVENT_CLASS(svc_xdr_msg_class,
TP_PROTO(
@@ -1599,9 +1672,7 @@ DEFINE_SVCXDRBUF_EVENT(sendto);
svc_rqst_flag(LOCAL) \
svc_rqst_flag(USEDEFERRAL) \
svc_rqst_flag(DROPME) \
- svc_rqst_flag(SPLICE_OK) \
svc_rqst_flag(VICTIM) \
- svc_rqst_flag(BUSY) \
svc_rqst_flag_end(DATA)
#undef svc_rqst_flag
@@ -1620,7 +1691,6 @@ SVC_RQST_FLAG_LIST
__print_flags(flags, "|", SVC_RQST_FLAG_LIST)
TRACE_DEFINE_ENUM(SVC_GARBAGE);
-TRACE_DEFINE_ENUM(SVC_SYSERR);
TRACE_DEFINE_ENUM(SVC_VALID);
TRACE_DEFINE_ENUM(SVC_NEGATIVE);
TRACE_DEFINE_ENUM(SVC_OK);
@@ -1630,10 +1700,9 @@ TRACE_DEFINE_ENUM(SVC_DENIED);
TRACE_DEFINE_ENUM(SVC_PENDING);
TRACE_DEFINE_ENUM(SVC_COMPLETE);
-#define svc_show_status(status) \
+#define show_svc_auth_status(status) \
__print_symbolic(status, \
{ SVC_GARBAGE, "SVC_GARBAGE" }, \
- { SVC_SYSERR, "SVC_SYSERR" }, \
{ SVC_VALID, "SVC_VALID" }, \
{ SVC_NEGATIVE, "SVC_NEGATIVE" }, \
{ SVC_OK, "SVC_OK" }, \
@@ -1667,7 +1736,10 @@ TRACE_DEFINE_ENUM(SVC_COMPLETE);
__entry->xid, __get_sockaddr(server), __get_sockaddr(client)
TRACE_EVENT_CONDITION(svc_authenticate,
- TP_PROTO(const struct svc_rqst *rqst, int auth_res),
+ TP_PROTO(
+ const struct svc_rqst *rqst,
+ enum svc_auth_status auth_res
+ ),
TP_ARGS(rqst, auth_res),
@@ -1690,7 +1762,7 @@ TRACE_EVENT_CONDITION(svc_authenticate,
TP_printk(SVC_RQST_ENDPOINT_FORMAT
" auth_res=%s auth_stat=%s",
SVC_RQST_ENDPOINT_VARARGS,
- svc_show_status(__entry->svc_status),
+ show_svc_auth_status(__entry->svc_status),
rpc_show_auth_stat(__entry->auth_stat))
);
@@ -1706,17 +1778,16 @@ TRACE_EVENT(svc_process,
__string(service, name)
__string(procedure, svc_proc_name(rqst))
__string(addr, rqst->rq_xprt ?
- rqst->rq_xprt->xpt_remotebuf : "(null)")
+ rqst->rq_xprt->xpt_remotebuf : EVENT_NULL_STR)
),
TP_fast_assign(
__entry->xid = be32_to_cpu(rqst->rq_xid);
__entry->vers = rqst->rq_vers;
__entry->proc = rqst->rq_proc;
- __assign_str(service, name);
- __assign_str(procedure, svc_proc_name(rqst));
- __assign_str(addr, rqst->rq_xprt ?
- rqst->rq_xprt->xpt_remotebuf : "(null)");
+ __assign_str(service);
+ __assign_str(procedure);
+ __assign_str(addr);
),
TP_printk("addr=%s xid=0x%08x service=%s vers=%u proc=%s",
@@ -1790,6 +1861,31 @@ DEFINE_EVENT(svc_rqst_status, svc_send,
TP_PROTO(const struct svc_rqst *rqst, int status),
TP_ARGS(rqst, status));
+TRACE_EVENT(svc_replace_page_err,
+ TP_PROTO(const struct svc_rqst *rqst),
+
+ TP_ARGS(rqst),
+ TP_STRUCT__entry(
+ SVC_RQST_ENDPOINT_FIELDS(rqst)
+
+ __field(const void *, begin)
+ __field(const void *, respages)
+ __field(const void *, nextpage)
+ ),
+
+ TP_fast_assign(
+ SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst);
+
+ __entry->begin = rqst->rq_pages;
+ __entry->respages = rqst->rq_respages;
+ __entry->nextpage = rqst->rq_next_page;
+ ),
+
+ TP_printk(SVC_RQST_ENDPOINT_FORMAT " begin=%p respages=%p nextpage=%p",
+ SVC_RQST_ENDPOINT_VARARGS,
+ __entry->begin, __entry->respages, __entry->nextpage)
+);
+
TRACE_EVENT(svc_stats_latency,
TP_PROTO(
const struct svc_rqst *rqst
@@ -1809,7 +1905,7 @@ TRACE_EVENT(svc_stats_latency,
__entry->execute = ktime_to_us(ktime_sub(ktime_get(),
rqst->rq_stime));
- __assign_str(procedure, svc_proc_name(rqst));
+ __assign_str(procedure);
),
TP_printk(SVC_RQST_ENDPOINT_FORMAT " proc=%s execute-us=%lu",
@@ -1817,22 +1913,42 @@ TRACE_EVENT(svc_stats_latency,
__get_str(procedure), __entry->execute)
);
+/*
+ * from include/linux/sunrpc/svc_xprt.h
+ */
+#define SVC_XPRT_FLAG_LIST \
+ svc_xprt_flag(BUSY) \
+ svc_xprt_flag(CONN) \
+ svc_xprt_flag(CLOSE) \
+ svc_xprt_flag(DATA) \
+ svc_xprt_flag(TEMP) \
+ svc_xprt_flag(DEAD) \
+ svc_xprt_flag(CHNGBUF) \
+ svc_xprt_flag(DEFERRED) \
+ svc_xprt_flag(OLD) \
+ svc_xprt_flag(LISTENER) \
+ svc_xprt_flag(CACHE_AUTH) \
+ svc_xprt_flag(LOCAL) \
+ svc_xprt_flag(KILL_TEMP) \
+ svc_xprt_flag(CONG_CTRL) \
+ svc_xprt_flag(HANDSHAKE) \
+ svc_xprt_flag(TLS_SESSION) \
+ svc_xprt_flag_end(PEER_AUTH)
+
+#undef svc_xprt_flag
+#undef svc_xprt_flag_end
+#define svc_xprt_flag(x) TRACE_DEFINE_ENUM(XPT_##x);
+#define svc_xprt_flag_end(x) TRACE_DEFINE_ENUM(XPT_##x);
+
+SVC_XPRT_FLAG_LIST
+
+#undef svc_xprt_flag
+#undef svc_xprt_flag_end
+#define svc_xprt_flag(x) { BIT(XPT_##x), #x },
+#define svc_xprt_flag_end(x) { BIT(XPT_##x), #x }
+
#define show_svc_xprt_flags(flags) \
- __print_flags(flags, "|", \
- { (1UL << XPT_BUSY), "XPT_BUSY"}, \
- { (1UL << XPT_CONN), "XPT_CONN"}, \
- { (1UL << XPT_CLOSE), "XPT_CLOSE"}, \
- { (1UL << XPT_DATA), "XPT_DATA"}, \
- { (1UL << XPT_TEMP), "XPT_TEMP"}, \
- { (1UL << XPT_DEAD), "XPT_DEAD"}, \
- { (1UL << XPT_CHNGBUF), "XPT_CHNGBUF"}, \
- { (1UL << XPT_DEFERRED), "XPT_DEFERRED"}, \
- { (1UL << XPT_OLD), "XPT_OLD"}, \
- { (1UL << XPT_LISTENER), "XPT_LISTENER"}, \
- { (1UL << XPT_CACHE_AUTH), "XPT_CACHE_AUTH"}, \
- { (1UL << XPT_LOCAL), "XPT_LOCAL"}, \
- { (1UL << XPT_KILL_TEMP), "XPT_KILL_TEMP"}, \
- { (1UL << XPT_CONG_CTRL), "XPT_CONG_CTRL"})
+ __print_flags(flags, "|", SVC_XPRT_FLAG_LIST)
TRACE_EVENT(svc_xprt_create_err,
TP_PROTO(
@@ -1854,8 +1970,8 @@ TRACE_EVENT(svc_xprt_create_err,
TP_fast_assign(
__entry->error = PTR_ERR(xprt);
- __assign_str(program, program);
- __assign_str(protocol, protocol);
+ __assign_str(program);
+ __assign_str(protocol);
__assign_sockaddr(addr, sap, salen);
),
@@ -1890,25 +2006,25 @@ TRACE_EVENT(svc_xprt_create_err,
TRACE_EVENT(svc_xprt_enqueue,
TP_PROTO(
const struct svc_xprt *xprt,
- const struct svc_rqst *rqst
+ unsigned long flags
),
- TP_ARGS(xprt, rqst),
+ TP_ARGS(xprt, flags),
TP_STRUCT__entry(
SVC_XPRT_ENDPOINT_FIELDS(xprt)
-
- __field(int, pid)
),
TP_fast_assign(
- SVC_XPRT_ENDPOINT_ASSIGNMENTS(xprt);
-
- __entry->pid = rqst? rqst->rq_task->pid : 0;
+ __assign_sockaddr(server, &xprt->xpt_local,
+ xprt->xpt_locallen);
+ __assign_sockaddr(client, &xprt->xpt_remote,
+ xprt->xpt_remotelen);
+ __entry->flags = flags;
+ __entry->netns_ino = xprt->xpt_net->ns.inum;
),
- TP_printk(SVC_XPRT_ENDPOINT_FORMAT " pid=%d",
- SVC_XPRT_ENDPOINT_VARARGS, __entry->pid)
+ TP_printk(SVC_XPRT_ENDPOINT_FORMAT, SVC_XPRT_ENDPOINT_VARARGS)
);
TRACE_EVENT(svc_xprt_dequeue,
@@ -1920,19 +2036,20 @@ TRACE_EVENT(svc_xprt_dequeue,
TP_STRUCT__entry(
SVC_XPRT_ENDPOINT_FIELDS(rqst->rq_xprt)
-
__field(unsigned long, wakeup)
+ __field(unsigned long, qtime)
),
TP_fast_assign(
- SVC_XPRT_ENDPOINT_ASSIGNMENTS(rqst->rq_xprt);
+ ktime_t ktime = ktime_get();
- __entry->wakeup = ktime_to_us(ktime_sub(ktime_get(),
- rqst->rq_qtime));
+ SVC_XPRT_ENDPOINT_ASSIGNMENTS(rqst->rq_xprt);
+ __entry->wakeup = ktime_to_us(ktime_sub(ktime, rqst->rq_qtime));
+ __entry->qtime = ktime_to_us(ktime_sub(ktime, rqst->rq_xprt->xpt_qtime));
),
- TP_printk(SVC_XPRT_ENDPOINT_FORMAT " wakeup-us=%lu",
- SVC_XPRT_ENDPOINT_VARARGS, __entry->wakeup)
+ TP_printk(SVC_XPRT_ENDPOINT_FORMAT " wakeup-us=%lu qtime-us=%lu",
+ SVC_XPRT_ENDPOINT_VARARGS, __entry->wakeup, __entry->qtime)
);
DECLARE_EVENT_CLASS(svc_xprt_event,
@@ -1965,6 +2082,17 @@ DEFINE_SVC_XPRT_EVENT(close);
DEFINE_SVC_XPRT_EVENT(detach);
DEFINE_SVC_XPRT_EVENT(free);
+#define DEFINE_SVC_TLS_EVENT(name) \
+ DEFINE_EVENT(svc_xprt_event, svc_tls_##name, \
+ TP_PROTO(const struct svc_xprt *xprt), \
+ TP_ARGS(xprt))
+
+DEFINE_SVC_TLS_EVENT(start);
+DEFINE_SVC_TLS_EVENT(upcall);
+DEFINE_SVC_TLS_EVENT(unavailable);
+DEFINE_SVC_TLS_EVENT(not_started);
+DEFINE_SVC_TLS_EVENT(timed_out);
+
TRACE_EVENT(svc_xprt_accept,
TP_PROTO(
const struct svc_xprt *xprt,
@@ -1983,8 +2111,8 @@ TRACE_EVENT(svc_xprt_accept,
TP_fast_assign(
SVC_XPRT_ENDPOINT_ASSIGNMENTS(xprt);
- __assign_str(protocol, xprt->xpt_class->xcl_name);
- __assign_str(service, service);
+ __assign_str(protocol);
+ __assign_str(service);
),
TP_printk(SVC_XPRT_ENDPOINT_FORMAT " protocol=%s service=%s",
@@ -1993,22 +2121,35 @@ TRACE_EVENT(svc_xprt_accept,
)
);
-TRACE_EVENT(svc_wake_up,
- TP_PROTO(int pid),
+DECLARE_EVENT_CLASS(svc_pool_thread_event,
+ TP_PROTO(const struct svc_pool *pool, pid_t pid),
- TP_ARGS(pid),
+ TP_ARGS(pool, pid),
TP_STRUCT__entry(
- __field(int, pid)
+ __field(unsigned int, pool_id)
+ __field(pid_t, pid)
),
TP_fast_assign(
+ __entry->pool_id = pool->sp_id;
__entry->pid = pid;
),
- TP_printk("pid=%d", __entry->pid)
+ TP_printk("pool=%u pid=%d", __entry->pool_id, __entry->pid)
);
+#define DEFINE_SVC_POOL_THREAD_EVENT(name) \
+ DEFINE_EVENT(svc_pool_thread_event, svc_pool_thread_##name, \
+ TP_PROTO( \
+ const struct svc_pool *pool, pid_t pid \
+ ), \
+ TP_ARGS(pool, pid))
+
+DEFINE_SVC_POOL_THREAD_EVENT(wake);
+DEFINE_SVC_POOL_THREAD_EVENT(running);
+DEFINE_SVC_POOL_THREAD_EVENT(noidle);
+
TRACE_EVENT(svc_alloc_arg_err,
TP_PROTO(
unsigned int requested,
@@ -2065,31 +2206,46 @@ DEFINE_SVC_DEFERRED_EVENT(drop);
DEFINE_SVC_DEFERRED_EVENT(queue);
DEFINE_SVC_DEFERRED_EVENT(recv);
-TRACE_EVENT(svcsock_new_socket,
+DECLARE_EVENT_CLASS(svcsock_lifetime_class,
TP_PROTO(
+ const void *svsk,
const struct socket *socket
),
-
- TP_ARGS(socket),
-
+ TP_ARGS(svsk, socket),
TP_STRUCT__entry(
+ __field(unsigned int, netns_ino)
+ __field(const void *, svsk)
+ __field(const void *, sk)
__field(unsigned long, type)
__field(unsigned long, family)
- __field(bool, listener)
+ __field(unsigned long, state)
),
-
TP_fast_assign(
+ struct sock *sk = socket->sk;
+
+ __entry->netns_ino = sock_net(sk)->ns.inum;
+ __entry->svsk = svsk;
+ __entry->sk = sk;
__entry->type = socket->type;
- __entry->family = socket->sk->sk_family;
- __entry->listener = (socket->sk->sk_state == TCP_LISTEN);
+ __entry->family = sk->sk_family;
+ __entry->state = sk->sk_state;
),
-
- TP_printk("type=%s family=%s%s",
- show_socket_type(__entry->type),
+ TP_printk("svsk=%p type=%s family=%s%s",
+ __entry->svsk, show_socket_type(__entry->type),
rpc_show_address_family(__entry->family),
- __entry->listener ? " (listener)" : ""
+ __entry->state == TCP_LISTEN ? " (listener)" : ""
)
);
+#define DEFINE_SVCSOCK_LIFETIME_EVENT(name) \
+ DEFINE_EVENT(svcsock_lifetime_class, name, \
+ TP_PROTO( \
+ const void *svsk, \
+ const struct socket *socket \
+ ), \
+ TP_ARGS(svsk, socket))
+
+DEFINE_SVCSOCK_LIFETIME_EVENT(svcsock_new);
+DEFINE_SVCSOCK_LIFETIME_EVENT(svcsock_free);
TRACE_EVENT(svcsock_marker,
TP_PROTO(
@@ -2108,7 +2264,7 @@ TRACE_EVENT(svcsock_marker,
TP_fast_assign(
__entry->length = be32_to_cpu(marker) & RPC_FRAGMENT_SIZE_MASK;
__entry->last = be32_to_cpu(marker) & RPC_LAST_STREAM_FRAGMENT;
- __assign_str(addr, xprt->xpt_remotebuf);
+ __assign_str(addr);
),
TP_printk("addr=%s length=%u%s", __get_str(addr),
@@ -2132,7 +2288,7 @@ DECLARE_EVENT_CLASS(svcsock_class,
TP_fast_assign(
__entry->result = result;
__entry->flags = xprt->xpt_flags;
- __assign_str(addr, xprt->xpt_remotebuf);
+ __assign_str(addr);
),
TP_printk("addr=%s result=%zd flags=%s", __get_str(addr),
@@ -2178,7 +2334,7 @@ TRACE_EVENT(svcsock_tcp_recv_short,
__entry->expected = expected;
__entry->received = received;
__entry->flags = xprt->xpt_flags;
- __assign_str(addr, xprt->xpt_remotebuf);
+ __assign_str(addr);
),
TP_printk("addr=%s flags=%s expected=%u received=%u",
@@ -2206,7 +2362,7 @@ TRACE_EVENT(svcsock_tcp_state,
__entry->socket_state = socket->state;
__entry->sock_state = socket->sk->sk_state;
__entry->flags = xprt->xpt_flags;
- __assign_str(addr, xprt->xpt_remotebuf);
+ __assign_str(addr);
),
TP_printk("addr=%s state=%s sk_state=%s flags=%s", __get_str(addr),
@@ -2233,7 +2389,7 @@ DECLARE_EVENT_CLASS(svcsock_accept_class,
TP_fast_assign(
__entry->status = status;
- __assign_str(service, service);
+ __assign_str(service);
__entry->netns_ino = xprt->xpt_net->ns.inum;
),
@@ -2269,7 +2425,7 @@ DECLARE_EVENT_CLASS(cache_event,
TP_fast_assign(
__entry->h = h;
- __assign_str(name, cd->name);
+ __assign_str(name);
),
TP_printk("cache=%s entry=%p", __get_str(name), __entry->h)
@@ -2314,7 +2470,7 @@ DECLARE_EVENT_CLASS(register_class,
__entry->protocol = protocol;
__entry->port = port;
__entry->error = error;
- __assign_str(program, program);
+ __assign_str(program);
),
TP_printk("program=%sv%u proto=%s port=%u family=%s error=%d",
@@ -2359,7 +2515,7 @@ TRACE_EVENT(svc_unregister,
TP_fast_assign(
__entry->version = version;
__entry->error = error;
- __assign_str(program, program);
+ __assign_str(program);
),
TP_printk("program=%sv%u error=%d",
diff --git a/include/trace/events/swiotlb.h b/include/trace/events/swiotlb.h
index da05c9ebd224..3b6ddb136e4e 100644
--- a/include/trace/events/swiotlb.h
+++ b/include/trace/events/swiotlb.h
@@ -20,7 +20,7 @@ TRACE_EVENT(swiotlb_bounced,
),
TP_fast_assign(
- __assign_str(dev_name, dev_name(dev));
+ __assign_str(dev_name);
__entry->dma_mask = (dev->dma_mask ? *dev->dma_mask : 0);
__entry->dev_addr = dev_addr;
__entry->size = size;
diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h
index b6e0cbc2c71f..f31ff446b468 100644
--- a/include/trace/events/syscalls.h
+++ b/include/trace/events/syscalls.h
@@ -15,7 +15,7 @@
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
-TRACE_EVENT_FN(sys_enter,
+TRACE_EVENT_SYSCALL(sys_enter,
TP_PROTO(struct pt_regs *regs, long id),
@@ -41,7 +41,7 @@ TRACE_EVENT_FN(sys_enter,
TRACE_EVENT_FLAGS(sys_enter, TRACE_EVENT_FL_CAP_ANY)
-TRACE_EVENT_FN(sys_exit,
+TRACE_EVENT_SYSCALL(sys_exit,
TP_PROTO(struct pt_regs *regs, long ret),
diff --git a/include/trace/events/target.h b/include/trace/events/target.h
index 67fad2677ed5..7e2e20ba26f1 100644
--- a/include/trace/events/target.h
+++ b/include/trace/events/target.h
@@ -31,8 +31,8 @@
scsi_opcode_name(INQUIRY), \
scsi_opcode_name(RECOVER_BUFFERED_DATA), \
scsi_opcode_name(MODE_SELECT), \
- scsi_opcode_name(RESERVE), \
- scsi_opcode_name(RELEASE), \
+ scsi_opcode_name(RESERVE_6), \
+ scsi_opcode_name(RELEASE_6), \
scsi_opcode_name(COPY), \
scsi_opcode_name(ERASE), \
scsi_opcode_name(MODE_SENSE), \
@@ -154,7 +154,7 @@ TRACE_EVENT(target_sequencer_start,
__entry->task_attribute = cmd->sam_task_attr;
__entry->control = scsi_command_control(cmd->t_task_cdb);
memcpy(__entry->cdb, cmd->t_task_cdb, TCM_MAX_COMMAND_SIZE);
- __assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname);
+ __assign_str(initiator);
),
TP_printk("%s -> LUN %03u tag %#llx %s data_length %6u CDB %s (TA:%s C:%02x)",
@@ -198,7 +198,7 @@ TRACE_EVENT(target_cmd_complete,
min(18, ((u8 *) cmd->sense_buffer)[SPC_ADD_SENSE_LEN_OFFSET] + 8) : 0;
memcpy(__entry->cdb, cmd->t_task_cdb, TCM_MAX_COMMAND_SIZE);
memcpy(__entry->sense_data, cmd->sense_buffer, __entry->sense_length);
- __assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname);
+ __assign_str(initiator);
),
TP_printk("%s <- LUN %03u tag %#llx status %s (sense len %d%s%s) %s data_length %6u CDB %s (TA:%s C:%02x)",
diff --git a/include/trace/events/task.h b/include/trace/events/task.h
index 64d160930b0d..4f0759634306 100644
--- a/include/trace/events/task.h
+++ b/include/trace/events/task.h
@@ -8,14 +8,14 @@
TRACE_EVENT(task_newtask,
- TP_PROTO(struct task_struct *task, unsigned long clone_flags),
+ TP_PROTO(struct task_struct *task, u64 clone_flags),
TP_ARGS(task, clone_flags),
TP_STRUCT__entry(
__field( pid_t, pid)
__array( char, comm, TASK_COMM_LEN)
- __field( unsigned long, clone_flags)
+ __field( u64, clone_flags)
__field( short, oom_score_adj)
),
@@ -26,7 +26,7 @@ TRACE_EVENT(task_newtask,
__entry->oom_score_adj = task->signal->oom_score_adj;
),
- TP_printk("pid=%d comm=%s clone_flags=%lx oom_score_adj=%hd",
+ TP_printk("pid=%d comm=%s clone_flags=%llx oom_score_adj=%hd",
__entry->pid, __entry->comm,
__entry->clone_flags, __entry->oom_score_adj)
);
@@ -38,22 +38,56 @@ TRACE_EVENT(task_rename,
TP_ARGS(task, comm),
TP_STRUCT__entry(
- __field( pid_t, pid)
__array( char, oldcomm, TASK_COMM_LEN)
__array( char, newcomm, TASK_COMM_LEN)
__field( short, oom_score_adj)
),
TP_fast_assign(
- __entry->pid = task->pid;
memcpy(entry->oldcomm, task->comm, TASK_COMM_LEN);
- strlcpy(entry->newcomm, comm, TASK_COMM_LEN);
+ strscpy(entry->newcomm, comm, TASK_COMM_LEN);
__entry->oom_score_adj = task->signal->oom_score_adj;
),
- TP_printk("pid=%d oldcomm=%s newcomm=%s oom_score_adj=%hd",
- __entry->pid, __entry->oldcomm,
- __entry->newcomm, __entry->oom_score_adj)
+ TP_printk("oldcomm=%s newcomm=%s oom_score_adj=%hd",
+ __entry->oldcomm, __entry->newcomm, __entry->oom_score_adj)
+);
+
+/**
+ * task_prctl_unknown - called on unknown prctl() option
+ * @option: option passed
+ * @arg2: arg2 passed
+ * @arg3: arg3 passed
+ * @arg4: arg4 passed
+ * @arg5: arg5 passed
+ *
+ * Called on an unknown prctl() option.
+ */
+TRACE_EVENT(task_prctl_unknown,
+
+ TP_PROTO(int option, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5),
+
+ TP_ARGS(option, arg2, arg3, arg4, arg5),
+
+ TP_STRUCT__entry(
+ __field( int, option)
+ __field( unsigned long, arg2)
+ __field( unsigned long, arg3)
+ __field( unsigned long, arg4)
+ __field( unsigned long, arg5)
+ ),
+
+ TP_fast_assign(
+ __entry->option = option;
+ __entry->arg2 = arg2;
+ __entry->arg3 = arg3;
+ __entry->arg4 = arg4;
+ __entry->arg5 = arg5;
+ ),
+
+ TP_printk("option=%d arg2=%ld arg3=%ld arg4=%ld arg5=%ld",
+ __entry->option, __entry->arg2, __entry->arg3, __entry->arg4, __entry->arg5)
);
#endif
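The new task_prctl_unknown event simply records the option and its four arguments. A minimal sketch of an emitting path (the surrounding function is an assumption for illustration; the real call site would be wherever prctl() falls through to an unrecognized option):

#include <trace/events/task.h>

static int handle_unknown_prctl(int option, unsigned long arg2,
				unsigned long arg3, unsigned long arg4,
				unsigned long arg5)
{
	trace_task_prctl_unknown(option, arg2, arg3, arg4, arg5);
	return -EINVAL;
}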
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index 901b440238d5..6757233bd064 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -11,47 +11,13 @@
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/sock_diag.h>
+#include <net/rstreason.h>
-#define TP_STORE_V4MAPPED(__entry, saddr, daddr) \
- do { \
- struct in6_addr *pin6; \
- \
- pin6 = (struct in6_addr *)__entry->saddr_v6; \
- ipv6_addr_set_v4mapped(saddr, pin6); \
- pin6 = (struct in6_addr *)__entry->daddr_v6; \
- ipv6_addr_set_v4mapped(daddr, pin6); \
- } while (0)
-
-#if IS_ENABLED(CONFIG_IPV6)
-#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6) \
- do { \
- if (sk->sk_family == AF_INET6) { \
- struct in6_addr *pin6; \
- \
- pin6 = (struct in6_addr *)__entry->saddr_v6; \
- *pin6 = saddr6; \
- pin6 = (struct in6_addr *)__entry->daddr_v6; \
- *pin6 = daddr6; \
- } else { \
- TP_STORE_V4MAPPED(__entry, saddr, daddr); \
- } \
- } while (0)
-#else
-#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6) \
- TP_STORE_V4MAPPED(__entry, saddr, daddr)
-#endif
+TRACE_EVENT(tcp_retransmit_skb,
-/*
- * tcp event with arguments sk and skb
- *
- * Note: this class requires a valid sk pointer; while skb pointer could
- * be NULL.
- */
-DECLARE_EVENT_CLASS(tcp_event_sk_skb,
-
- TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb, int err),
- TP_ARGS(sk, skb),
+ TP_ARGS(sk, skb, err),
TP_STRUCT__entry(
__field(const void *, skbaddr)
@@ -64,10 +30,11 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
__array(__u8, daddr, 4)
__array(__u8, saddr_v6, 16)
__array(__u8, daddr_v6, 16)
+ __field(int, err)
),
TP_fast_assign(
- struct inet_sock *inet = inet_sk(sk);
+ const struct inet_sock *inet = inet_sk(sk);
__be32 *p32;
__entry->skbaddr = skb;
@@ -86,33 +53,83 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
+
+ __entry->err = err;
),
- TP_printk("family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s",
+ TP_printk("skbaddr=%p skaddr=%p family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s err=%d",
+ __entry->skbaddr, __entry->skaddr,
show_family_name(__entry->family),
__entry->sport, __entry->dport, __entry->saddr, __entry->daddr,
__entry->saddr_v6, __entry->daddr_v6,
- show_tcp_state_name(__entry->state))
+ show_tcp_state_name(__entry->state),
+ __entry->err)
);
-DEFINE_EVENT(tcp_event_sk_skb, tcp_retransmit_skb,
+#undef FN
+#define FN(reason) TRACE_DEFINE_ENUM(SK_RST_REASON_##reason);
+DEFINE_RST_REASON(FN, FN)
- TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
-
- TP_ARGS(sk, skb)
-);
+#undef FN
+#undef FNe
+#define FN(reason) { SK_RST_REASON_##reason, #reason },
+#define FNe(reason) { SK_RST_REASON_##reason, #reason }
/*
* skb of trace_tcp_send_reset is the skb that caused RST. In case of
* active reset, skb should be NULL
*/
-DEFINE_EVENT(tcp_event_sk_skb, tcp_send_reset,
+TRACE_EVENT(tcp_send_reset,
- TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+ TP_PROTO(const struct sock *sk,
+ const struct sk_buff *skb__nullable,
+ const enum sk_rst_reason reason),
- TP_ARGS(sk, skb)
+ TP_ARGS(sk, skb__nullable, reason),
+
+ TP_STRUCT__entry(
+ __field(const void *, skbaddr)
+ __field(const void *, skaddr)
+ __field(int, state)
+ __field(enum sk_rst_reason, reason)
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __entry->skbaddr = skb__nullable;
+ __entry->skaddr = sk;
+ /* Zero means unknown state. */
+ __entry->state = sk ? sk->sk_state : 0;
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+
+ if (sk && sk_fullsock(sk)) {
+ const struct inet_sock *inet = inet_sk(sk);
+
+ TP_STORE_ADDR_PORTS(__entry, inet, sk);
+ } else if (skb__nullable) {
+ const struct tcphdr *th = (const struct tcphdr *)skb__nullable->data;
+ /*
+ * We should reverse the 4-tuple of skb, so later
+ * it can print the right flow direction of rst.
+ */
+ TP_STORE_ADDR_PORTS_SKB(skb__nullable, th, entry->daddr, entry->saddr);
+ }
+ __entry->reason = reason;
+ ),
+
+ TP_printk("skbaddr=%p skaddr=%p src=%pISpc dest=%pISpc state=%s reason=%s",
+ __entry->skbaddr, __entry->skaddr,
+ __entry->saddr, __entry->daddr,
+ __entry->state ? show_tcp_state_name(__entry->state) : "UNKNOWN",
+ __print_symbolic(__entry->reason, DEFINE_RST_REASON(FN, FNe)))
);
+#undef FN
+#undef FNe
+
/*
* tcp event with arguments sk
*
@@ -187,6 +204,88 @@ DEFINE_EVENT(tcp_event_sk, tcp_rcv_space_adjust,
TP_ARGS(sk)
);
+TRACE_EVENT(tcp_rcvbuf_grow,
+
+ TP_PROTO(struct sock *sk, int time),
+
+ TP_ARGS(sk, time),
+
+ TP_STRUCT__entry(
+ __field(int, time)
+ __field(__u32, rtt_us)
+ __field(__u32, copied)
+ __field(__u32, inq)
+ __field(__u32, space)
+ __field(__u32, ooo_space)
+ __field(__u32, rcvbuf)
+ __field(__u32, rcv_ssthresh)
+ __field(__u32, window_clamp)
+ __field(__u32, rcv_wnd)
+ __field(__u8, scaling_ratio)
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(__u16, family)
+ __array(__u8, saddr, 4)
+ __array(__u8, daddr, 4)
+ __array(__u8, saddr_v6, 16)
+ __array(__u8, daddr_v6, 16)
+ __field(const void *, skaddr)
+ __field(__u64, sock_cookie)
+ ),
+
+ TP_fast_assign(
+ struct inet_sock *inet = inet_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ __be32 *p32;
+
+ __entry->time = time;
+ __entry->rtt_us = tp->rcv_rtt_est.rtt_us >> 3;
+ __entry->copied = tp->copied_seq - tp->rcvq_space.seq;
+ __entry->inq = tp->rcv_nxt - tp->copied_seq;
+ __entry->space = tp->rcvq_space.space;
+ __entry->ooo_space = RB_EMPTY_ROOT(&tp->out_of_order_queue) ? 0 :
+ TCP_SKB_CB(tp->ooo_last_skb)->end_seq -
+ tp->rcv_nxt;
+
+ __entry->rcvbuf = sk->sk_rcvbuf;
+ __entry->rcv_ssthresh = tp->rcv_ssthresh;
+ __entry->window_clamp = tp->window_clamp;
+ __entry->rcv_wnd = tp->rcv_wnd;
+ __entry->scaling_ratio = tp->scaling_ratio;
+ __entry->sport = ntohs(inet->inet_sport);
+ __entry->dport = ntohs(inet->inet_dport);
+ __entry->family = sk->sk_family;
+
+ p32 = (__be32 *) __entry->saddr;
+ *p32 = inet->inet_saddr;
+
+ p32 = (__be32 *) __entry->daddr;
+ *p32 = inet->inet_daddr;
+
+ TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+ sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
+
+ __entry->skaddr = sk;
+ __entry->sock_cookie = sock_gen_cookie(sk);
+ ),
+
+ TP_printk("time=%u rtt_us=%u copied=%u inq=%u space=%u ooo=%u scaling_ratio=%u rcvbuf=%u "
+ "rcv_ssthresh=%u window_clamp=%u rcv_wnd=%u "
+ "family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 "
+ "saddrv6=%pI6c daddrv6=%pI6c skaddr=%p sock_cookie=%llx",
+ __entry->time, __entry->rtt_us, __entry->copied,
+ __entry->inq, __entry->space, __entry->ooo_space,
+ __entry->scaling_ratio, __entry->rcvbuf,
+ __entry->rcv_ssthresh, __entry->window_clamp,
+ __entry->rcv_wnd,
+ show_family_name(__entry->family),
+ __entry->sport, __entry->dport,
+ __entry->saddr, __entry->daddr,
+ __entry->saddr_v6, __entry->daddr_v6,
+ __entry->skaddr,
+ __entry->sock_cookie)
+);
+
TRACE_EVENT(tcp_retransmit_synack,
TP_PROTO(const struct sock *sk, const struct request_sock *req),
@@ -233,11 +332,41 @@ TRACE_EVENT(tcp_retransmit_synack,
__entry->saddr_v6, __entry->daddr_v6)
);
+TRACE_EVENT(tcp_sendmsg_locked,
+ TP_PROTO(const struct sock *sk, const struct msghdr *msg,
+ const struct sk_buff *skb, int size_goal),
+
+ TP_ARGS(sk, msg, skb, size_goal),
+
+ TP_STRUCT__entry(
+ __field(const void *, skb_addr)
+ __field(int, skb_len)
+ __field(int, msg_left)
+ __field(int, size_goal)
+ ),
+
+ TP_fast_assign(
+ __entry->skb_addr = skb;
+ __entry->skb_len = skb ? skb->len : 0;
+ __entry->msg_left = msg_data_left(msg);
+ __entry->size_goal = size_goal;
+ ),
+
+ TP_printk("skb_addr %p skb_len %d msg_left %d size_goal %d",
+ __entry->skb_addr, __entry->skb_len, __entry->msg_left,
+ __entry->size_goal));
+
+DECLARE_TRACE(tcp_cwnd_reduction,
+ TP_PROTO(const struct sock *sk, int newly_acked_sacked,
+ int newly_lost, int flag),
+ TP_ARGS(sk, newly_acked_sacked, newly_lost, flag)
+);
+
#include <trace/events/net_probe_common.h>
TRACE_EVENT(tcp_probe,
- TP_PROTO(struct sock *sk, struct sk_buff *skb),
+ TP_PROTO(struct sock *sk, const struct sk_buff *skb),
TP_ARGS(sk, skb),
@@ -258,6 +387,8 @@ TRACE_EVENT(tcp_probe,
__field(__u32, srtt)
__field(__u32, rcv_wnd)
__field(__u64, sock_cookie)
+ __field(const void *, skbaddr)
+ __field(const void *, skaddr)
),
TP_fast_assign(
@@ -285,58 +416,20 @@ TRACE_EVENT(tcp_probe,
__entry->ssthresh = tcp_current_ssthresh(sk);
__entry->srtt = tp->srtt_us >> 3;
__entry->sock_cookie = sock_gen_cookie(sk);
+
+ __entry->skbaddr = skb;
+ __entry->skaddr = sk;
),
- TP_printk("family=%s src=%pISpc dest=%pISpc mark=%#x data_len=%d snd_nxt=%#x snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u rcv_wnd=%u sock_cookie=%llx",
+ TP_printk("family=%s src=%pISpc dest=%pISpc mark=%#x data_len=%d snd_nxt=%#x snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u rcv_wnd=%u sock_cookie=%llx skbaddr=%p skaddr=%p",
show_family_name(__entry->family),
__entry->saddr, __entry->daddr, __entry->mark,
__entry->data_len, __entry->snd_nxt, __entry->snd_una,
__entry->snd_cwnd, __entry->ssthresh, __entry->snd_wnd,
- __entry->srtt, __entry->rcv_wnd, __entry->sock_cookie)
+ __entry->srtt, __entry->rcv_wnd, __entry->sock_cookie,
+ __entry->skbaddr, __entry->skaddr)
);
-#define TP_STORE_ADDR_PORTS_SKB_V4(__entry, skb) \
- do { \
- const struct tcphdr *th = (const struct tcphdr *)skb->data; \
- struct sockaddr_in *v4 = (void *)__entry->saddr; \
- \
- v4->sin_family = AF_INET; \
- v4->sin_port = th->source; \
- v4->sin_addr.s_addr = ip_hdr(skb)->saddr; \
- v4 = (void *)__entry->daddr; \
- v4->sin_family = AF_INET; \
- v4->sin_port = th->dest; \
- v4->sin_addr.s_addr = ip_hdr(skb)->daddr; \
- } while (0)
-
-#if IS_ENABLED(CONFIG_IPV6)
-
-#define TP_STORE_ADDR_PORTS_SKB(__entry, skb) \
- do { \
- const struct iphdr *iph = ip_hdr(skb); \
- \
- if (iph->version == 6) { \
- const struct tcphdr *th = (const struct tcphdr *)skb->data; \
- struct sockaddr_in6 *v6 = (void *)__entry->saddr; \
- \
- v6->sin6_family = AF_INET6; \
- v6->sin6_port = th->source; \
- v6->sin6_addr = ipv6_hdr(skb)->saddr; \
- v6 = (void *)__entry->daddr; \
- v6->sin6_family = AF_INET6; \
- v6->sin6_port = th->dest; \
- v6->sin6_addr = ipv6_hdr(skb)->daddr; \
- } else \
- TP_STORE_ADDR_PORTS_SKB_V4(__entry, skb); \
- } while (0)
-
-#else
-
-#define TP_STORE_ADDR_PORTS_SKB(__entry, skb) \
- TP_STORE_ADDR_PORTS_SKB_V4(__entry, skb)
-
-#endif
-
/*
* tcp event with only skb
*/
@@ -353,15 +446,17 @@ DECLARE_EVENT_CLASS(tcp_event_skb,
),
TP_fast_assign(
+ const struct tcphdr *th = (const struct tcphdr *)skb->data;
__entry->skbaddr = skb;
memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
- TP_STORE_ADDR_PORTS_SKB(__entry, skb);
+ TP_STORE_ADDR_PORTS_SKB(skb, th, __entry->saddr, __entry->daddr);
),
- TP_printk("src=%pISpc dest=%pISpc", __entry->saddr, __entry->daddr)
+ TP_printk("skbaddr=%p src=%pISpc dest=%pISpc",
+ __entry->skbaddr, __entry->saddr, __entry->daddr)
);
DEFINE_EVENT(tcp_event_skb, tcp_bad_csum,
@@ -381,6 +476,7 @@ TRACE_EVENT(tcp_cong_state_set,
__field(const void *, skaddr)
__field(__u16, sport)
__field(__u16, dport)
+ __field(__u16, family)
__array(__u8, saddr, 4)
__array(__u8, daddr, 4)
__array(__u8, saddr_v6, 16)
@@ -396,6 +492,7 @@ TRACE_EVENT(tcp_cong_state_set,
__entry->sport = ntohs(inet->inet_sport);
__entry->dport = ntohs(inet->inet_dport);
+ __entry->family = sk->sk_family;
p32 = (__be32 *) __entry->saddr;
*p32 = inet->inet_saddr;
@@ -409,13 +506,333 @@ TRACE_EVENT(tcp_cong_state_set,
__entry->cong_state = ca_state;
),
- TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c cong_state=%u",
+ TP_printk("family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c cong_state=%u",
+ show_family_name(__entry->family),
__entry->sport, __entry->dport,
__entry->saddr, __entry->daddr,
__entry->saddr_v6, __entry->daddr_v6,
__entry->cong_state)
);
+DECLARE_EVENT_CLASS(tcp_hash_event,
+
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+
+ TP_ARGS(sk, skb),
+
+ TP_STRUCT__entry(
+ __field(__u64, net_cookie)
+ __field(const void *, skbaddr)
+ __field(const void *, skaddr)
+ __field(int, state)
+
+ /* sockaddr_in6 is always bigger than sockaddr_in */
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
+ __field(int, l3index)
+
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(__u16, family)
+
+ __field(bool, fin)
+ __field(bool, syn)
+ __field(bool, rst)
+ __field(bool, psh)
+ __field(bool, ack)
+ ),
+
+ TP_fast_assign(
+ const struct tcphdr *th = (const struct tcphdr *)skb->data;
+
+ __entry->net_cookie = sock_net(sk)->net_cookie;
+ __entry->skbaddr = skb;
+ __entry->skaddr = sk;
+ __entry->state = sk->sk_state;
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+ TP_STORE_ADDR_PORTS_SKB(skb, th, __entry->saddr, __entry->daddr);
+ __entry->l3index = inet_sdif(skb) ? inet_iif(skb) : 0;
+
+ /* For filtering use */
+ __entry->sport = ntohs(th->source);
+ __entry->dport = ntohs(th->dest);
+ __entry->family = sk->sk_family;
+
+ __entry->fin = th->fin;
+ __entry->syn = th->syn;
+ __entry->rst = th->rst;
+ __entry->psh = th->psh;
+ __entry->ack = th->ack;
+ ),
+
+ TP_printk("net=%llu state=%s family=%s src=%pISpc dest=%pISpc L3index=%d [%c%c%c%c%c]",
+ __entry->net_cookie,
+ show_tcp_state_name(__entry->state),
+ show_family_name(__entry->family),
+ __entry->saddr, __entry->daddr,
+ __entry->l3index,
+ __entry->fin ? 'F' : ' ',
+ __entry->syn ? 'S' : ' ',
+ __entry->rst ? 'R' : ' ',
+ __entry->psh ? 'P' : ' ',
+ __entry->ack ? '.' : ' ')
+);
+
+DEFINE_EVENT(tcp_hash_event, tcp_hash_bad_header,
+
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+ TP_ARGS(sk, skb)
+);
+
+DEFINE_EVENT(tcp_hash_event, tcp_hash_md5_required,
+
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+ TP_ARGS(sk, skb)
+);
+
+DEFINE_EVENT(tcp_hash_event, tcp_hash_md5_unexpected,
+
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+ TP_ARGS(sk, skb)
+);
+
+DEFINE_EVENT(tcp_hash_event, tcp_hash_md5_mismatch,
+
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+ TP_ARGS(sk, skb)
+);
+
+DEFINE_EVENT(tcp_hash_event, tcp_hash_ao_required,
+
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+ TP_ARGS(sk, skb)
+);
+
+DECLARE_EVENT_CLASS(tcp_ao_event,
+
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb,
+ const __u8 keyid, const __u8 rnext, const __u8 maclen),
+
+ TP_ARGS(sk, skb, keyid, rnext, maclen),
+
+ TP_STRUCT__entry(
+ __field(__u64, net_cookie)
+ __field(const void *, skbaddr)
+ __field(const void *, skaddr)
+ __field(int, state)
+
+ /* sockaddr_in6 is always bigger than sockaddr_in */
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
+ __field(int, l3index)
+
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(__u16, family)
+
+ __field(bool, fin)
+ __field(bool, syn)
+ __field(bool, rst)
+ __field(bool, psh)
+ __field(bool, ack)
+
+ __field(__u8, keyid)
+ __field(__u8, rnext)
+ __field(__u8, maclen)
+ ),
+
+ TP_fast_assign(
+ const struct tcphdr *th = (const struct tcphdr *)skb->data;
+
+ __entry->net_cookie = sock_net(sk)->net_cookie;
+ __entry->skbaddr = skb;
+ __entry->skaddr = sk;
+ __entry->state = sk->sk_state;
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+ TP_STORE_ADDR_PORTS_SKB(skb, th, __entry->saddr, __entry->daddr);
+ __entry->l3index = inet_sdif(skb) ? inet_iif(skb) : 0;
+
+ /* For filtering use */
+ __entry->sport = ntohs(th->source);
+ __entry->dport = ntohs(th->dest);
+ __entry->family = sk->sk_family;
+
+ __entry->fin = th->fin;
+ __entry->syn = th->syn;
+ __entry->rst = th->rst;
+ __entry->psh = th->psh;
+ __entry->ack = th->ack;
+
+ __entry->keyid = keyid;
+ __entry->rnext = rnext;
+ __entry->maclen = maclen;
+ ),
+
+ TP_printk("net=%llu state=%s family=%s src=%pISpc dest=%pISpc L3index=%d [%c%c%c%c%c] keyid=%u rnext=%u maclen=%u",
+ __entry->net_cookie,
+ show_tcp_state_name(__entry->state),
+ show_family_name(__entry->family),
+ __entry->saddr, __entry->daddr,
+ __entry->l3index,
+ __entry->fin ? 'F' : ' ',
+ __entry->syn ? 'S' : ' ',
+ __entry->rst ? 'R' : ' ',
+ __entry->psh ? 'P' : ' ',
+ __entry->ack ? '.' : ' ',
+ __entry->keyid, __entry->rnext, __entry->maclen)
+);
+
+DEFINE_EVENT(tcp_ao_event, tcp_ao_handshake_failure,
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb,
+ const __u8 keyid, const __u8 rnext, const __u8 maclen),
+ TP_ARGS(sk, skb, keyid, rnext, maclen)
+);
+
+#ifdef CONFIG_TCP_AO
+DEFINE_EVENT(tcp_ao_event, tcp_ao_wrong_maclen,
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb,
+ const __u8 keyid, const __u8 rnext, const __u8 maclen),
+ TP_ARGS(sk, skb, keyid, rnext, maclen)
+);
+
+DEFINE_EVENT(tcp_ao_event, tcp_ao_mismatch,
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb,
+ const __u8 keyid, const __u8 rnext, const __u8 maclen),
+ TP_ARGS(sk, skb, keyid, rnext, maclen)
+);
+
+DEFINE_EVENT(tcp_ao_event, tcp_ao_key_not_found,
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb,
+ const __u8 keyid, const __u8 rnext, const __u8 maclen),
+ TP_ARGS(sk, skb, keyid, rnext, maclen)
+);
+
+DEFINE_EVENT(tcp_ao_event, tcp_ao_rnext_request,
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb,
+ const __u8 keyid, const __u8 rnext, const __u8 maclen),
+ TP_ARGS(sk, skb, keyid, rnext, maclen)
+);
+
+DECLARE_EVENT_CLASS(tcp_ao_event_sk,
+
+ TP_PROTO(const struct sock *sk, const __u8 keyid, const __u8 rnext),
+
+ TP_ARGS(sk, keyid, rnext),
+
+ TP_STRUCT__entry(
+ __field(__u64, net_cookie)
+ __field(const void *, skaddr)
+ __field(int, state)
+
+ /* sockaddr_in6 is always bigger than sockaddr_in */
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
+
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(__u16, family)
+
+ __field(__u8, keyid)
+ __field(__u8, rnext)
+ ),
+
+ TP_fast_assign(
+ const struct inet_sock *inet = inet_sk(sk);
+
+ __entry->net_cookie = sock_net(sk)->net_cookie;
+ __entry->skaddr = sk;
+ __entry->state = sk->sk_state;
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+ TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
+ /* For filtering use */
+ __entry->sport = ntohs(inet->inet_sport);
+ __entry->dport = ntohs(inet->inet_dport);
+ __entry->family = sk->sk_family;
+
+ __entry->keyid = keyid;
+ __entry->rnext = rnext;
+ ),
+
+ TP_printk("net=%llu state=%s family=%s src=%pISpc dest=%pISpc keyid=%u rnext=%u",
+ __entry->net_cookie,
+ show_tcp_state_name(__entry->state),
+ show_family_name(__entry->family),
+ __entry->saddr, __entry->daddr,
+ __entry->keyid, __entry->rnext)
+);
+
+DEFINE_EVENT(tcp_ao_event_sk, tcp_ao_synack_no_key,
+ TP_PROTO(const struct sock *sk, const __u8 keyid, const __u8 rnext),
+ TP_ARGS(sk, keyid, rnext)
+);
+
+DECLARE_EVENT_CLASS(tcp_ao_event_sne,
+
+ TP_PROTO(const struct sock *sk, __u32 new_sne),
+
+ TP_ARGS(sk, new_sne),
+
+ TP_STRUCT__entry(
+ __field(__u64, net_cookie)
+ __field(const void *, skaddr)
+ __field(int, state)
+
+ /* sockaddr_in6 is always bigger than sockaddr_in */
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
+
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(__u16, family)
+
+ __field(__u32, new_sne)
+ ),
+
+ TP_fast_assign(
+ const struct inet_sock *inet = inet_sk(sk);
+
+ __entry->net_cookie = sock_net(sk)->net_cookie;
+ __entry->skaddr = sk;
+ __entry->state = sk->sk_state;
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+ TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
+ /* For filtering use */
+ __entry->sport = ntohs(inet->inet_sport);
+ __entry->dport = ntohs(inet->inet_dport);
+ __entry->family = sk->sk_family;
+
+ __entry->new_sne = new_sne;
+ ),
+
+ TP_printk("net=%llu state=%s family=%s src=%pISpc dest=%pISpc sne=%u",
+ __entry->net_cookie,
+ show_tcp_state_name(__entry->state),
+ show_family_name(__entry->family),
+ __entry->saddr, __entry->daddr,
+ __entry->new_sne)
+);
+
+DEFINE_EVENT(tcp_ao_event_sne, tcp_ao_snd_sne_update,
+ TP_PROTO(const struct sock *sk, __u32 new_sne),
+ TP_ARGS(sk, new_sne)
+);
+
+DEFINE_EVENT(tcp_ao_event_sne, tcp_ao_rcv_sne_update,
+ TP_PROTO(const struct sock *sk, __u32 new_sne),
+ TP_ARGS(sk, new_sne)
+);
+#endif /* CONFIG_TCP_AO */
+
#endif /* _TRACE_TCP_H */
/* This part must be outside protection */
diff --git a/include/trace/events/tegra_apb_dma.h b/include/trace/events/tegra_apb_dma.h
index 971cd02d2daf..6d9f5075baa3 100644
--- a/include/trace/events/tegra_apb_dma.h
+++ b/include/trace/events/tegra_apb_dma.h
@@ -16,7 +16,7 @@ TRACE_EVENT(tegra_dma_tx_status,
__field(__u32, residue)
),
TP_fast_assign(
- __assign_str(chan, dev_name(&dc->dev->device));
+ __assign_str(chan);
__entry->cookie = cookie;
__entry->residue = state ? state->residue : (u32)-1;
),
@@ -33,7 +33,7 @@ TRACE_EVENT(tegra_dma_complete_cb,
__field(void *, ptr)
),
TP_fast_assign(
- __assign_str(chan, dev_name(&dc->dev->device));
+ __assign_str(chan);
__entry->count = count;
__entry->ptr = ptr;
),
@@ -49,7 +49,7 @@ TRACE_EVENT(tegra_dma_isr,
__field(int, irq)
),
TP_fast_assign(
- __assign_str(chan, dev_name(&dc->dev->device));
+ __assign_str(chan);
__entry->irq = irq;
),
TP_printk("%s: irq %d\n", __get_str(chan), __entry->irq)
diff --git a/include/trace/events/thermal.h b/include/trace/events/thermal.h
deleted file mode 100644
index e58bf3072f32..000000000000
--- a/include/trace/events/thermal.h
+++ /dev/null
@@ -1,199 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM thermal
-
-#if !defined(_TRACE_THERMAL_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_THERMAL_H
-
-#include <linux/devfreq.h>
-#include <linux/thermal.h>
-#include <linux/tracepoint.h>
-
-TRACE_DEFINE_ENUM(THERMAL_TRIP_CRITICAL);
-TRACE_DEFINE_ENUM(THERMAL_TRIP_HOT);
-TRACE_DEFINE_ENUM(THERMAL_TRIP_PASSIVE);
-TRACE_DEFINE_ENUM(THERMAL_TRIP_ACTIVE);
-
-#define show_tzt_type(type) \
- __print_symbolic(type, \
- { THERMAL_TRIP_CRITICAL, "CRITICAL"}, \
- { THERMAL_TRIP_HOT, "HOT"}, \
- { THERMAL_TRIP_PASSIVE, "PASSIVE"}, \
- { THERMAL_TRIP_ACTIVE, "ACTIVE"})
-
-TRACE_EVENT(thermal_temperature,
-
- TP_PROTO(struct thermal_zone_device *tz),
-
- TP_ARGS(tz),
-
- TP_STRUCT__entry(
- __string(thermal_zone, tz->type)
- __field(int, id)
- __field(int, temp_prev)
- __field(int, temp)
- ),
-
- TP_fast_assign(
- __assign_str(thermal_zone, tz->type);
- __entry->id = tz->id;
- __entry->temp_prev = tz->last_temperature;
- __entry->temp = tz->temperature;
- ),
-
- TP_printk("thermal_zone=%s id=%d temp_prev=%d temp=%d",
- __get_str(thermal_zone), __entry->id, __entry->temp_prev,
- __entry->temp)
-);
-
-TRACE_EVENT(cdev_update,
-
- TP_PROTO(struct thermal_cooling_device *cdev, unsigned long target),
-
- TP_ARGS(cdev, target),
-
- TP_STRUCT__entry(
- __string(type, cdev->type)
- __field(unsigned long, target)
- ),
-
- TP_fast_assign(
- __assign_str(type, cdev->type);
- __entry->target = target;
- ),
-
- TP_printk("type=%s target=%lu", __get_str(type), __entry->target)
-);
-
-TRACE_EVENT(thermal_zone_trip,
-
- TP_PROTO(struct thermal_zone_device *tz, int trip,
- enum thermal_trip_type trip_type),
-
- TP_ARGS(tz, trip, trip_type),
-
- TP_STRUCT__entry(
- __string(thermal_zone, tz->type)
- __field(int, id)
- __field(int, trip)
- __field(enum thermal_trip_type, trip_type)
- ),
-
- TP_fast_assign(
- __assign_str(thermal_zone, tz->type);
- __entry->id = tz->id;
- __entry->trip = trip;
- __entry->trip_type = trip_type;
- ),
-
- TP_printk("thermal_zone=%s id=%d trip=%d trip_type=%s",
- __get_str(thermal_zone), __entry->id, __entry->trip,
- show_tzt_type(__entry->trip_type))
-);
-
-#ifdef CONFIG_CPU_THERMAL
-TRACE_EVENT(thermal_power_cpu_get_power_simple,
- TP_PROTO(int cpu, u32 power),
-
- TP_ARGS(cpu, power),
-
- TP_STRUCT__entry(
- __field(int, cpu)
- __field(u32, power)
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->power = power;
- ),
-
- TP_printk("cpu=%d power=%u", __entry->cpu, __entry->power)
-);
-
-TRACE_EVENT(thermal_power_cpu_limit,
- TP_PROTO(const struct cpumask *cpus, unsigned int freq,
- unsigned long cdev_state, u32 power),
-
- TP_ARGS(cpus, freq, cdev_state, power),
-
- TP_STRUCT__entry(
- __bitmask(cpumask, num_possible_cpus())
- __field(unsigned int, freq )
- __field(unsigned long, cdev_state)
- __field(u32, power )
- ),
-
- TP_fast_assign(
- __assign_bitmask(cpumask, cpumask_bits(cpus),
- num_possible_cpus());
- __entry->freq = freq;
- __entry->cdev_state = cdev_state;
- __entry->power = power;
- ),
-
- TP_printk("cpus=%s freq=%u cdev_state=%lu power=%u",
- __get_bitmask(cpumask), __entry->freq, __entry->cdev_state,
- __entry->power)
-);
-#endif /* CONFIG_CPU_THERMAL */
-
-#ifdef CONFIG_DEVFREQ_THERMAL
-TRACE_EVENT(thermal_power_devfreq_get_power,
- TP_PROTO(struct thermal_cooling_device *cdev,
- struct devfreq_dev_status *status, unsigned long freq,
- u32 power),
-
- TP_ARGS(cdev, status, freq, power),
-
- TP_STRUCT__entry(
- __string(type, cdev->type )
- __field(unsigned long, freq )
- __field(u32, busy_time)
- __field(u32, total_time)
- __field(u32, power)
- ),
-
- TP_fast_assign(
- __assign_str(type, cdev->type);
- __entry->freq = freq;
- __entry->busy_time = status->busy_time;
- __entry->total_time = status->total_time;
- __entry->power = power;
- ),
-
- TP_printk("type=%s freq=%lu load=%u power=%u",
- __get_str(type), __entry->freq,
- __entry->total_time == 0 ? 0 :
- (100 * __entry->busy_time) / __entry->total_time,
- __entry->power)
-);
-
-TRACE_EVENT(thermal_power_devfreq_limit,
- TP_PROTO(struct thermal_cooling_device *cdev, unsigned long freq,
- unsigned long cdev_state, u32 power),
-
- TP_ARGS(cdev, freq, cdev_state, power),
-
- TP_STRUCT__entry(
- __string(type, cdev->type)
- __field(unsigned int, freq )
- __field(unsigned long, cdev_state)
- __field(u32, power )
- ),
-
- TP_fast_assign(
- __assign_str(type, cdev->type);
- __entry->freq = freq;
- __entry->cdev_state = cdev_state;
- __entry->power = power;
- ),
-
- TP_printk("type=%s freq=%u cdev_state=%lu power=%u",
- __get_str(type), __entry->freq, __entry->cdev_state,
- __entry->power)
-);
-#endif /* CONFIG_DEVFREQ_THERMAL */
-#endif /* _TRACE_THERMAL_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/trace/events/thermal_power_allocator.h b/include/trace/events/thermal_power_allocator.h
deleted file mode 100644
index 1c8fb95544f9..000000000000
--- a/include/trace/events/thermal_power_allocator.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM thermal_power_allocator
-
-#if !defined(_TRACE_THERMAL_POWER_ALLOCATOR_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_THERMAL_POWER_ALLOCATOR_H
-
-#include <linux/tracepoint.h>
-
-TRACE_EVENT(thermal_power_allocator,
- TP_PROTO(struct thermal_zone_device *tz, u32 *req_power,
- u32 total_req_power, u32 *granted_power,
- u32 total_granted_power, size_t num_actors,
- u32 power_range, u32 max_allocatable_power,
- int current_temp, s32 delta_temp),
- TP_ARGS(tz, req_power, total_req_power, granted_power,
- total_granted_power, num_actors, power_range,
- max_allocatable_power, current_temp, delta_temp),
- TP_STRUCT__entry(
- __field(int, tz_id )
- __dynamic_array(u32, req_power, num_actors )
- __field(u32, total_req_power )
- __dynamic_array(u32, granted_power, num_actors)
- __field(u32, total_granted_power )
- __field(size_t, num_actors )
- __field(u32, power_range )
- __field(u32, max_allocatable_power )
- __field(int, current_temp )
- __field(s32, delta_temp )
- ),
- TP_fast_assign(
- __entry->tz_id = tz->id;
- memcpy(__get_dynamic_array(req_power), req_power,
- num_actors * sizeof(*req_power));
- __entry->total_req_power = total_req_power;
- memcpy(__get_dynamic_array(granted_power), granted_power,
- num_actors * sizeof(*granted_power));
- __entry->total_granted_power = total_granted_power;
- __entry->num_actors = num_actors;
- __entry->power_range = power_range;
- __entry->max_allocatable_power = max_allocatable_power;
- __entry->current_temp = current_temp;
- __entry->delta_temp = delta_temp;
- ),
-
- TP_printk("thermal_zone_id=%d req_power={%s} total_req_power=%u granted_power={%s} total_granted_power=%u power_range=%u max_allocatable_power=%u current_temperature=%d delta_temperature=%d",
- __entry->tz_id,
- __print_array(__get_dynamic_array(req_power),
- __entry->num_actors, 4),
- __entry->total_req_power,
- __print_array(__get_dynamic_array(granted_power),
- __entry->num_actors, 4),
- __entry->total_granted_power, __entry->power_range,
- __entry->max_allocatable_power, __entry->current_temp,
- __entry->delta_temp)
-);
-
-TRACE_EVENT(thermal_power_allocator_pid,
- TP_PROTO(struct thermal_zone_device *tz, s32 err, s32 err_integral,
- s64 p, s64 i, s64 d, s32 output),
- TP_ARGS(tz, err, err_integral, p, i, d, output),
- TP_STRUCT__entry(
- __field(int, tz_id )
- __field(s32, err )
- __field(s32, err_integral)
- __field(s64, p )
- __field(s64, i )
- __field(s64, d )
- __field(s32, output )
- ),
- TP_fast_assign(
- __entry->tz_id = tz->id;
- __entry->err = err;
- __entry->err_integral = err_integral;
- __entry->p = p;
- __entry->i = i;
- __entry->d = d;
- __entry->output = output;
- ),
-
- TP_printk("thermal_zone_id=%d err=%d err_integral=%d p=%lld i=%lld d=%lld output=%d",
- __entry->tz_id, __entry->err, __entry->err_integral,
- __entry->p, __entry->i, __entry->d, __entry->output)
-);
-#endif /* _TRACE_THERMAL_POWER_ALLOCATOR_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/trace/events/thp.h b/include/trace/events/thp.h
index 202b3e3e67ff..c8fe879d5828 100644
--- a/include/trace/events/thp.h
+++ b/include/trace/events/thp.h
@@ -8,25 +8,35 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
-TRACE_EVENT(hugepage_set_pmd,
+#ifdef CONFIG_PPC_BOOK3S_64
+DECLARE_EVENT_CLASS(hugepage_set,
- TP_PROTO(unsigned long addr, unsigned long pmd),
- TP_ARGS(addr, pmd),
+ TP_PROTO(unsigned long addr, unsigned long pte),
+ TP_ARGS(addr, pte),
TP_STRUCT__entry(
__field(unsigned long, addr)
- __field(unsigned long, pmd)
+ __field(unsigned long, pte)
),
TP_fast_assign(
__entry->addr = addr;
- __entry->pmd = pmd;
+ __entry->pte = pte;
),
- TP_printk("Set pmd with 0x%lx with 0x%lx", __entry->addr, __entry->pmd)
+ TP_printk("Set page table entry with 0x%lx with 0x%lx", __entry->addr, __entry->pte)
);
+DEFINE_EVENT(hugepage_set, hugepage_set_pmd,
+ TP_PROTO(unsigned long addr, unsigned long pmd),
+ TP_ARGS(addr, pmd)
+);
-TRACE_EVENT(hugepage_update,
+DEFINE_EVENT(hugepage_set, hugepage_set_pud,
+ TP_PROTO(unsigned long addr, unsigned long pud),
+ TP_ARGS(addr, pud)
+);
+
+DECLARE_EVENT_CLASS(hugepage_update,
TP_PROTO(unsigned long addr, unsigned long pte, unsigned long clr, unsigned long set),
TP_ARGS(addr, pte, clr, set),
@@ -48,6 +58,17 @@ TRACE_EVENT(hugepage_update,
TP_printk("hugepage update at addr 0x%lx and pte = 0x%lx clr = 0x%lx, set = 0x%lx", __entry->addr, __entry->pte, __entry->clr, __entry->set)
);
+DEFINE_EVENT(hugepage_update, hugepage_update_pmd,
+ TP_PROTO(unsigned long addr, unsigned long pmd, unsigned long clr, unsigned long set),
+ TP_ARGS(addr, pmd, clr, set)
+);
+
+DEFINE_EVENT(hugepage_update, hugepage_update_pud,
+ TP_PROTO(unsigned long addr, unsigned long pud, unsigned long clr, unsigned long set),
+ TP_ARGS(addr, pud, clr, set)
+);
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
DECLARE_EVENT_CLASS(migration_pmd,
TP_PROTO(unsigned long addr, unsigned long pmd),
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 2e713a7d9aa3..1641ae3e6ca0 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -46,22 +46,21 @@ DEFINE_EVENT(timer_class, timer_init,
/**
* timer_start - called when the timer is started
- * @timer: pointer to struct timer_list
- * @expires: the timers expiry time
- * @flags: the timers flags
+ * @timer: pointer to struct timer_list
+ * @bucket_expiry: the bucket expiry time
*/
TRACE_EVENT(timer_start,
TP_PROTO(struct timer_list *timer,
- unsigned long expires,
- unsigned int flags),
+ unsigned long bucket_expiry),
- TP_ARGS(timer, expires, flags),
+ TP_ARGS(timer, bucket_expiry),
TP_STRUCT__entry(
__field( void *, timer )
__field( void *, function )
__field( unsigned long, expires )
+ __field( unsigned long, bucket_expiry )
__field( unsigned long, now )
__field( unsigned int, flags )
),
@@ -69,15 +68,16 @@ TRACE_EVENT(timer_start,
TP_fast_assign(
__entry->timer = timer;
__entry->function = timer->function;
- __entry->expires = expires;
+ __entry->expires = timer->expires;
+ __entry->bucket_expiry = bucket_expiry;
__entry->now = jiffies;
- __entry->flags = flags;
+ __entry->flags = timer->flags;
),
- TP_printk("timer=%p function=%ps expires=%lu [timeout=%ld] cpu=%u idx=%u flags=%s",
+ TP_printk("timer=%p function=%ps expires=%lu [timeout=%ld] bucket_expiry=%lu cpu=%u idx=%u flags=%s",
__entry->timer, __entry->function, __entry->expires,
(long)__entry->expires - __entry->now,
- __entry->flags & TIMER_CPUMASK,
+ __entry->bucket_expiry, __entry->flags & TIMER_CPUMASK,
__entry->flags >> TIMER_ARRAYSHIFT,
decode_timer_flags(__entry->flags & TIMER_TRACE_FLAGMASK))
);
@@ -142,6 +142,26 @@ DEFINE_EVENT(timer_class, timer_cancel,
TP_ARGS(timer)
);
+TRACE_EVENT(timer_base_idle,
+
+ TP_PROTO(bool is_idle, unsigned int cpu),
+
+ TP_ARGS(is_idle, cpu),
+
+ TP_STRUCT__entry(
+ __field( bool, is_idle )
+ __field( unsigned int, cpu )
+ ),
+
+ TP_fast_assign(
+ __entry->is_idle = is_idle;
+ __entry->cpu = cpu;
+ ),
+
+ TP_printk("is_idle=%d cpu=%d",
+ __entry->is_idle, __entry->cpu)
+);
+
#define decode_clockid(type) \
__print_symbolic(type, \
{ CLOCK_REALTIME, "CLOCK_REALTIME" }, \
@@ -158,15 +178,19 @@ DEFINE_EVENT(timer_class, timer_cancel,
{ HRTIMER_MODE_ABS_SOFT, "ABS|SOFT" }, \
{ HRTIMER_MODE_REL_SOFT, "REL|SOFT" }, \
{ HRTIMER_MODE_ABS_PINNED_SOFT, "ABS|PINNED|SOFT" }, \
- { HRTIMER_MODE_REL_PINNED_SOFT, "REL|PINNED|SOFT" })
+ { HRTIMER_MODE_REL_PINNED_SOFT, "REL|PINNED|SOFT" }, \
+ { HRTIMER_MODE_ABS_HARD, "ABS|HARD" }, \
+ { HRTIMER_MODE_REL_HARD, "REL|HARD" }, \
+ { HRTIMER_MODE_ABS_PINNED_HARD, "ABS|PINNED|HARD" }, \
+ { HRTIMER_MODE_REL_PINNED_HARD, "REL|PINNED|HARD" })
/**
- * hrtimer_init - called when the hrtimer is initialized
+ * hrtimer_setup - called when the hrtimer is initialized
* @hrtimer: pointer to struct hrtimer
* @clockid: the hrtimers clock
* @mode: the hrtimers mode
*/
-TRACE_EVENT(hrtimer_init,
+TRACE_EVENT(hrtimer_setup,
TP_PROTO(struct hrtimer *hrtimer, clockid_t clockid,
enum hrtimer_mode mode),
@@ -211,7 +235,7 @@ TRACE_EVENT(hrtimer_start,
TP_fast_assign(
__entry->hrtimer = hrtimer;
- __entry->function = hrtimer->function;
+ __entry->function = ACCESS_PRIVATE(hrtimer, function);
__entry->expires = hrtimer_get_expires(hrtimer);
__entry->softexpires = hrtimer_get_softexpires(hrtimer);
__entry->mode = mode;
@@ -247,7 +271,7 @@ TRACE_EVENT(hrtimer_expire_entry,
TP_fast_assign(
__entry->hrtimer = hrtimer;
__entry->now = *now;
- __entry->function = hrtimer->function;
+ __entry->function = ACCESS_PRIVATE(hrtimer, function);
),
TP_printk("hrtimer=%p function=%ps now=%llu",
@@ -371,7 +395,8 @@ TRACE_EVENT(itimer_expire,
tick_dep_name(PERF_EVENTS) \
tick_dep_name(SCHED) \
tick_dep_name(CLOCK_UNSTABLE) \
- tick_dep_name_end(RCU)
+ tick_dep_name(RCU) \
+ tick_dep_name_end(RCU_EXP)
#undef tick_dep_name
#undef tick_dep_mask_name
diff --git a/include/trace/events/timer_migration.h b/include/trace/events/timer_migration.h
new file mode 100644
index 000000000000..61171b13c687
--- /dev/null
+++ b/include/trace/events/timer_migration.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM timer_migration
+
+#if !defined(_TRACE_TIMER_MIGRATION_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_TIMER_MIGRATION_H
+
+#include <linux/tracepoint.h>
+
+/* Group events */
+TRACE_EVENT(tmigr_group_set,
+
+ TP_PROTO(struct tmigr_group *group),
+
+ TP_ARGS(group),
+
+ TP_STRUCT__entry(
+ __field( void *, group )
+ __field( unsigned int, lvl )
+ __field( unsigned int, numa_node )
+ ),
+
+ TP_fast_assign(
+ __entry->group = group;
+ __entry->lvl = group->level;
+ __entry->numa_node = group->numa_node;
+ ),
+
+ TP_printk("group=%p lvl=%d numa=%d",
+ __entry->group, __entry->lvl, __entry->numa_node)
+);
+
+TRACE_EVENT(tmigr_connect_child_parent,
+
+ TP_PROTO(struct tmigr_group *child),
+
+ TP_ARGS(child),
+
+ TP_STRUCT__entry(
+ __field( void *, child )
+ __field( void *, parent )
+ __field( unsigned int, lvl )
+ __field( unsigned int, numa_node )
+ __field( unsigned int, num_children )
+ __field( u32, groupmask )
+ ),
+
+ TP_fast_assign(
+ __entry->child = child;
+ __entry->parent = child->parent;
+ __entry->lvl = child->parent->level;
+ __entry->numa_node = child->parent->numa_node;
+ __entry->num_children = child->parent->num_children;
+ __entry->groupmask = child->groupmask;
+ ),
+
+ TP_printk("group=%p groupmask=%0x parent=%p lvl=%d numa=%d num_children=%d",
+ __entry->child, __entry->groupmask, __entry->parent,
+ __entry->lvl, __entry->numa_node, __entry->num_children)
+);
+
+TRACE_EVENT(tmigr_connect_cpu_parent,
+
+ TP_PROTO(struct tmigr_cpu *tmc),
+
+ TP_ARGS(tmc),
+
+ TP_STRUCT__entry(
+ __field( void *, parent )
+ __field( unsigned int, cpu )
+ __field( unsigned int, lvl )
+ __field( unsigned int, numa_node )
+ __field( unsigned int, num_children )
+ __field( u32, groupmask )
+ ),
+
+ TP_fast_assign(
+ __entry->parent = tmc->tmgroup;
+ __entry->cpu = tmc->cpuevt.cpu;
+ __entry->lvl = tmc->tmgroup->level;
+ __entry->numa_node = tmc->tmgroup->numa_node;
+ __entry->num_children = tmc->tmgroup->num_children;
+ __entry->groupmask = tmc->groupmask;
+ ),
+
+ TP_printk("cpu=%d groupmask=%0x parent=%p lvl=%d numa=%d num_children=%d",
+ __entry->cpu, __entry->groupmask, __entry->parent,
+ __entry->lvl, __entry->numa_node, __entry->num_children)
+);
+
+DECLARE_EVENT_CLASS(tmigr_group_and_cpu,
+
+ TP_PROTO(struct tmigr_group *group, union tmigr_state state, u32 childmask),
+
+ TP_ARGS(group, state, childmask),
+
+ TP_STRUCT__entry(
+ __field( void *, group )
+ __field( void *, parent )
+ __field( unsigned int, lvl )
+ __field( unsigned int, numa_node )
+ __field( u32, childmask )
+ __field( u8, active )
+ __field( u8, migrator )
+ ),
+
+ TP_fast_assign(
+ __entry->group = group;
+ __entry->parent = group->parent;
+ __entry->lvl = group->level;
+ __entry->numa_node = group->numa_node;
+ __entry->childmask = childmask;
+ __entry->active = state.active;
+ __entry->migrator = state.migrator;
+ ),
+
+ TP_printk("group=%p lvl=%d numa=%d active=%0x migrator=%0x "
+ "parent=%p childmask=%0x",
+ __entry->group, __entry->lvl, __entry->numa_node,
+ __entry->active, __entry->migrator,
+ __entry->parent, __entry->childmask)
+);
+
+DEFINE_EVENT(tmigr_group_and_cpu, tmigr_group_set_cpu_inactive,
+
+ TP_PROTO(struct tmigr_group *group, union tmigr_state state, u32 childmask),
+
+ TP_ARGS(group, state, childmask)
+);
+
+DEFINE_EVENT(tmigr_group_and_cpu, tmigr_group_set_cpu_active,
+
+ TP_PROTO(struct tmigr_group *group, union tmigr_state state, u32 childmask),
+
+ TP_ARGS(group, state, childmask)
+);
+
+/* CPU events */
+DECLARE_EVENT_CLASS(tmigr_cpugroup,
+
+ TP_PROTO(struct tmigr_cpu *tmc),
+
+ TP_ARGS(tmc),
+
+ TP_STRUCT__entry(
+ __field( u64, wakeup )
+ __field( void *, parent )
+ __field( unsigned int, cpu )
+
+ ),
+
+ TP_fast_assign(
+ __entry->wakeup = tmc->wakeup;
+ __entry->parent = tmc->tmgroup;
+ __entry->cpu = tmc->cpuevt.cpu;
+ ),
+
+ TP_printk("cpu=%d parent=%p wakeup=%llu", __entry->cpu, __entry->parent, __entry->wakeup)
+);
+
+DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_new_timer,
+
+ TP_PROTO(struct tmigr_cpu *tmc),
+
+ TP_ARGS(tmc)
+);
+
+DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_active,
+
+ TP_PROTO(struct tmigr_cpu *tmc),
+
+ TP_ARGS(tmc)
+);
+
+DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_available,
+
+ TP_PROTO(struct tmigr_cpu *tmc),
+
+ TP_ARGS(tmc)
+);
+
+DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_unavailable,
+
+ TP_PROTO(struct tmigr_cpu *tmc),
+
+ TP_ARGS(tmc)
+);
+
+DEFINE_EVENT(tmigr_cpugroup, tmigr_handle_remote_cpu,
+
+ TP_PROTO(struct tmigr_cpu *tmc),
+
+ TP_ARGS(tmc)
+);
+
+DECLARE_EVENT_CLASS(tmigr_idle,
+
+ TP_PROTO(struct tmigr_cpu *tmc, u64 nextevt),
+
+ TP_ARGS(tmc, nextevt),
+
+ TP_STRUCT__entry(
+ __field( u64, nextevt)
+ __field( u64, wakeup)
+ __field( void *, parent)
+ __field( unsigned int, cpu)
+ ),
+
+ TP_fast_assign(
+ __entry->nextevt = nextevt;
+ __entry->wakeup = tmc->wakeup;
+ __entry->parent = tmc->tmgroup;
+ __entry->cpu = tmc->cpuevt.cpu;
+ ),
+
+ TP_printk("cpu=%d parent=%p nextevt=%llu wakeup=%llu",
+ __entry->cpu, __entry->parent, __entry->nextevt, __entry->wakeup)
+);
+
+DEFINE_EVENT(tmigr_idle, tmigr_cpu_idle,
+
+ TP_PROTO(struct tmigr_cpu *tmc, u64 nextevt),
+
+ TP_ARGS(tmc, nextevt)
+);
+
+DEFINE_EVENT(tmigr_idle, tmigr_cpu_new_timer_idle,
+
+ TP_PROTO(struct tmigr_cpu *tmc, u64 nextevt),
+
+ TP_ARGS(tmc, nextevt)
+);
+
+TRACE_EVENT(tmigr_update_events,
+
+ TP_PROTO(struct tmigr_group *child, struct tmigr_group *group,
+ union tmigr_state childstate, union tmigr_state groupstate,
+ u64 nextevt),
+
+ TP_ARGS(child, group, childstate, groupstate, nextevt),
+
+ TP_STRUCT__entry(
+ __field( void *, child )
+ __field( void *, group )
+ __field( u64, nextevt )
+ __field( u64, group_next_expiry )
+ __field( u64, child_evt_expiry )
+ __field( unsigned int, group_lvl )
+ __field( unsigned int, child_evtcpu )
+ __field( u8, child_active )
+ __field( u8, group_active )
+ ),
+
+ TP_fast_assign(
+ __entry->child = child;
+ __entry->group = group;
+ __entry->nextevt = nextevt;
+ __entry->group_next_expiry = group->next_expiry;
+ __entry->child_evt_expiry = child ? child->groupevt.nextevt.expires : 0;
+ __entry->group_lvl = group->level;
+ __entry->child_evtcpu = child ? child->groupevt.cpu : 0;
+ __entry->child_active = childstate.active;
+ __entry->group_active = groupstate.active;
+ ),
+
+ TP_printk("child=%p group=%p group_lvl=%d child_active=%0x group_active=%0x "
+ "nextevt=%llu next_expiry=%llu child_evt_expiry=%llu child_evtcpu=%d",
+ __entry->child, __entry->group, __entry->group_lvl, __entry->child_active,
+ __entry->group_active,
+ __entry->nextevt, __entry->group_next_expiry, __entry->child_evt_expiry,
+ __entry->child_evtcpu)
+);
+
+TRACE_EVENT(tmigr_handle_remote,
+
+ TP_PROTO(struct tmigr_group *group),
+
+ TP_ARGS(group),
+
+ TP_STRUCT__entry(
+ __field( void * , group )
+ __field( unsigned int , lvl )
+ ),
+
+ TP_fast_assign(
+ __entry->group = group;
+ __entry->lvl = group->level;
+ ),
+
+ TP_printk("group=%p lvl=%d",
+ __entry->group, __entry->lvl)
+);
+
+#endif /* _TRACE_TIMER_MIGRATION_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/timestamp.h b/include/trace/events/timestamp.h
new file mode 100644
index 000000000000..c9e5ec930054
--- /dev/null
+++ b/include/trace/events/timestamp.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM timestamp
+
+#if !defined(_TRACE_TIMESTAMP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_TIMESTAMP_H
+
+#include <linux/tracepoint.h>
+#include <linux/fs.h>
+
+#define CTIME_QUERIED_FLAGS \
+ { I_CTIME_QUERIED, "Q" }
+
+DECLARE_EVENT_CLASS(ctime,
+ TP_PROTO(struct inode *inode,
+ struct timespec64 *ctime),
+
+ TP_ARGS(inode, ctime),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(time64_t, ctime_s)
+ __field(u32, ctime_ns)
+ __field(u32, gen)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->gen = inode->i_generation;
+ __entry->ctime_s = ctime->tv_sec;
+ __entry->ctime_ns = ctime->tv_nsec;
+ ),
+
+ TP_printk("ino=%d:%d:%ld:%u ctime=%lld.%u",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->gen,
+ __entry->ctime_s, __entry->ctime_ns
+ )
+);
+
+DEFINE_EVENT(ctime, inode_set_ctime_to_ts,
+ TP_PROTO(struct inode *inode,
+ struct timespec64 *ctime),
+ TP_ARGS(inode, ctime));
+
+DEFINE_EVENT(ctime, ctime_xchg_skip,
+ TP_PROTO(struct inode *inode,
+ struct timespec64 *ctime),
+ TP_ARGS(inode, ctime));
+
+TRACE_EVENT(ctime_ns_xchg,
+ TP_PROTO(struct inode *inode,
+ u32 old,
+ u32 new,
+ u32 cur),
+
+ TP_ARGS(inode, old, new, cur),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(u32, gen)
+ __field(u32, old)
+ __field(u32, new)
+ __field(u32, cur)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->gen = inode->i_generation;
+ __entry->old = old;
+ __entry->new = new;
+ __entry->cur = cur;
+ ),
+
+ TP_printk("ino=%d:%d:%ld:%u old=%u:%s new=%u cur=%u:%s",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->gen,
+ __entry->old & ~I_CTIME_QUERIED,
+ __print_flags(__entry->old & I_CTIME_QUERIED, "|", CTIME_QUERIED_FLAGS),
+ __entry->new,
+ __entry->cur & ~I_CTIME_QUERIED,
+ __print_flags(__entry->cur & I_CTIME_QUERIED, "|", CTIME_QUERIED_FLAGS)
+ )
+);
+
+TRACE_EVENT(fill_mg_cmtime,
+ TP_PROTO(struct inode *inode,
+ struct timespec64 *ctime,
+ struct timespec64 *mtime),
+
+ TP_ARGS(inode, ctime, mtime),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(time64_t, ctime_s)
+ __field(time64_t, mtime_s)
+ __field(u32, ctime_ns)
+ __field(u32, mtime_ns)
+ __field(u32, gen)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->gen = inode->i_generation;
+ __entry->ctime_s = ctime->tv_sec;
+ __entry->mtime_s = mtime->tv_sec;
+ __entry->ctime_ns = ctime->tv_nsec;
+ __entry->mtime_ns = mtime->tv_nsec;
+ ),
+
+ TP_printk("ino=%d:%d:%ld:%u ctime=%lld.%u mtime=%lld.%u",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->gen,
+ __entry->ctime_s, __entry->ctime_ns,
+ __entry->mtime_s, __entry->mtime_ns
+ )
+);
+#endif /* _TRACE_TIMESTAMP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/tsm_mr.h b/include/trace/events/tsm_mr.h
new file mode 100644
index 000000000000..f40de4ad3e2d
--- /dev/null
+++ b/include/trace/events/tsm_mr.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM tsm_mr
+
+#if !defined(_TRACE_TSM_MR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_TSM_MR_H
+
+#include <linux/tracepoint.h>
+#include <linux/tsm-mr.h>
+
+TRACE_EVENT(tsm_mr_read,
+
+ TP_PROTO(const struct tsm_measurement_register *mr),
+
+ TP_ARGS(mr),
+
+ TP_STRUCT__entry(
+ __string(mr, mr->mr_name)
+ __string(hash, mr->mr_flags & TSM_MR_F_NOHASH ?
+ "data" : hash_algo_name[mr->mr_hash])
+ __dynamic_array(u8, d, mr->mr_size)
+ ),
+
+ TP_fast_assign(
+ __assign_str(mr);
+ __assign_str(hash);
+ memcpy(__get_dynamic_array(d), mr->mr_value, __get_dynamic_array_len(d));
+ ),
+
+ TP_printk("[%s] %s:%s", __get_str(mr), __get_str(hash),
+ __print_hex_str(__get_dynamic_array(d), __get_dynamic_array_len(d)))
+);
+
+TRACE_EVENT(tsm_mr_refresh,
+
+ TP_PROTO(const struct tsm_measurement_register *mr, int rc),
+
+ TP_ARGS(mr, rc),
+
+ TP_STRUCT__entry(
+ __string(mr, mr->mr_name)
+ __field(int, rc)
+ ),
+
+ TP_fast_assign(
+ __assign_str(mr);
+ __entry->rc = rc;
+ ),
+
+ TP_printk("[%s] %s:%d", __get_str(mr),
+ __entry->rc ? "failed" : "succeeded", __entry->rc)
+);
+
+TRACE_EVENT(tsm_mr_write,
+
+ TP_PROTO(const struct tsm_measurement_register *mr, const u8 *data),
+
+ TP_ARGS(mr, data),
+
+ TP_STRUCT__entry(
+ __string(mr, mr->mr_name)
+ __string(hash, mr->mr_flags & TSM_MR_F_NOHASH ?
+ "data" : hash_algo_name[mr->mr_hash])
+ __dynamic_array(u8, d, mr->mr_size)
+ ),
+
+ TP_fast_assign(
+ __assign_str(mr);
+ __assign_str(hash);
+ memcpy(__get_dynamic_array(d), data, __get_dynamic_array_len(d));
+ ),
+
+ TP_printk("[%s] %s:%s", __get_str(mr), __get_str(hash),
+ __print_hex_str(__get_dynamic_array(d), __get_dynamic_array_len(d)))
+);
+
+#endif
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/udp.h b/include/trace/events/udp.h
index 336fe272889f..6142be4068e2 100644
--- a/include/trace/events/udp.h
+++ b/include/trace/events/udp.h
@@ -7,24 +7,43 @@
#include <linux/udp.h>
#include <linux/tracepoint.h>
+#include <trace/events/net_probe_common.h>
TRACE_EVENT(udp_fail_queue_rcv_skb,
- TP_PROTO(int rc, struct sock *sk),
+ TP_PROTO(int rc, struct sock *sk, struct sk_buff *skb),
- TP_ARGS(rc, sk),
+ TP_ARGS(rc, sk, skb),
TP_STRUCT__entry(
__field(int, rc)
- __field(__u16, lport)
+
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(__u16, family)
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
),
TP_fast_assign(
+ const struct udphdr *uh = (const struct udphdr *)udp_hdr(skb);
+
__entry->rc = rc;
- __entry->lport = inet_sk(sk)->inet_num;
+
+ /* for filtering use */
+ __entry->sport = ntohs(uh->source);
+ __entry->dport = ntohs(uh->dest);
+ __entry->family = sk->sk_family;
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+
+ TP_STORE_ADDR_PORTS_SKB(skb, uh, __entry->saddr, __entry->daddr);
),
- TP_printk("rc=%d port=%hu", __entry->rc, __entry->lport)
+ TP_printk("rc=%d family=%s src=%pISpc dest=%pISpc", __entry->rc,
+ show_family_name(__entry->family),
+ __entry->saddr, __entry->daddr)
);
#endif /* _TRACE_UDP_H */
diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h
deleted file mode 100644
index 599739ee7b20..000000000000
--- a/include/trace/events/ufs.h
+++ /dev/null
@@ -1,396 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
- */
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM ufs
-
-#if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_UFS_H
-
-#include <linux/tracepoint.h>
-
-#define str_opcode(opcode) \
- __print_symbolic(opcode, \
- { WRITE_16, "WRITE_16" }, \
- { WRITE_10, "WRITE_10" }, \
- { READ_16, "READ_16" }, \
- { READ_10, "READ_10" }, \
- { SYNCHRONIZE_CACHE, "SYNC" }, \
- { UNMAP, "UNMAP" })
-
-#define UFS_LINK_STATES \
- EM(UIC_LINK_OFF_STATE, "UIC_LINK_OFF_STATE") \
- EM(UIC_LINK_ACTIVE_STATE, "UIC_LINK_ACTIVE_STATE") \
- EMe(UIC_LINK_HIBERN8_STATE, "UIC_LINK_HIBERN8_STATE")
-
-#define UFS_PWR_MODES \
- EM(UFS_ACTIVE_PWR_MODE, "UFS_ACTIVE_PWR_MODE") \
- EM(UFS_SLEEP_PWR_MODE, "UFS_SLEEP_PWR_MODE") \
- EM(UFS_POWERDOWN_PWR_MODE, "UFS_POWERDOWN_PWR_MODE") \
- EMe(UFS_DEEPSLEEP_PWR_MODE, "UFS_DEEPSLEEP_PWR_MODE")
-
-#define UFSCHD_CLK_GATING_STATES \
- EM(CLKS_OFF, "CLKS_OFF") \
- EM(CLKS_ON, "CLKS_ON") \
- EM(REQ_CLKS_OFF, "REQ_CLKS_OFF") \
- EMe(REQ_CLKS_ON, "REQ_CLKS_ON")
-
-#define UFS_CMD_TRACE_STRINGS \
- EM(UFS_CMD_SEND, "send_req") \
- EM(UFS_CMD_COMP, "complete_rsp") \
- EM(UFS_DEV_COMP, "dev_complete") \
- EM(UFS_QUERY_SEND, "query_send") \
- EM(UFS_QUERY_COMP, "query_complete") \
- EM(UFS_QUERY_ERR, "query_complete_err") \
- EM(UFS_TM_SEND, "tm_send") \
- EM(UFS_TM_COMP, "tm_complete") \
- EMe(UFS_TM_ERR, "tm_complete_err")
-
-#define UFS_CMD_TRACE_TSF_TYPES \
- EM(UFS_TSF_CDB, "CDB") \
- EM(UFS_TSF_OSF, "OSF") \
- EM(UFS_TSF_TM_INPUT, "TM_INPUT") \
- EMe(UFS_TSF_TM_OUTPUT, "TM_OUTPUT")
-
-/* Enums require being exported to userspace, for user tool parsing */
-#undef EM
-#undef EMe
-#define EM(a, b) TRACE_DEFINE_ENUM(a);
-#define EMe(a, b) TRACE_DEFINE_ENUM(a);
-
-UFS_LINK_STATES;
-UFS_PWR_MODES;
-UFSCHD_CLK_GATING_STATES;
-UFS_CMD_TRACE_STRINGS
-UFS_CMD_TRACE_TSF_TYPES
-
-/*
- * Now redefine the EM() and EMe() macros to map the enums to the strings
- * that will be printed in the output.
- */
-#undef EM
-#undef EMe
-#define EM(a, b) {a, b},
-#define EMe(a, b) {a, b}
-
-#define show_ufs_cmd_trace_str(str_t) \
- __print_symbolic(str_t, UFS_CMD_TRACE_STRINGS)
-#define show_ufs_cmd_trace_tsf(tsf) \
- __print_symbolic(tsf, UFS_CMD_TRACE_TSF_TYPES)
-
-TRACE_EVENT(ufshcd_clk_gating,
-
- TP_PROTO(const char *dev_name, int state),
-
- TP_ARGS(dev_name, state),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name)
- __field(int, state)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name, dev_name);
- __entry->state = state;
- ),
-
- TP_printk("%s: gating state changed to %s",
- __get_str(dev_name),
- __print_symbolic(__entry->state, UFSCHD_CLK_GATING_STATES))
-);
-
-TRACE_EVENT(ufshcd_clk_scaling,
-
- TP_PROTO(const char *dev_name, const char *state, const char *clk,
- u32 prev_state, u32 curr_state),
-
- TP_ARGS(dev_name, state, clk, prev_state, curr_state),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name)
- __string(state, state)
- __string(clk, clk)
- __field(u32, prev_state)
- __field(u32, curr_state)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name, dev_name);
- __assign_str(state, state);
- __assign_str(clk, clk);
- __entry->prev_state = prev_state;
- __entry->curr_state = curr_state;
- ),
-
- TP_printk("%s: %s %s from %u to %u Hz",
- __get_str(dev_name), __get_str(state), __get_str(clk),
- __entry->prev_state, __entry->curr_state)
-);
-
-TRACE_EVENT(ufshcd_auto_bkops_state,
-
- TP_PROTO(const char *dev_name, const char *state),
-
- TP_ARGS(dev_name, state),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name)
- __string(state, state)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name, dev_name);
- __assign_str(state, state);
- ),
-
- TP_printk("%s: auto bkops - %s",
- __get_str(dev_name), __get_str(state))
-);
-
-DECLARE_EVENT_CLASS(ufshcd_profiling_template,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
- int err),
-
- TP_ARGS(dev_name, profile_info, time_us, err),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name)
- __string(profile_info, profile_info)
- __field(s64, time_us)
- __field(int, err)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name, dev_name);
- __assign_str(profile_info, profile_info);
- __entry->time_us = time_us;
- __entry->err = err;
- ),
-
- TP_printk("%s: %s: took %lld usecs, err %d",
- __get_str(dev_name), __get_str(profile_info),
- __entry->time_us, __entry->err)
-);
-
-DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_hibern8,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
- int err),
- TP_ARGS(dev_name, profile_info, time_us, err));
-
-DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_gating,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
- int err),
- TP_ARGS(dev_name, profile_info, time_us, err));
-
-DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_scaling,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
- int err),
- TP_ARGS(dev_name, profile_info, time_us, err));
-
-DECLARE_EVENT_CLASS(ufshcd_template,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
-
- TP_ARGS(dev_name, err, usecs, dev_state, link_state),
-
- TP_STRUCT__entry(
- __field(s64, usecs)
- __field(int, err)
- __string(dev_name, dev_name)
- __field(int, dev_state)
- __field(int, link_state)
- ),
-
- TP_fast_assign(
- __entry->usecs = usecs;
- __entry->err = err;
- __assign_str(dev_name, dev_name);
- __entry->dev_state = dev_state;
- __entry->link_state = link_state;
- ),
-
- TP_printk(
- "%s: took %lld usecs, dev_state: %s, link_state: %s, err %d",
- __get_str(dev_name),
- __entry->usecs,
- __print_symbolic(__entry->dev_state, UFS_PWR_MODES),
- __print_symbolic(__entry->link_state, UFS_LINK_STATES),
- __entry->err
- )
-);
-
-DEFINE_EVENT(ufshcd_template, ufshcd_system_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-DEFINE_EVENT(ufshcd_template, ufshcd_system_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-DEFINE_EVENT(ufshcd_template, ufshcd_runtime_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-DEFINE_EVENT(ufshcd_template, ufshcd_runtime_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-DEFINE_EVENT(ufshcd_template, ufshcd_init,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-DEFINE_EVENT(ufshcd_template, ufshcd_wl_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-DEFINE_EVENT(ufshcd_template, ufshcd_wl_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-TRACE_EVENT(ufshcd_command,
- TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t,
- unsigned int tag, u32 doorbell, int transfer_len, u32 intr,
- u64 lba, u8 opcode, u8 group_id),
-
- TP_ARGS(dev_name, str_t, tag, doorbell, transfer_len,
- intr, lba, opcode, group_id),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name)
- __field(enum ufs_trace_str_t, str_t)
- __field(unsigned int, tag)
- __field(u32, doorbell)
- __field(int, transfer_len)
- __field(u32, intr)
- __field(u64, lba)
- __field(u8, opcode)
- __field(u8, group_id)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name, dev_name);
- __entry->str_t = str_t;
- __entry->tag = tag;
- __entry->doorbell = doorbell;
- __entry->transfer_len = transfer_len;
- __entry->intr = intr;
- __entry->lba = lba;
- __entry->opcode = opcode;
- __entry->group_id = group_id;
- ),
-
- TP_printk(
- "%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x (%s), group_id: 0x%x",
- show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
- __entry->tag, __entry->doorbell, __entry->transfer_len,
- __entry->intr, __entry->lba, (u32)__entry->opcode,
- str_opcode(__entry->opcode), (u32)__entry->group_id
- )
-);
-
-TRACE_EVENT(ufshcd_uic_command,
- TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, u32 cmd,
- u32 arg1, u32 arg2, u32 arg3),
-
- TP_ARGS(dev_name, str_t, cmd, arg1, arg2, arg3),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name)
- __field(enum ufs_trace_str_t, str_t)
- __field(u32, cmd)
- __field(u32, arg1)
- __field(u32, arg2)
- __field(u32, arg3)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name, dev_name);
- __entry->str_t = str_t;
- __entry->cmd = cmd;
- __entry->arg1 = arg1;
- __entry->arg2 = arg2;
- __entry->arg3 = arg3;
- ),
-
- TP_printk(
- "%s: %s: cmd: 0x%x, arg1: 0x%x, arg2: 0x%x, arg3: 0x%x",
- show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
- __entry->cmd, __entry->arg1, __entry->arg2, __entry->arg3
- )
-);
-
-TRACE_EVENT(ufshcd_upiu,
- TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, void *hdr,
- void *tsf, enum ufs_trace_tsf_t tsf_t),
-
- TP_ARGS(dev_name, str_t, hdr, tsf, tsf_t),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name)
- __field(enum ufs_trace_str_t, str_t)
- __array(unsigned char, hdr, 12)
- __array(unsigned char, tsf, 16)
- __field(enum ufs_trace_tsf_t, tsf_t)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name, dev_name);
- __entry->str_t = str_t;
- memcpy(__entry->hdr, hdr, sizeof(__entry->hdr));
- memcpy(__entry->tsf, tsf, sizeof(__entry->tsf));
- __entry->tsf_t = tsf_t;
- ),
-
- TP_printk(
- "%s: %s: HDR:%s, %s:%s",
- show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
- __print_hex(__entry->hdr, sizeof(__entry->hdr)),
- show_ufs_cmd_trace_tsf(__entry->tsf_t),
- __print_hex(__entry->tsf, sizeof(__entry->tsf))
- )
-);
-
-TRACE_EVENT(ufshcd_exception_event,
-
- TP_PROTO(const char *dev_name, u16 status),
-
- TP_ARGS(dev_name, status),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name)
- __field(u16, status)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name, dev_name);
- __entry->status = status;
- ),
-
- TP_printk("%s: status 0x%x",
- __get_str(dev_name), __entry->status
- )
-);
-
-#endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index d2123dd960d5..490958fa10de 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -285,10 +285,9 @@ TRACE_EVENT(mm_vmscan_lru_isolate,
unsigned long nr_scanned,
unsigned long nr_skipped,
unsigned long nr_taken,
- isolate_mode_t isolate_mode,
int lru),
- TP_ARGS(highest_zoneidx, order, nr_requested, nr_scanned, nr_skipped, nr_taken, isolate_mode, lru),
+ TP_ARGS(highest_zoneidx, order, nr_requested, nr_scanned, nr_skipped, nr_taken, lru),
TP_STRUCT__entry(
__field(int, highest_zoneidx)
@@ -297,7 +296,6 @@ TRACE_EVENT(mm_vmscan_lru_isolate,
__field(unsigned long, nr_scanned)
__field(unsigned long, nr_skipped)
__field(unsigned long, nr_taken)
- __field(unsigned int, isolate_mode)
__field(int, lru)
),
@@ -308,7 +306,6 @@ TRACE_EVENT(mm_vmscan_lru_isolate,
__entry->nr_scanned = nr_scanned;
__entry->nr_skipped = nr_skipped;
__entry->nr_taken = nr_taken;
- __entry->isolate_mode = (__force unsigned int)isolate_mode;
__entry->lru = lru;
),
@@ -316,8 +313,7 @@ TRACE_EVENT(mm_vmscan_lru_isolate,
* classzone is previous name of the highest_zoneidx.
* Reason not to change it is the ABI requirement of the tracepoint.
*/
- TP_printk("isolate_mode=%d classzone=%d order=%d nr_requested=%lu nr_scanned=%lu nr_skipped=%lu nr_taken=%lu lru=%s",
- __entry->isolate_mode,
+ TP_printk("classzone=%d order=%d nr_requested=%lu nr_scanned=%lu nr_skipped=%lu nr_taken=%lu lru=%s",
__entry->highest_zoneidx,
__entry->order,
__entry->nr_requested,
@@ -350,6 +346,51 @@ TRACE_EVENT(mm_vmscan_write_folio,
show_reclaim_flags(__entry->reclaim_flags))
);
+TRACE_EVENT(mm_vmscan_reclaim_pages,
+
+ TP_PROTO(int nid,
+ unsigned long nr_scanned, unsigned long nr_reclaimed,
+ struct reclaim_stat *stat),
+
+ TP_ARGS(nid, nr_scanned, nr_reclaimed, stat),
+
+ TP_STRUCT__entry(
+ __field(int, nid)
+ __field(unsigned long, nr_scanned)
+ __field(unsigned long, nr_reclaimed)
+ __field(unsigned long, nr_dirty)
+ __field(unsigned long, nr_writeback)
+ __field(unsigned long, nr_congested)
+ __field(unsigned long, nr_immediate)
+ __field(unsigned int, nr_activate0)
+ __field(unsigned int, nr_activate1)
+ __field(unsigned long, nr_ref_keep)
+ __field(unsigned long, nr_unmap_fail)
+ ),
+
+ TP_fast_assign(
+ __entry->nid = nid;
+ __entry->nr_scanned = nr_scanned;
+ __entry->nr_reclaimed = nr_reclaimed;
+ __entry->nr_dirty = stat->nr_dirty;
+ __entry->nr_writeback = stat->nr_writeback;
+ __entry->nr_congested = stat->nr_congested;
+ __entry->nr_immediate = stat->nr_immediate;
+ __entry->nr_activate0 = stat->nr_activate[0];
+ __entry->nr_activate1 = stat->nr_activate[1];
+ __entry->nr_ref_keep = stat->nr_ref_keep;
+ __entry->nr_unmap_fail = stat->nr_unmap_fail;
+ ),
+
+ TP_printk("nid=%d nr_scanned=%ld nr_reclaimed=%ld nr_dirty=%ld nr_writeback=%ld nr_congested=%ld nr_immediate=%ld nr_activate_anon=%d nr_activate_file=%d nr_ref_keep=%ld nr_unmap_fail=%ld",
+ __entry->nid,
+ __entry->nr_scanned, __entry->nr_reclaimed,
+ __entry->nr_dirty, __entry->nr_writeback,
+ __entry->nr_congested, __entry->nr_immediate,
+ __entry->nr_activate0, __entry->nr_activate1,
+ __entry->nr_ref_keep, __entry->nr_unmap_fail)
+);
+
TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
TP_PROTO(int nid,
diff --git a/include/trace/events/vsock_virtio_transport_common.h b/include/trace/events/vsock_virtio_transport_common.h
index d0b3f0ea9ba1..f1ebe36787c3 100644
--- a/include/trace/events/vsock_virtio_transport_common.h
+++ b/include/trace/events/vsock_virtio_transport_common.h
@@ -43,7 +43,8 @@ TRACE_EVENT(virtio_transport_alloc_pkt,
__u32 len,
__u16 type,
__u16 op,
- __u32 flags
+ __u32 flags,
+ bool zcopy
),
TP_ARGS(
src_cid, src_port,
@@ -51,7 +52,8 @@ TRACE_EVENT(virtio_transport_alloc_pkt,
len,
type,
op,
- flags
+ flags,
+ zcopy
),
TP_STRUCT__entry(
__field(__u32, src_cid)
@@ -62,6 +64,7 @@ TRACE_EVENT(virtio_transport_alloc_pkt,
__field(__u16, type)
__field(__u16, op)
__field(__u32, flags)
+ __field(bool, zcopy)
),
TP_fast_assign(
__entry->src_cid = src_cid;
@@ -72,14 +75,15 @@ TRACE_EVENT(virtio_transport_alloc_pkt,
__entry->type = type;
__entry->op = op;
__entry->flags = flags;
+ __entry->zcopy = zcopy;
),
- TP_printk("%u:%u -> %u:%u len=%u type=%s op=%s flags=%#x",
+ TP_printk("%u:%u -> %u:%u len=%u type=%s op=%s flags=%#x zcopy=%s",
__entry->src_cid, __entry->src_port,
__entry->dst_cid, __entry->dst_port,
__entry->len,
show_type(__entry->type),
show_op(__entry->op),
- __entry->flags)
+ __entry->flags, __entry->zcopy ? "true" : "false")
);
TRACE_EVENT(virtio_transport_recv_pkt,
diff --git a/include/trace/events/wbt.h b/include/trace/events/wbt.h
index 9c66e59d859c..4661f0d27062 100644
--- a/include/trace/events/wbt.h
+++ b/include/trace/events/wbt.h
@@ -33,7 +33,7 @@ TRACE_EVENT(wbt_stat,
),
TP_fast_assign(
- strlcpy(__entry->name, bdi_dev_name(bdi),
+ strscpy(__entry->name, bdi_dev_name(bdi),
ARRAY_SIZE(__entry->name));
__entry->rmean = stat[0].mean;
__entry->rmin = stat[0].min;
@@ -68,7 +68,7 @@ TRACE_EVENT(wbt_lat,
),
TP_fast_assign(
- strlcpy(__entry->name, bdi_dev_name(bdi),
+ strscpy(__entry->name, bdi_dev_name(bdi),
ARRAY_SIZE(__entry->name));
__entry->lat = div_u64(lat, 1000);
),
@@ -105,7 +105,7 @@ TRACE_EVENT(wbt_step,
),
TP_fast_assign(
- strlcpy(__entry->name, bdi_dev_name(bdi),
+ strscpy(__entry->name, bdi_dev_name(bdi),
ARRAY_SIZE(__entry->name));
__entry->msg = msg;
__entry->step = step;
@@ -141,7 +141,7 @@ TRACE_EVENT(wbt_timer,
),
TP_fast_assign(
- strlcpy(__entry->name, bdi_dev_name(bdi),
+ strscpy(__entry->name, bdi_dev_name(bdi),
ARRAY_SIZE(__entry->name));
__entry->status = status;
__entry->step = step;
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
index 262d52021c23..b0de2bc9ed52 100644
--- a/include/trace/events/workqueue.h
+++ b/include/trace/events/workqueue.h
@@ -38,7 +38,7 @@ TRACE_EVENT(workqueue_queue_work,
TP_fast_assign(
__entry->work = work;
__entry->function = work->func;
- __assign_str(workqueue, pwq->wq->name);
+ __assign_str(workqueue);
__entry->req_cpu = req_cpu;
__entry->cpu = pwq->pool->cpu;
),
@@ -64,13 +64,15 @@ TRACE_EVENT(workqueue_activate_work,
TP_STRUCT__entry(
__field( void *, work )
+ __field( void *, function)
),
TP_fast_assign(
__entry->work = work;
+ __entry->function = work->func;
),
- TP_printk("work struct %p", __entry->work)
+ TP_printk("work struct %p function=%ps ", __entry->work, __entry->function)
);
/**
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 86b2a82da546..311a341e6fe4 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -20,7 +20,15 @@
{I_CLEAR, "I_CLEAR"}, \
{I_SYNC, "I_SYNC"}, \
{I_DIRTY_TIME, "I_DIRTY_TIME"}, \
- {I_REFERENCED, "I_REFERENCED"} \
+ {I_REFERENCED, "I_REFERENCED"}, \
+ {I_LINKABLE, "I_LINKABLE"}, \
+ {I_WB_SWITCH, "I_WB_SWITCH"}, \
+ {I_OVL_INUSE, "I_OVL_INUSE"}, \
+ {I_CREATING, "I_CREATING"}, \
+ {I_DONTCACHE, "I_DONTCACHE"}, \
+ {I_SYNC_QUEUED, "I_SYNC_QUEUED"}, \
+ {I_PINNING_NETFS_WB, "I_PINNING_NETFS_WB"}, \
+ {I_LRU_ISOLATING, "I_LRU_ISOLATING"} \
)
/* enums need to be exported to user space */
@@ -68,7 +76,7 @@ DECLARE_EVENT_CLASS(writeback_folio_template,
strscpy_pad(__entry->name,
bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
NULL), 32);
- __entry->ino = mapping ? mapping->host->i_ino : 0;
+ __entry->ino = (mapping && mapping->host) ? mapping->host->i_ino : 0;
__entry->index = folio->index;
),
@@ -112,7 +120,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
/* may be called for files on pseudo FSes w/ unregistered bdi */
strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
__entry->ino = inode->i_ino;
- __entry->state = inode->i_state;
+ __entry->state = inode_state_read_once(inode);
__entry->flags = flags;
),
@@ -205,6 +213,35 @@ TRACE_EVENT(inode_foreign_history,
)
);
+TRACE_EVENT(inode_switch_wbs_queue,
+
+ TP_PROTO(struct bdi_writeback *old_wb, struct bdi_writeback *new_wb,
+ unsigned int count),
+
+ TP_ARGS(old_wb, new_wb, count),
+
+ TP_STRUCT__entry(
+ __array(char, name, 32)
+ __field(ino_t, old_cgroup_ino)
+ __field(ino_t, new_cgroup_ino)
+ __field(unsigned int, count)
+ ),
+
+ TP_fast_assign(
+ strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32);
+ __entry->old_cgroup_ino = __trace_wb_assign_cgroup(old_wb);
+ __entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb);
+ __entry->count = count;
+ ),
+
+ TP_printk("bdi %s: old_cgroup_ino=%lu new_cgroup_ino=%lu count=%u",
+ __entry->name,
+ (unsigned long)__entry->old_cgroup_ino,
+ (unsigned long)__entry->new_cgroup_ino,
+ __entry->count
+ )
+);
+
TRACE_EVENT(inode_switch_wbs,
TP_PROTO(struct inode *inode, struct bdi_writeback *old_wb,
@@ -451,7 +488,6 @@ DECLARE_EVENT_CLASS(wbc_class,
__field(int, sync_mode)
__field(int, for_kupdate)
__field(int, for_background)
- __field(int, for_reclaim)
__field(int, range_cyclic)
__field(long, range_start)
__field(long, range_end)
@@ -465,23 +501,20 @@ DECLARE_EVENT_CLASS(wbc_class,
__entry->sync_mode = wbc->sync_mode;
__entry->for_kupdate = wbc->for_kupdate;
__entry->for_background = wbc->for_background;
- __entry->for_reclaim = wbc->for_reclaim;
__entry->range_cyclic = wbc->range_cyclic;
__entry->range_start = (long)wbc->range_start;
__entry->range_end = (long)wbc->range_end;
__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
),
- TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
- "bgrd=%d reclm=%d cyclic=%d "
- "start=0x%lx end=0x%lx cgroup_ino=%lu",
+ TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d bgrd=%d "
+ "cyclic=%d start=0x%lx end=0x%lx cgroup_ino=%lu",
__entry->name,
__entry->nr_to_write,
__entry->pages_skipped,
__entry->sync_mode,
__entry->for_kupdate,
__entry->for_background,
- __entry->for_reclaim,
__entry->range_cyclic,
__entry->range_start,
__entry->range_end,
@@ -621,11 +654,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
TRACE_EVENT(balance_dirty_pages,
TP_PROTO(struct bdi_writeback *wb,
- unsigned long thresh,
- unsigned long bg_thresh,
- unsigned long dirty,
- unsigned long bdi_thresh,
- unsigned long bdi_dirty,
+ struct dirty_throttle_control *dtc,
unsigned long dirty_ratelimit,
unsigned long task_ratelimit,
unsigned long dirtied,
@@ -633,7 +662,7 @@ TRACE_EVENT(balance_dirty_pages,
long pause,
unsigned long start_time),
- TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
+ TP_ARGS(wb, dtc,
dirty_ratelimit, task_ratelimit,
dirtied, period, pause, start_time),
@@ -642,8 +671,8 @@ TRACE_EVENT(balance_dirty_pages,
__field(unsigned long, limit)
__field(unsigned long, setpoint)
__field(unsigned long, dirty)
- __field(unsigned long, bdi_setpoint)
- __field(unsigned long, bdi_dirty)
+ __field(unsigned long, wb_setpoint)
+ __field(unsigned long, wb_dirty)
__field(unsigned long, dirty_ratelimit)
__field(unsigned long, task_ratelimit)
__field(unsigned int, dirtied)
@@ -656,16 +685,15 @@ TRACE_EVENT(balance_dirty_pages,
),
TP_fast_assign(
- unsigned long freerun = (thresh + bg_thresh) / 2;
+ unsigned long freerun = (dtc->thresh + dtc->bg_thresh) / 2;
strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
- __entry->limit = global_wb_domain.dirty_limit;
- __entry->setpoint = (global_wb_domain.dirty_limit +
- freerun) / 2;
- __entry->dirty = dirty;
- __entry->bdi_setpoint = __entry->setpoint *
- bdi_thresh / (thresh + 1);
- __entry->bdi_dirty = bdi_dirty;
+ __entry->limit = dtc->limit;
+ __entry->setpoint = (dtc->limit + freerun) / 2;
+ __entry->dirty = dtc->dirty;
+ __entry->wb_setpoint = __entry->setpoint *
+ dtc->wb_thresh / (dtc->thresh + 1);
+ __entry->wb_dirty = dtc->wb_dirty;
__entry->dirty_ratelimit = KBps(dirty_ratelimit);
__entry->task_ratelimit = KBps(task_ratelimit);
__entry->dirtied = dirtied;
@@ -681,7 +709,7 @@ TRACE_EVENT(balance_dirty_pages,
TP_printk("bdi %s: "
"limit=%lu setpoint=%lu dirty=%lu "
- "bdi_setpoint=%lu bdi_dirty=%lu "
+ "wb_setpoint=%lu wb_dirty=%lu "
"dirty_ratelimit=%lu task_ratelimit=%lu "
"dirtied=%u dirtied_pause=%u "
"paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%lu",
@@ -689,8 +717,8 @@ TRACE_EVENT(balance_dirty_pages,
__entry->limit,
__entry->setpoint,
__entry->dirty,
- __entry->bdi_setpoint,
- __entry->bdi_dirty,
+ __entry->wb_setpoint,
+ __entry->wb_dirty,
__entry->dirty_ratelimit,
__entry->task_ratelimit,
__entry->dirtied,
@@ -720,7 +748,7 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
strscpy_pad(__entry->name,
bdi_dev_name(inode_to_bdi(inode)), 32);
__entry->ino = inode->i_ino;
- __entry->state = inode->i_state;
+ __entry->state = inode_state_read_once(inode);
__entry->dirtied_when = inode->dirtied_when;
__entry->cgroup_ino = __trace_wb_assign_cgroup(inode_to_wb(inode));
),
@@ -759,7 +787,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
strscpy_pad(__entry->name,
bdi_dev_name(inode_to_bdi(inode)), 32);
__entry->ino = inode->i_ino;
- __entry->state = inode->i_state;
+ __entry->state = inode_state_read_once(inode);
__entry->dirtied_when = inode->dirtied_when;
__entry->writeback_index = inode->i_mapping->writeback_index;
__entry->nr_to_write = nr_to_write;
@@ -811,7 +839,7 @@ DECLARE_EVENT_CLASS(writeback_inode_template,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->state = inode->i_state;
+ __entry->state = inode_state_read_once(inode);
__entry->mode = inode->i_mode;
__entry->dirtied_when = inode->dirtied_when;
),
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
index c40fc97f9417..18c0ac514fcb 100644
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -9,6 +9,7 @@
#include <linux/filter.h>
#include <linux/tracepoint.h>
#include <linux/bpf.h>
+#include <net/xdp.h>
#define __XDP_ACT_MAP(FN) \
FN(ABORTED) \
@@ -167,25 +168,7 @@ DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
#define _trace_xdp_redirect_map_err(dev, xdp, to, map_type, map_id, index, err) \
trace_xdp_redirect_err(dev, xdp, to, err, map_type, map_id, index)
-/* not used anymore, but kept around so as not to break old programs */
-DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map,
- TP_PROTO(const struct net_device *dev,
- const struct bpf_prog *xdp,
- const void *tgt, int err,
- enum bpf_map_type map_type,
- u32 map_id, u32 index),
- TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
-);
-
-DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err,
- TP_PROTO(const struct net_device *dev,
- const struct bpf_prog *xdp,
- const void *tgt, int err,
- enum bpf_map_type map_type,
- u32 map_id, u32 index),
- TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
-);
-
+#ifdef CONFIG_BPF_SYSCALL
TRACE_EVENT(xdp_cpumap_kthread,
TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
@@ -299,6 +282,7 @@ TRACE_EVENT(xdp_devmap_xmit,
__entry->sent, __entry->drops,
__entry->err)
);
+#endif /* CONFIG_BPF_SYSCALL */
/* Expect users already include <net/xdp.h>, but not xdp_priv.h */
#include <net/xdp_priv.h>
@@ -378,30 +362,21 @@ TRACE_EVENT(mem_connect,
)
);
-TRACE_EVENT(mem_return_failed,
+TRACE_EVENT(bpf_xdp_link_attach_failed,
- TP_PROTO(const struct xdp_mem_info *mem,
- const struct page *page),
+ TP_PROTO(const char *msg),
- TP_ARGS(mem, page),
+ TP_ARGS(msg),
TP_STRUCT__entry(
- __field(const struct page *, page)
- __field(u32, mem_id)
- __field(u32, mem_type)
+ __string(msg, msg)
),
TP_fast_assign(
- __entry->page = page;
- __entry->mem_id = mem->id;
- __entry->mem_type = mem->type;
+ __assign_str(msg);
),
- TP_printk("mem_id=%d mem_type=%s page=%p",
- __entry->mem_id,
- __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
- __entry->page
- )
+ TP_printk("errmsg=%s", __get_str(msg))
);
#endif /* _TRACE_XDP_H */
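
A minimal sketch of how the new bpf_xdp_link_attach_failed event might be emitted; the wrapper function and message below are hypothetical, only the tracepoint call itself comes from the definition above:

#include <trace/events/xdp.h>

/* Hypothetical helper: report why an XDP link attach was rejected. */
static void report_xdp_attach_failure(const char *errmsg)
{
	trace_bpf_xdp_link_attach_failed(errmsg);
}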
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index 44a3f565264d..0577f0cdd231 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -6,26 +6,26 @@
#define _TRACE_XEN_H
#include <linux/tracepoint.h>
-#include <asm/paravirt_types.h>
+#include <asm/xen/hypervisor.h>
#include <asm/xen/trace_types.h>
struct multicall_entry;
/* Multicalls */
DECLARE_EVENT_CLASS(xen_mc__batch,
- TP_PROTO(enum paravirt_lazy_mode mode),
+ TP_PROTO(enum xen_lazy_mode mode),
TP_ARGS(mode),
TP_STRUCT__entry(
- __field(enum paravirt_lazy_mode, mode)
+ __field(enum xen_lazy_mode, mode)
),
TP_fast_assign(__entry->mode = mode),
TP_printk("start batch LAZY_%s",
- (__entry->mode == PARAVIRT_LAZY_MMU) ? "MMU" :
- (__entry->mode == PARAVIRT_LAZY_CPU) ? "CPU" : "NONE")
+ (__entry->mode == XEN_LAZY_MMU) ? "MMU" :
+ (__entry->mode == XEN_LAZY_CPU) ? "CPU" : "NONE")
);
#define DEFINE_XEN_MC_BATCH(name) \
DEFINE_EVENT(xen_mc__batch, name, \
- TP_PROTO(enum paravirt_lazy_mode mode), \
+ TP_PROTO(enum xen_lazy_mode mode), \
TP_ARGS(mode))
DEFINE_XEN_MC_BATCH(xen_mc_batch);
diff --git a/include/trace/misc/fs.h b/include/trace/misc/fs.h
index 738b97f22f36..7ead1c61f0cb 100644
--- a/include/trace/misc/fs.h
+++ b/include/trace/misc/fs.h
@@ -120,3 +120,46 @@
{ LOOKUP_BENEATH, "BENEATH" }, \
{ LOOKUP_IN_ROOT, "IN_ROOT" }, \
{ LOOKUP_CACHED, "CACHED" })
+
+#define show_ia_valid_flags(flags) \
+ __print_flags(flags, "|", \
+ { ATTR_MODE, "MODE" }, \
+ { ATTR_UID, "UID" }, \
+ { ATTR_GID, "GID" }, \
+ { ATTR_SIZE, "SIZE" }, \
+ { ATTR_ATIME, "ATIME" }, \
+ { ATTR_MTIME, "MTIME" }, \
+ { ATTR_CTIME, "CTIME" }, \
+ { ATTR_ATIME_SET, "ATIME_SET" }, \
+ { ATTR_MTIME_SET, "MTIME_SET" }, \
+ { ATTR_FORCE, "FORCE" }, \
+ { ATTR_KILL_SUID, "KILL_SUID" }, \
+ { ATTR_KILL_SGID, "KILL_SGID" }, \
+ { ATTR_FILE, "FILE" }, \
+ { ATTR_KILL_PRIV, "KILL_PRIV" }, \
+ { ATTR_OPEN, "OPEN" }, \
+ { ATTR_TIMES_SET, "TIMES_SET" }, \
+ { ATTR_TOUCH, "TOUCH" }, \
+ { ATTR_DELEG, "DELEG" })
+
+#define show_statx_mask(flags) \
+ __print_flags(flags, "|", \
+ { STATX_TYPE, "TYPE" }, \
+ { STATX_MODE, "MODE" }, \
+ { STATX_NLINK, "NLINK" }, \
+ { STATX_UID, "UID" }, \
+ { STATX_GID, "GID" }, \
+ { STATX_ATIME, "ATIME" }, \
+ { STATX_MTIME, "MTIME" }, \
+ { STATX_CTIME, "CTIME" }, \
+ { STATX_INO, "INO" }, \
+ { STATX_SIZE, "SIZE" }, \
+ { STATX_BLOCKS, "BLOCKS" }, \
+ { STATX_BASIC_STATS, "BASIC_STATS" }, \
+ { STATX_BTIME, "BTIME" }, \
+ { STATX_MNT_ID, "MNT_ID" }, \
+ { STATX_DIOALIGN, "DIOALIGN" }, \
+ { STATX_MNT_ID_UNIQUE, "MNT_ID_UNIQUE" }, \
+ { STATX_SUBVOL, "SUBVOL" }, \
+ { STATX_WRITE_ATOMIC, "WRITE_ATOMIC" }, \
+ { STATX_DIO_READ_ALIGN, "DIO_READ_ALIGN" })
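
A sketch of how a filesystem tracepoint might consume the new helpers; the event name and fields are made up for illustration:

TRACE_EVENT(example_setattr,
	TP_PROTO(unsigned int ia_valid, unsigned int statx_mask),
	TP_ARGS(ia_valid, statx_mask),
	TP_STRUCT__entry(
		__field(unsigned int, ia_valid)
		__field(unsigned int, statx_mask)
	),
	TP_fast_assign(
		__entry->ia_valid   = ia_valid;
		__entry->statx_mask = statx_mask;
	),
	TP_printk("valid=%s mask=%s",
		show_ia_valid_flags(__entry->ia_valid),
		show_statx_mask(__entry->statx_mask))
);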
diff --git a/include/trace/misc/nfs.h b/include/trace/misc/nfs.h
index 0d9d48dca38a..c82233e950ac 100644
--- a/include/trace/misc/nfs.h
+++ b/include/trace/misc/nfs.h
@@ -28,7 +28,6 @@ TRACE_DEFINE_ENUM(NFSERR_FBIG);
TRACE_DEFINE_ENUM(NFSERR_NOSPC);
TRACE_DEFINE_ENUM(NFSERR_ROFS);
TRACE_DEFINE_ENUM(NFSERR_MLINK);
-TRACE_DEFINE_ENUM(NFSERR_OPNOTSUPP);
TRACE_DEFINE_ENUM(NFSERR_NAMETOOLONG);
TRACE_DEFINE_ENUM(NFSERR_NOTEMPTY);
TRACE_DEFINE_ENUM(NFSERR_DQUOT);
@@ -52,6 +51,7 @@ TRACE_DEFINE_ENUM(NFSERR_JUKEBOX);
{ NFSERR_IO, "IO" }, \
{ NFSERR_NXIO, "NXIO" }, \
{ ECHILD, "CHILD" }, \
+ { ETIMEDOUT, "TIMEDOUT" }, \
{ NFSERR_EAGAIN, "AGAIN" }, \
{ NFSERR_ACCES, "ACCES" }, \
{ NFSERR_EXIST, "EXIST" }, \
@@ -64,7 +64,6 @@ TRACE_DEFINE_ENUM(NFSERR_JUKEBOX);
{ NFSERR_NOSPC, "NOSPC" }, \
{ NFSERR_ROFS, "ROFS" }, \
{ NFSERR_MLINK, "MLINK" }, \
- { NFSERR_OPNOTSUPP, "OPNOTSUPP" }, \
{ NFSERR_NAMETOOLONG, "NAMETOOLONG" }, \
{ NFSERR_NOTEMPTY, "NOTEMPTY" }, \
{ NFSERR_DQUOT, "DQUOT" }, \
@@ -239,6 +238,7 @@ TRACE_DEFINE_ENUM(NFS4ERR_RESET_TO_PNFS);
{ EHOSTDOWN, "EHOSTDOWN" }, \
{ EPIPE, "EPIPE" }, \
{ EPFNOSUPPORT, "EPFNOSUPPORT" }, \
+ { EINVAL, "EINVAL" }, \
{ EPROTONOSUPPORT, "EPROTONOSUPPORT" }, \
{ NFS4ERR_ACCESS, "ACCESS" }, \
{ NFS4ERR_ATTRNOTSUPP, "ATTRNOTSUPP" }, \
@@ -385,3 +385,37 @@ TRACE_DEFINE_ENUM(IOMODE_ANY);
{ SEQ4_STATUS_RESTART_RECLAIM_NEEDED, "RESTART_RECLAIM_NEEDED" }, \
{ SEQ4_STATUS_CB_PATH_DOWN_SESSION, "CB_PATH_DOWN_SESSION" }, \
{ SEQ4_STATUS_BACKCHANNEL_FAULT, "BACKCHANNEL_FAULT" })
+
+TRACE_DEFINE_ENUM(OP_CB_GETATTR);
+TRACE_DEFINE_ENUM(OP_CB_RECALL);
+TRACE_DEFINE_ENUM(OP_CB_LAYOUTRECALL);
+TRACE_DEFINE_ENUM(OP_CB_NOTIFY);
+TRACE_DEFINE_ENUM(OP_CB_PUSH_DELEG);
+TRACE_DEFINE_ENUM(OP_CB_RECALL_ANY);
+TRACE_DEFINE_ENUM(OP_CB_RECALLABLE_OBJ_AVAIL);
+TRACE_DEFINE_ENUM(OP_CB_RECALL_SLOT);
+TRACE_DEFINE_ENUM(OP_CB_SEQUENCE);
+TRACE_DEFINE_ENUM(OP_CB_WANTS_CANCELLED);
+TRACE_DEFINE_ENUM(OP_CB_NOTIFY_LOCK);
+TRACE_DEFINE_ENUM(OP_CB_NOTIFY_DEVICEID);
+TRACE_DEFINE_ENUM(OP_CB_OFFLOAD);
+TRACE_DEFINE_ENUM(OP_CB_ILLEGAL);
+
+#define show_nfs4_cb_op(x) \
+ __print_symbolic(x, \
+ { 0, "CB_NULL" }, \
+ { 1, "CB_COMPOUND" }, \
+ { OP_CB_GETATTR, "CB_GETATTR" }, \
+ { OP_CB_RECALL, "CB_RECALL" }, \
+ { OP_CB_LAYOUTRECALL, "CB_LAYOUTRECALL" }, \
+ { OP_CB_NOTIFY, "CB_NOTIFY" }, \
+ { OP_CB_PUSH_DELEG, "CB_PUSH_DELEG" }, \
+ { OP_CB_RECALL_ANY, "CB_RECALL_ANY" }, \
+ { OP_CB_RECALLABLE_OBJ_AVAIL, "CB_RECALLABLE_OBJ_AVAIL" }, \
+ { OP_CB_RECALL_SLOT, "CB_RECALL_SLOT" }, \
+ { OP_CB_SEQUENCE, "CB_SEQUENCE" }, \
+ { OP_CB_WANTS_CANCELLED, "CB_WANTS_CANCELLED" }, \
+ { OP_CB_NOTIFY_LOCK, "CB_NOTIFY_LOCK" }, \
+ { OP_CB_NOTIFY_DEVICEID, "CB_NOTIFY_DEVICEID" }, \
+ { OP_CB_OFFLOAD, "CB_OFFLOAD" }, \
+ { OP_CB_ILLEGAL, "CB_ILLEGAL" })
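
The new show_nfs4_cb_op() helper is meant for TP_printk(); a hypothetical use, assuming an event that records the callback opcode in __entry->opcode:

TP_printk("op=%s", show_nfs4_cb_op(__entry->opcode))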
diff --git a/include/trace/perf.h b/include/trace/perf.h
index 8f3bf1e17707..a1754b73a8f5 100644
--- a/include/trace/perf.h
+++ b/include/trace/perf.h
@@ -4,51 +4,7 @@
#ifdef CONFIG_PERF_EVENTS
-#undef __entry
-#define __entry entry
-
-#undef __get_dynamic_array
-#define __get_dynamic_array(field) \
- ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
-
-#undef __get_dynamic_array_len
-#define __get_dynamic_array_len(field) \
- ((__entry->__data_loc_##field >> 16) & 0xffff)
-
-#undef __get_str
-#define __get_str(field) ((char *)__get_dynamic_array(field))
-
-#undef __get_bitmask
-#define __get_bitmask(field) (char *)__get_dynamic_array(field)
-
-#undef __get_cpumask
-#define __get_cpumask(field) (char *)__get_dynamic_array(field)
-
-#undef __get_sockaddr
-#define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field))
-
-#undef __get_rel_dynamic_array
-#define __get_rel_dynamic_array(field) \
- ((void *)__entry + \
- offsetof(typeof(*__entry), __rel_loc_##field) + \
- sizeof(__entry->__rel_loc_##field) + \
- (__entry->__rel_loc_##field & 0xffff))
-
-#undef __get_rel_dynamic_array_len
-#define __get_rel_dynamic_array_len(field) \
- ((__entry->__rel_loc_##field >> 16) & 0xffff)
-
-#undef __get_rel_str
-#define __get_rel_str(field) ((char *)__get_rel_dynamic_array(field))
-
-#undef __get_rel_bitmask
-#define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field)
-
-#undef __get_rel_cpumask
-#define __get_rel_cpumask(field) (char *)__get_rel_dynamic_array(field)
-
-#undef __get_rel_sockaddr
-#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field))
+#include "stages/stage6_event_callback.h"
#undef __perf_count
#define __perf_count(c) (__count = (c))
@@ -56,10 +12,10 @@
#undef __perf_task
#define __perf_task(t) (__task = (t))
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+#undef __DECLARE_EVENT_CLASS
+#define __DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace void \
-perf_trace_##call(void *__data, proto) \
+do_perf_trace_##call(void *__data, proto) \
{ \
struct trace_event_call *event_call = __data; \
struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
@@ -100,6 +56,41 @@ perf_trace_##call(void *__data, proto) \
}
/*
+ * Define unused __count and __task variables so that @args can be used to
+ * pass arguments to do_perf_trace_##call. This is needed because the
+ * __perf_count and __perf_task macros have the side effect of storing
+ * copies into those local variables.
+ */
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \
+ PARAMS(assign), PARAMS(print)) \
+static notrace void \
+perf_trace_##call(void *__data, proto) \
+{ \
+ u64 __count __attribute__((unused)); \
+ struct task_struct *__task __attribute__((unused)); \
+ \
+ do_perf_trace_##call(__data, args); \
+}
+
+#undef DECLARE_EVENT_SYSCALL_CLASS
+#define DECLARE_EVENT_SYSCALL_CLASS(call, proto, args, tstruct, assign, print) \
+__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \
+ PARAMS(assign), PARAMS(print)) \
+static notrace void \
+perf_trace_##call(void *__data, proto) \
+{ \
+ u64 __count __attribute__((unused)); \
+ struct task_struct *__task __attribute__((unused)); \
+ \
+ might_fault(); \
+ preempt_disable_notrace(); \
+ do_perf_trace_##call(__data, args); \
+ preempt_enable_notrace(); \
+}
+
+/*
* This part is compiled out, it is only here as a build time check
* to make sure that if the tracepoint handling changes, the
* perf probe will fail to compile unless it too is updated.
@@ -117,4 +108,7 @@ static inline void perf_test_probe_##call(void) \
DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef __DECLARE_EVENT_CLASS
+
#endif /* CONFIG_PERF_EVENTS */
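
For reference, a sketch of the kind of event class the wrapper above has to support: __perf_task()/__perf_count() in TP_ARGS store into the wrapper's local __task/__count before do_perf_trace_##call() runs. The class below is illustrative, loosely modeled on the sched_stat pattern:

DECLARE_EVENT_CLASS(example_stat_template,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(__perf_task(tsk), __perf_count(delay)),
	TP_STRUCT__entry(
		__field(pid_t, pid)
		__field(u64, delay)
	),
	TP_fast_assign(
		__entry->pid   = tsk->pid;
		__entry->delay = delay;
	),
	TP_printk("pid=%d delay=%llu [ns]",
		__entry->pid, (unsigned long long)__entry->delay)
);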
diff --git a/include/trace/stages/stage2_data_offsets.h b/include/trace/stages/stage2_data_offsets.h
index 469b6a64293d..8b0cff06d346 100644
--- a/include/trace/stages/stage2_data_offsets.h
+++ b/include/trace/stages/stage2_data_offsets.h
@@ -24,7 +24,7 @@
#define __array(type, item, len)
#undef __dynamic_array
-#define __dynamic_array(type, item, len) u32 item;
+#define __dynamic_array(type, item, len) u32 item; const void *item##_ptr_;
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)
@@ -45,7 +45,7 @@
#define __sockaddr(field, len) __dynamic_array(u8, field, len)
#undef __rel_dynamic_array
-#define __rel_dynamic_array(type, item, len) u32 item;
+#define __rel_dynamic_array(type, item, len) u32 item; const void *item##_ptr_;
#undef __rel_string
#define __rel_string(item, src) __rel_dynamic_array(char, item, -1)
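
To make the stage 2 change concrete: for a hypothetical event with a single __string() field, the generated offsets struct now carries both the packed offset/length word and the saved source pointer, roughly:

/* Sketch of what stage 2 now emits for __string(name, src) in "example_event": */
struct trace_event_data_offsets_example_event {
	u32 name;		/* packed offset/length of the string data */
	const void *name_ptr_;	/* source pointer saved at stage 5, consumed by __assign_str() */
};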
diff --git a/include/trace/stages/stage3_trace_output.h b/include/trace/stages/stage3_trace_output.h
index 66374df61ed3..1e7b0bef95f5 100644
--- a/include/trace/stages/stage3_trace_output.h
+++ b/include/trace/stages/stage3_trace_output.h
@@ -119,6 +119,14 @@
trace_print_array_seq(p, array, count, el_size); \
})
+#undef __print_dynamic_array
+#define __print_dynamic_array(array, el_size) \
+ ({ \
+ __print_array(__get_dynamic_array(array), \
+ __get_dynamic_array_len(array) / (el_size), \
+ (el_size)); \
+ })
+
#undef __print_hex_dump
#define __print_hex_dump(prefix_str, prefix_type, \
rowsize, groupsize, buf, len, ascii) \
@@ -139,3 +147,6 @@
u64 ____val = (u64)(value); \
(u32) do_div(____val, NSEC_PER_SEC); \
})
+
+#undef __get_buf
+#define __get_buf(len) trace_seq_acquire(p, (len))
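
A hypothetical TP_printk() use of the new __print_dynamic_array() helper, assuming an event that declares __dynamic_array(u32, vals, num_vals):

TP_printk("vals=%s",
	__print_dynamic_array(vals, sizeof(u32)))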
diff --git a/include/trace/stages/stage4_event_fields.h b/include/trace/stages/stage4_event_fields.h
index affd541fd25e..b6f679ae21aa 100644
--- a/include/trace/stages/stage4_event_fields.h
+++ b/include/trace/stages/stage4_event_fields.h
@@ -26,7 +26,8 @@
#define __array(_type, _item, _len) { \
.type = #_type"["__stringify(_len)"]", .name = #_item, \
.size = sizeof(_type[_len]), .align = ALIGN_STRUCTFIELD(_type), \
- .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },
+ .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER,\
+ .len = _len },
#undef __dynamic_array
#define __dynamic_array(_type, _item, _len) { \
diff --git a/include/trace/stages/stage5_get_offsets.h b/include/trace/stages/stage5_get_offsets.h
index ac5c24d3beeb..c6a62dfb18ef 100644
--- a/include/trace/stages/stage5_get_offsets.h
+++ b/include/trace/stages/stage5_get_offsets.h
@@ -9,17 +9,40 @@
#undef __entry
#define __entry entry
+#ifndef __STAGE5_STRING_SRC_H
+#define __STAGE5_STRING_SRC_H
+static inline const char *__string_src(const char *str)
+{
+ if (!str)
+ return EVENT_NULL_STR;
+ return str;
+}
+#endif /* __STAGE5_STRING_SRC_H */
+
+/*
+ * Fields should never declare an array, e.g. __field(int, arr[5]).
+ * If they do, it will cause issues in parsing and possibly corrupt the
+ * events. To prevent that from happening, test the sizeof() of a fictitious
+ * pointer type "struct _test_no_array_##item *", which will fail to compile
+ * if "item" contains array elements (like "arr[5]").
+ *
+ * If you hit this, use __array(int, arr, 5) instead.
+ */
#undef __field
-#define __field(type, item)
+#define __field(type, item) \
+ { (void)sizeof(struct _test_no_array_##item *); }
#undef __field_ext
-#define __field_ext(type, item, filter_type)
+#define __field_ext(type, item, filter_type) \
+ { (void)sizeof(struct _test_no_array_##item *); }
#undef __field_struct
-#define __field_struct(type, item)
+#define __field_struct(type, item) \
+ { (void)sizeof(struct _test_no_array_##item *); }
#undef __field_struct_ext
-#define __field_struct_ext(type, item, filter_type)
+#define __field_struct_ext(type, item, filter_type) \
+ { (void)sizeof(struct _test_no_array_##item *); }
#undef __array
#define __array(type, item, len)
@@ -34,10 +57,12 @@
#undef __string
#define __string(item, src) __dynamic_array(char, item, \
- strlen((src) ? (const char *)(src) : "(null)") + 1)
+ strlen(__string_src(src)) + 1) \
+ __data_offsets->item##_ptr_ = src;
#undef __string_len
-#define __string_len(item, src, len) __dynamic_array(char, item, (len) + 1)
+#define __string_len(item, src, len) __dynamic_array(char, item, (len) + 1)\
+ __data_offsets->item##_ptr_ = src;
#undef __vstring
#define __vstring(item, fmt, ap) __dynamic_array(char, item, \
@@ -54,11 +79,14 @@
__data_size += __item_length;
#undef __rel_string
-#define __rel_string(item, src) __rel_dynamic_array(char, item, \
- strlen((src) ? (const char *)(src) : "(null)") + 1)
+#define __rel_string(item, src) __rel_dynamic_array(char, item, \
+ strlen(__string_src(src)) + 1) \
+ __data_offsets->item##_ptr_ = src;
#undef __rel_string_len
-#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, (len) + 1)
+#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, (len) + 1)\
+ __data_offsets->item##_ptr_ = src;
+
/*
* __bitmask_size_in_bytes_raw is the number of bytes needed to hold
* num_possible_cpus().
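
An illustration of the array check added to the __field() family above (field names made up):

/*
 *	__field(int, slots[4])	-- now breaks the build: it expands to
 *				   (void)sizeof(struct _test_no_array_slots[4] *),
 *				   which is not a valid type
 *	__array(int, slots, 4)	-- the correct way to declare a fixed-size array
 */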
diff --git a/include/trace/stages/stage6_event_callback.h b/include/trace/stages/stage6_event_callback.h
index 49c32394b53f..1691676fd858 100644
--- a/include/trace/stages/stage6_event_callback.h
+++ b/include/trace/stages/stage6_event_callback.h
@@ -2,6 +2,9 @@
/* Stage 6 definitions for creating trace events */
+/* Reuse some of the stage 3 macros */
+#include "stage3_trace_output.h"
+
#undef __entry
#define __entry entry
@@ -28,15 +31,14 @@
#define __vstring(item, fmt, ap) __dynamic_array(char, item, -1)
#undef __assign_str
-#define __assign_str(dst, src) \
- strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
-
-#undef __assign_str_len
-#define __assign_str_len(dst, src, len) \
+#define __assign_str(dst) \
do { \
- memcpy(__get_str(dst), (src), (len)); \
- __get_str(dst)[len] = '\0'; \
- } while(0)
+ char *__str__ = __get_str(dst); \
+ int __len__ = __get_dynamic_array_len(dst) - 1; \
+ memcpy(__str__, __data_offsets.dst##_ptr_ ? : \
+ EVENT_NULL_STR, __len__); \
+ __str__[__len__] = '\0'; \
+ } while (0)
#undef __assign_vstr
#define __assign_vstr(dst, fmt, va) \
@@ -88,14 +90,13 @@
#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1)
#undef __assign_rel_str
-#define __assign_rel_str(dst, src) \
- strcpy(__get_rel_str(dst), (src) ? (const char *)(src) : "(null)");
-
-#undef __assign_rel_str_len
-#define __assign_rel_str_len(dst, src, len) \
+#define __assign_rel_str(dst) \
do { \
- memcpy(__get_rel_str(dst), (src), (len)); \
- __get_rel_str(dst)[len] = '\0'; \
+ char *__str__ = __get_rel_str(dst); \
+ int __len__ = __get_rel_dynamic_array_len(dst) - 1; \
+ memcpy(__str__, __data_offsets.dst##_ptr_ ? : \
+ EVENT_NULL_STR, __len__); \
+ __str__[__len__] = '\0'; \
} while (0)
#undef __rel_bitmask
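
The practical effect of the __assign_str() rework, sketched on a hypothetical string field (the source string is now taken from the pointer that __string() saved at stage 5, so callers were converted to the single-argument form):

	/* In TP_STRUCT__entry(), unchanged: */
	__string(name, dev_name(dev))

	/* In TP_fast_assign(): */
	__assign_str(name, dev_name(dev));	/* old form: source repeated */
	__assign_str(name);			/* new form: source comes from name_ptr_ */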
diff --git a/include/trace/stages/stage7_class_define.h b/include/trace/stages/stage7_class_define.h
index 8795429f388b..fcd564a590f4 100644
--- a/include/trace/stages/stage7_class_define.h
+++ b/include/trace/stages/stage7_class_define.h
@@ -22,7 +22,9 @@
#undef __get_rel_cpumask
#undef __get_rel_sockaddr
#undef __print_array
+#undef __print_dynamic_array
#undef __print_hex_dump
+#undef __get_buf
/*
* The below is not executed in the kernel. It is only what is
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 8e193f3a33b3..0dd7f2b33431 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -16,6 +16,9 @@
* @name: name of the syscall
* @syscall_nr: number of the syscall
* @nb_args: number of parameters it takes
+ * @user_arg_is_str: set if the arg for @user_arg_size is a string
+ * @user_arg_size: holds the @arg that has the size of the user space data to read
+ * @user_mask: mask of the @args that will read user space
* @types: list of types as strings
* @args: list of args as strings (args[i] matches types[i])
* @enter_fields: list of fields for syscall_enter trace event
@@ -25,7 +28,10 @@
struct syscall_metadata {
const char *name;
int syscall_nr;
- int nb_args;
+ u8 nb_args:7;
+ u8 user_arg_is_str:1;
+ s8 user_arg_size;
+ short user_mask;
const char **types;
const char **args;
struct list_head enter_fields;
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index c2f9cabf154d..4f22136fd465 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -45,6 +45,16 @@
PARAMS(print)); \
DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
+#undef TRACE_EVENT_SYSCALL
+#define TRACE_EVENT_SYSCALL(name, proto, args, tstruct, assign, print, reg, unreg) \
+ DECLARE_EVENT_SYSCALL_CLASS(name, \
+ PARAMS(proto), \
+ PARAMS(args), \
+ PARAMS(tstruct), \
+ PARAMS(assign), \
+ PARAMS(print)); \
+ DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
+
#include "stages/stage1_struct_define.h"
#undef DECLARE_EVENT_CLASS
@@ -57,6 +67,9 @@
\
static struct trace_event_class event_class_##name;
+#undef DECLARE_EVENT_SYSCALL_CLASS
+#define DECLARE_EVENT_SYSCALL_CLASS DECLARE_EVENT_CLASS
+
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
static struct trace_event_call __used \
@@ -117,6 +130,9 @@
tstruct; \
};
+#undef DECLARE_EVENT_SYSCALL_CLASS
+#define DECLARE_EVENT_SYSCALL_CLASS DECLARE_EVENT_CLASS
+
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)
@@ -208,6 +224,9 @@ static struct trace_event_functions trace_event_type_funcs_##call = { \
.trace = trace_raw_output_##call, \
};
+#undef DECLARE_EVENT_SYSCALL_CLASS
+#define DECLARE_EVENT_SYSCALL_CLASS DECLARE_EVENT_CLASS
+
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
static notrace enum print_line_t \
@@ -244,6 +263,9 @@ static struct trace_event_fields trace_event_fields_##call[] = { \
tstruct \
{} };
+#undef DECLARE_EVENT_SYSCALL_CLASS
+#define DECLARE_EVENT_SYSCALL_CLASS DECLARE_EVENT_CLASS
+
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
@@ -265,6 +287,9 @@ static inline notrace int trace_event_get_offsets_##call( \
return __data_size; \
}
+#undef DECLARE_EVENT_SYSCALL_CLASS
+#define DECLARE_EVENT_SYSCALL_CLASS DECLARE_EVENT_CLASS
+
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
@@ -374,11 +399,11 @@ static inline notrace int trace_event_get_offsets_##call( \
#include "stages/stage6_event_callback.h"
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
- \
+
+#undef __DECLARE_EVENT_CLASS
+#define __DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace void \
-trace_event_raw_event_##call(void *__data, proto) \
+do_trace_event_raw_event_##call(void *__data, proto) \
{ \
struct trace_event_file *trace_file = __data; \
struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
@@ -403,6 +428,30 @@ trace_event_raw_event_##call(void *__data, proto) \
\
trace_event_buffer_commit(&fbuffer); \
}
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \
+ PARAMS(assign), PARAMS(print)) \
+static notrace void \
+trace_event_raw_event_##call(void *__data, proto) \
+{ \
+ do_trace_event_raw_event_##call(__data, args); \
+}
+
+#undef DECLARE_EVENT_SYSCALL_CLASS
+#define DECLARE_EVENT_SYSCALL_CLASS(call, proto, args, tstruct, assign, print) \
+__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \
+ PARAMS(assign), PARAMS(print)) \
+static notrace void \
+trace_event_raw_event_##call(void *__data, proto) \
+{ \
+ might_fault(); \
+ preempt_disable_notrace(); \
+ do_trace_event_raw_event_##call(__data, args); \
+ preempt_enable_notrace(); \
+}
+
/*
* The ftrace_test_probe is compiled out, it is only here as a build time check
* to make sure that if the tracepoint handling changes, the ftrace probe will
@@ -418,6 +467,8 @@ static inline void ftrace_test_probe_##call(void) \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#undef __DECLARE_EVENT_CLASS
+
#include "stages/stage7_class_define.h"
#undef DECLARE_EVENT_CLASS
@@ -434,6 +485,9 @@ static struct trace_event_class __used __refdata event_class_##call = { \
_TRACE_PERF_INIT(call) \
};
+#undef DECLARE_EVENT_SYSCALL_CLASS
+#define DECLARE_EVENT_SYSCALL_CLASS DECLARE_EVENT_CLASS
+
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
\