Diffstat (limited to 'include/trace/events')
-rw-r--r--  include/trace/events/afs.h          293
-rw-r--r--  include/trace/events/bpf.h            5
-rw-r--r--  include/trace/events/btrfs.h         41
-rw-r--r--  include/trace/events/dma_fence.h     40
-rw-r--r--  include/trace/events/f2fs.h         116
-rw-r--r--  include/trace/events/fib6.h           6
-rw-r--r--  include/trace/events/fs_dax.h         3
-rw-r--r--  include/trace/events/kmem.h          11
-rw-r--r--  include/trace/events/mmflags.h        2
-rw-r--r--  include/trace/events/preemptirq.h    70
-rw-r--r--  include/trace/events/sock.h           2
-rw-r--r--  include/trace/events/sunrpc.h        92
-rw-r--r--  include/trace/events/tcp.h          299
-rw-r--r--  include/trace/events/thermal.h        4
-rw-r--r--  include/trace/events/vmscan.h         4
-rw-r--r--  include/trace/events/writeback.h      1
-rw-r--r--  include/trace/events/xdp.h           80
-rw-r--r--  include/trace/events/xen.h           35
18 files changed, 987 insertions, 117 deletions
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index 8b95c16b7045..6b59c63a8e51 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -30,6 +30,38 @@ enum afs_call_trace {
afs_call_trace_work,
};
+enum afs_fs_operation {
+ afs_FS_FetchData = 130, /* AFS Fetch file data */
+ afs_FS_FetchStatus = 132, /* AFS Fetch file status */
+ afs_FS_StoreData = 133, /* AFS Store file data */
+ afs_FS_StoreStatus = 135, /* AFS Store file status */
+ afs_FS_RemoveFile = 136, /* AFS Remove a file */
+ afs_FS_CreateFile = 137, /* AFS Create a file */
+ afs_FS_Rename = 138, /* AFS Rename or move a file or directory */
+ afs_FS_Symlink = 139, /* AFS Create a symbolic link */
+ afs_FS_Link = 140, /* AFS Create a hard link */
+ afs_FS_MakeDir = 141, /* AFS Create a directory */
+ afs_FS_RemoveDir = 142, /* AFS Remove a directory */
+ afs_FS_GetVolumeInfo = 148, /* AFS Get information about a volume */
+ afs_FS_GetVolumeStatus = 149, /* AFS Get volume status information */
+ afs_FS_GetRootVolume = 151, /* AFS Get root volume name */
+ afs_FS_SetLock = 156, /* AFS Request a file lock */
+ afs_FS_ExtendLock = 157, /* AFS Extend a file lock */
+ afs_FS_ReleaseLock = 158, /* AFS Release a file lock */
+ afs_FS_Lookup = 161, /* AFS lookup file in directory */
+ afs_FS_FetchData64 = 65537, /* AFS Fetch file data */
+ afs_FS_StoreData64 = 65538, /* AFS Store file data */
+ afs_FS_GiveUpAllCallBacks = 65539, /* AFS Give up all our callbacks on a server */
+ afs_FS_GetCapabilities = 65540, /* AFS Get FS server capabilities */
+};
+
+enum afs_vl_operation {
+ afs_VL_GetEntryByNameU = 527, /* AFS Get Vol Entry By Name operation ID */
+ afs_VL_GetAddrsU = 533, /* AFS Get FS server addresses */
+ afs_YFSVL_GetEndpoints = 64002, /* YFS Get FS & Vol server addresses */
+ afs_VL_GetCapabilities = 65537, /* AFS Get VL server capabilities */
+};
+
#endif /* end __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY */
/*
@@ -42,6 +74,37 @@ enum afs_call_trace {
EM(afs_call_trace_wake, "WAKE ") \
E_(afs_call_trace_work, "WORK ")
+#define afs_fs_operations \
+ EM(afs_FS_FetchData, "FS.FetchData") \
+ EM(afs_FS_FetchStatus, "FS.FetchStatus") \
+ EM(afs_FS_StoreData, "FS.StoreData") \
+ EM(afs_FS_StoreStatus, "FS.StoreStatus") \
+ EM(afs_FS_RemoveFile, "FS.RemoveFile") \
+ EM(afs_FS_CreateFile, "FS.CreateFile") \
+ EM(afs_FS_Rename, "FS.Rename") \
+ EM(afs_FS_Symlink, "FS.Symlink") \
+ EM(afs_FS_Link, "FS.Link") \
+ EM(afs_FS_MakeDir, "FS.MakeDir") \
+ EM(afs_FS_RemoveDir, "FS.RemoveDir") \
+ EM(afs_FS_GetVolumeInfo, "FS.GetVolumeInfo") \
+ EM(afs_FS_GetVolumeStatus, "FS.GetVolumeStatus") \
+ EM(afs_FS_GetRootVolume, "FS.GetRootVolume") \
+ EM(afs_FS_SetLock, "FS.SetLock") \
+ EM(afs_FS_ExtendLock, "FS.ExtendLock") \
+ EM(afs_FS_ReleaseLock, "FS.ReleaseLock") \
+ EM(afs_FS_Lookup, "FS.Lookup") \
+ EM(afs_FS_FetchData64, "FS.FetchData64") \
+ EM(afs_FS_StoreData64, "FS.StoreData64") \
+ EM(afs_FS_GiveUpAllCallBacks, "FS.GiveUpAllCallBacks") \
+ E_(afs_FS_GetCapabilities, "FS.GetCapabilities")
+
+#define afs_vl_operations \
+ EM(afs_VL_GetEntryByNameU, "VL.GetEntryByNameU") \
+ EM(afs_VL_GetAddrsU, "VL.GetAddrsU") \
+ EM(afs_YFSVL_GetEndpoints, "YFSVL.GetEndpoints") \
+ E_(afs_VL_GetCapabilities, "VL.GetCapabilities")
+
+
/*
* Export enum symbols via userspace.
*/
@@ -51,6 +114,8 @@ enum afs_call_trace {
#define E_(a, b) TRACE_DEFINE_ENUM(a);
afs_call_traces;
+afs_fs_operations;
+afs_vl_operations;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -178,6 +243,234 @@ TRACE_EVENT(afs_call,
__entry->where)
);
+TRACE_EVENT(afs_make_fs_call,
+ TP_PROTO(struct afs_call *call, const struct afs_fid *fid),
+
+ TP_ARGS(call, fid),
+
+ TP_STRUCT__entry(
+ __field(struct afs_call *, call )
+ __field(enum afs_fs_operation, op )
+ __field_struct(struct afs_fid, fid )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->op = call->operation_ID;
+ if (fid) {
+ __entry->fid = *fid;
+ } else {
+ __entry->fid.vid = 0;
+ __entry->fid.vnode = 0;
+ __entry->fid.unique = 0;
+ }
+ ),
+
+ TP_printk("c=%p %06x:%06x:%06x %s",
+ __entry->call,
+ __entry->fid.vid,
+ __entry->fid.vnode,
+ __entry->fid.unique,
+ __print_symbolic(__entry->op, afs_fs_operations))
+ );
+
+TRACE_EVENT(afs_make_vl_call,
+ TP_PROTO(struct afs_call *call),
+
+ TP_ARGS(call),
+
+ TP_STRUCT__entry(
+ __field(struct afs_call *, call )
+ __field(enum afs_vl_operation, op )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->op = call->operation_ID;
+ ),
+
+ TP_printk("c=%p %s",
+ __entry->call,
+ __print_symbolic(__entry->op, afs_vl_operations))
+ );
+
+TRACE_EVENT(afs_call_done,
+ TP_PROTO(struct afs_call *call),
+
+ TP_ARGS(call),
+
+ TP_STRUCT__entry(
+ __field(struct afs_call *, call )
+ __field(struct rxrpc_call *, rx_call )
+ __field(int, ret )
+ __field(u32, abort_code )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->rx_call = call->rxcall;
+ __entry->ret = call->error;
+ __entry->abort_code = call->abort_code;
+ ),
+
+ TP_printk(" c=%p ret=%d ab=%d [%p]",
+ __entry->call,
+ __entry->ret,
+ __entry->abort_code,
+ __entry->rx_call)
+ );
+
+TRACE_EVENT(afs_send_pages,
+ TP_PROTO(struct afs_call *call, struct msghdr *msg,
+ pgoff_t first, pgoff_t last, unsigned int offset),
+
+ TP_ARGS(call, msg, first, last, offset),
+
+ TP_STRUCT__entry(
+ __field(struct afs_call *, call )
+ __field(pgoff_t, first )
+ __field(pgoff_t, last )
+ __field(unsigned int, nr )
+ __field(unsigned int, bytes )
+ __field(unsigned int, offset )
+ __field(unsigned int, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->first = first;
+ __entry->last = last;
+ __entry->nr = msg->msg_iter.nr_segs;
+ __entry->bytes = msg->msg_iter.count;
+ __entry->offset = offset;
+ __entry->flags = msg->msg_flags;
+ ),
+
+ TP_printk(" c=%p %lx-%lx-%lx b=%x o=%x f=%x",
+ __entry->call,
+ __entry->first, __entry->first + __entry->nr - 1, __entry->last,
+ __entry->bytes, __entry->offset,
+ __entry->flags)
+ );
+
+TRACE_EVENT(afs_sent_pages,
+ TP_PROTO(struct afs_call *call, pgoff_t first, pgoff_t last,
+ pgoff_t cursor, int ret),
+
+ TP_ARGS(call, first, last, cursor, ret),
+
+ TP_STRUCT__entry(
+ __field(struct afs_call *, call )
+ __field(pgoff_t, first )
+ __field(pgoff_t, last )
+ __field(pgoff_t, cursor )
+ __field(int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->first = first;
+ __entry->last = last;
+ __entry->cursor = cursor;
+ __entry->ret = ret;
+ ),
+
+ TP_printk(" c=%p %lx-%lx c=%lx r=%d",
+ __entry->call,
+ __entry->first, __entry->last,
+ __entry->cursor, __entry->ret)
+ );
+
+TRACE_EVENT(afs_dir_check_failed,
+ TP_PROTO(struct afs_vnode *vnode, loff_t off, loff_t i_size),
+
+ TP_ARGS(vnode, off, i_size),
+
+ TP_STRUCT__entry(
+ __field(struct afs_vnode *, vnode )
+ __field(loff_t, off )
+ __field(loff_t, i_size )
+ ),
+
+ TP_fast_assign(
+ __entry->vnode = vnode;
+ __entry->off = off;
+ __entry->i_size = i_size;
+ ),
+
+ TP_printk("vn=%p %llx/%llx",
+ __entry->vnode, __entry->off, __entry->i_size)
+ );
+
+/*
+ * We use page->private to hold the amount of the page that we've written to,
+ * splitting the field into two parts. However, we need to represent a range
+ * 0...PAGE_SIZE inclusive, so we can't support 64K pages on a 32-bit system.
+ */
+#if PAGE_SIZE > 32768
+#define AFS_PRIV_MAX 0xffffffff
+#define AFS_PRIV_SHIFT 32
+#else
+#define AFS_PRIV_MAX 0xffff
+#define AFS_PRIV_SHIFT 16
+#endif
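As a rough illustration of the encoding described in the comment above, the dirtied byte range could be packed into and pulled out of page->private along these lines (a sketch only; the helper names are hypothetical and not part of this diff):

/* Illustrative helpers: pack/unpack a dirtied range using AFS_PRIV_*. */
static inline unsigned long afs_priv_pack(unsigned long from, unsigned long to)
{
	return (to << AFS_PRIV_SHIFT) | from;	/* end offset in the high half */
}

static inline unsigned long afs_priv_from(unsigned long priv)
{
	return priv & AFS_PRIV_MAX;		/* low half: start of dirty range */
}

static inline unsigned long afs_priv_to(unsigned long priv)
{
	return priv >> AFS_PRIV_SHIFT;		/* high half: end of dirty range */
}

The afs_page_dirty event below prints exactly these two halves (priv & AFS_PRIV_MAX and priv >> AFS_PRIV_SHIFT).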
+
+TRACE_EVENT(afs_page_dirty,
+ TP_PROTO(struct afs_vnode *vnode, const char *where,
+ pgoff_t page, unsigned long priv),
+
+ TP_ARGS(vnode, where, page, priv),
+
+ TP_STRUCT__entry(
+ __field(struct afs_vnode *, vnode )
+ __field(const char *, where )
+ __field(pgoff_t, page )
+ __field(unsigned long, priv )
+ ),
+
+ TP_fast_assign(
+ __entry->vnode = vnode;
+ __entry->where = where;
+ __entry->page = page;
+ __entry->priv = priv;
+ ),
+
+ TP_printk("vn=%p %lx %s %lu-%lu",
+ __entry->vnode, __entry->page, __entry->where,
+ __entry->priv & AFS_PRIV_MAX,
+ __entry->priv >> AFS_PRIV_SHIFT)
+ );
+
+TRACE_EVENT(afs_call_state,
+ TP_PROTO(struct afs_call *call,
+ enum afs_call_state from,
+ enum afs_call_state to,
+ int ret, u32 remote_abort),
+
+ TP_ARGS(call, from, to, ret, remote_abort),
+
+ TP_STRUCT__entry(
+ __field(struct afs_call *, call )
+ __field(enum afs_call_state, from )
+ __field(enum afs_call_state, to )
+ __field(int, ret )
+ __field(u32, abort )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->from = from;
+ __entry->to = to;
+ __entry->ret = ret;
+ __entry->abort = remote_abort;
+ ),
+
+ TP_printk("c=%p %u->%u r=%d ab=%d",
+ __entry->call,
+ __entry->from, __entry->to,
+ __entry->ret, __entry->abort)
+ );
+
#endif /* _TRACE_AFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/bpf.h b/include/trace/events/bpf.h
index cc749d7099fb..150185647e6b 100644
--- a/include/trace/events/bpf.h
+++ b/include/trace/events/bpf.h
@@ -5,6 +5,9 @@
#if !defined(_TRACE_BPF_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BPF_H
+/* These are only used within the BPF_SYSCALL code */
+#ifdef CONFIG_BPF_SYSCALL
+
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/fs.h>
@@ -346,7 +349,7 @@ TRACE_EVENT(bpf_map_next_key,
__print_hex(__get_dynamic_array(nxt), __entry->key_len),
__entry->key_trunc ? " ..." : "")
);
-
+#endif /* CONFIG_BPF_SYSCALL */
#endif /* _TRACE_BPF_H */
#include <trace/define_trace.h>
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 32d0c1fe2bfa..4342a329821f 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -29,6 +29,13 @@ struct btrfs_qgroup_extent_record;
struct btrfs_qgroup;
struct prelim_ref;
+TRACE_DEFINE_ENUM(FLUSH_DELAYED_ITEMS_NR);
+TRACE_DEFINE_ENUM(FLUSH_DELAYED_ITEMS);
+TRACE_DEFINE_ENUM(FLUSH_DELALLOC);
+TRACE_DEFINE_ENUM(FLUSH_DELALLOC_WAIT);
+TRACE_DEFINE_ENUM(ALLOC_CHUNK);
+TRACE_DEFINE_ENUM(COMMIT_TRANS);
+
#define show_ref_type(type) \
__print_symbolic(type, \
{ BTRFS_TREE_BLOCK_REF_KEY, "TREE_BLOCK_REF" }, \
@@ -792,11 +799,10 @@ DEFINE_EVENT(btrfs_delayed_data_ref, run_delayed_data_ref,
DECLARE_EVENT_CLASS(btrfs_delayed_ref_head,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_delayed_ref_node *ref,
const struct btrfs_delayed_ref_head *head_ref,
int action),
- TP_ARGS(fs_info, ref, head_ref, action),
+ TP_ARGS(fs_info, head_ref, action),
TP_STRUCT__entry_btrfs(
__field( u64, bytenr )
@@ -806,8 +812,8 @@ DECLARE_EVENT_CLASS(btrfs_delayed_ref_head,
),
TP_fast_assign_btrfs(fs_info,
- __entry->bytenr = ref->bytenr;
- __entry->num_bytes = ref->num_bytes;
+ __entry->bytenr = head_ref->bytenr;
+ __entry->num_bytes = head_ref->num_bytes;
__entry->action = action;
__entry->is_data = head_ref->is_data;
),
@@ -822,21 +828,19 @@ DECLARE_EVENT_CLASS(btrfs_delayed_ref_head,
DEFINE_EVENT(btrfs_delayed_ref_head, add_delayed_ref_head,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_delayed_ref_node *ref,
const struct btrfs_delayed_ref_head *head_ref,
int action),
- TP_ARGS(fs_info, ref, head_ref, action)
+ TP_ARGS(fs_info, head_ref, action)
);
DEFINE_EVENT(btrfs_delayed_ref_head, run_delayed_ref_head,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_delayed_ref_node *ref,
const struct btrfs_delayed_ref_head *head_ref,
int action),
- TP_ARGS(fs_info, ref, head_ref, action)
+ TP_ARGS(fs_info, head_ref, action)
);
#define show_chunk_type(type) \
@@ -1692,6 +1696,27 @@ DEFINE_EVENT(btrfs__prelim_ref, btrfs_prelim_ref_insert,
TP_ARGS(fs_info, oldref, newref, tree_size)
);
+TRACE_EVENT(btrfs_inode_mod_outstanding_extents,
+ TP_PROTO(struct btrfs_root *root, u64 ino, int mod),
+
+ TP_ARGS(root, ino, mod),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, root_objectid )
+ __field( u64, ino )
+ __field( int, mod )
+ ),
+
+ TP_fast_assign_btrfs(root->fs_info,
+ __entry->root_objectid = root->objectid;
+ __entry->ino = ino;
+ __entry->mod = mod;
+ ),
+
+ TP_printk_btrfs("root=%llu(%s) ino=%llu mod=%d",
+ show_root_type(__entry->root_objectid),
+ (unsigned long long)__entry->ino, __entry->mod)
+);
#endif /* _TRACE_BTRFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/dma_fence.h b/include/trace/events/dma_fence.h
index d61bfddcc621..2212adda8f77 100644
--- a/include/trace/events/dma_fence.h
+++ b/include/trace/events/dma_fence.h
@@ -9,46 +9,6 @@
struct dma_fence;
-TRACE_EVENT(dma_fence_annotate_wait_on,
-
- /* fence: the fence waiting on f1, f1: the fence to be waited on. */
- TP_PROTO(struct dma_fence *fence, struct dma_fence *f1),
-
- TP_ARGS(fence, f1),
-
- TP_STRUCT__entry(
- __string(driver, fence->ops->get_driver_name(fence))
- __string(timeline, fence->ops->get_timeline_name(fence))
- __field(unsigned int, context)
- __field(unsigned int, seqno)
-
- __string(waiting_driver, f1->ops->get_driver_name(f1))
- __string(waiting_timeline, f1->ops->get_timeline_name(f1))
- __field(unsigned int, waiting_context)
- __field(unsigned int, waiting_seqno)
- ),
-
- TP_fast_assign(
- __assign_str(driver, fence->ops->get_driver_name(fence))
- __assign_str(timeline, fence->ops->get_timeline_name(fence))
- __entry->context = fence->context;
- __entry->seqno = fence->seqno;
-
- __assign_str(waiting_driver, f1->ops->get_driver_name(f1))
- __assign_str(waiting_timeline, f1->ops->get_timeline_name(f1))
- __entry->waiting_context = f1->context;
- __entry->waiting_seqno = f1->seqno;
-
- ),
-
- TP_printk("driver=%s timeline=%s context=%u seqno=%u " \
- "waits on driver=%s timeline=%s context=%u seqno=%u",
- __get_str(driver), __get_str(timeline), __entry->context,
- __entry->seqno,
- __get_str(waiting_driver), __get_str(waiting_timeline),
- __entry->waiting_context, __entry->waiting_seqno)
-);
-
DECLARE_EVENT_CLASS(dma_fence,
TP_PROTO(struct dma_fence *fence),
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 7ab40491485b..8f8dd42fa57b 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -137,6 +137,18 @@ TRACE_DEFINE_ENUM(CP_TRIMMED);
{ CP_UMOUNT, "Umount" }, \
{ CP_TRIMMED, "Trimmed" })
+#define show_fsync_cpreason(type) \
+ __print_symbolic(type, \
+ { CP_NO_NEEDED, "no needed" }, \
+ { CP_NON_REGULAR, "non regular" }, \
+ { CP_HARDLINK, "hardlink" }, \
+ { CP_SB_NEED_CP, "sb needs cp" }, \
+ { CP_WRONG_PINO, "wrong pino" }, \
+ { CP_NO_SPC_ROLL, "no space roll forward" }, \
+ { CP_NODE_NEED_CP, "node needs cp" }, \
+ { CP_FASTBOOT_MODE, "fastboot mode" }, \
+ { CP_SPEC_LOG_NUM, "log type is 2" })
+
struct victim_sel_policy;
struct f2fs_map_blocks;
@@ -211,14 +223,14 @@ DEFINE_EVENT(f2fs__inode, f2fs_sync_file_enter,
TRACE_EVENT(f2fs_sync_file_exit,
- TP_PROTO(struct inode *inode, int need_cp, int datasync, int ret),
+ TP_PROTO(struct inode *inode, int cp_reason, int datasync, int ret),
- TP_ARGS(inode, need_cp, datasync, ret),
+ TP_ARGS(inode, cp_reason, datasync, ret),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
- __field(int, need_cp)
+ __field(int, cp_reason)
__field(int, datasync)
__field(int, ret)
),
@@ -226,15 +238,15 @@ TRACE_EVENT(f2fs_sync_file_exit,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->need_cp = need_cp;
+ __entry->cp_reason = cp_reason;
__entry->datasync = datasync;
__entry->ret = ret;
),
- TP_printk("dev = (%d,%d), ino = %lu, checkpoint is %s, "
+ TP_printk("dev = (%d,%d), ino = %lu, cp_reason: %s, "
"datasync = %d, ret = %d",
show_dev_ino(__entry),
- __entry->need_cp ? "needed" : "not needed",
+ show_fsync_cpreason(__entry->cp_reason),
__entry->datasync,
__entry->ret)
);
@@ -729,6 +741,91 @@ TRACE_EVENT(f2fs_get_victim,
__entry->free)
);
+TRACE_EVENT(f2fs_lookup_start,
+
+ TP_PROTO(struct inode *dir, struct dentry *dentry, unsigned int flags),
+
+ TP_ARGS(dir, dentry, flags),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(const char *, name)
+ __field(unsigned int, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dir->i_sb->s_dev;
+ __entry->ino = dir->i_ino;
+ __entry->name = dentry->d_name.name;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("dev = (%d,%d), pino = %lu, name:%s, flags:%u",
+ show_dev_ino(__entry),
+ __entry->name,
+ __entry->flags)
+);
+
+TRACE_EVENT(f2fs_lookup_end,
+
+ TP_PROTO(struct inode *dir, struct dentry *dentry, nid_t ino,
+ int err),
+
+ TP_ARGS(dir, dentry, ino, err),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(const char *, name)
+ __field(nid_t, cino)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dir->i_sb->s_dev;
+ __entry->ino = dir->i_ino;
+ __entry->name = dentry->d_name.name;
+ __entry->cino = ino;
+ __entry->err = err;
+ ),
+
+ TP_printk("dev = (%d,%d), pino = %lu, name:%s, ino:%u, err:%d",
+ show_dev_ino(__entry),
+ __entry->name,
+ __entry->cino,
+ __entry->err)
+);
+
+TRACE_EVENT(f2fs_readdir,
+
+ TP_PROTO(struct inode *dir, loff_t start_pos, loff_t end_pos, int err),
+
+ TP_ARGS(dir, start_pos, end_pos, err),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(loff_t, start)
+ __field(loff_t, end)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dir->i_sb->s_dev;
+ __entry->ino = dir->i_ino;
+ __entry->start = start_pos;
+ __entry->end = end_pos;
+ __entry->err = err;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, start_pos:%llu, end_pos:%llu, err:%d",
+ show_dev_ino(__entry),
+ __entry->start,
+ __entry->end,
+ __entry->err)
+);
+
TRACE_EVENT(f2fs_fallocate,
TP_PROTO(struct inode *inode, int mode,
@@ -1287,6 +1384,13 @@ DEFINE_EVENT(f2fs_discard, f2fs_issue_discard,
TP_ARGS(dev, blkstart, blklen)
);
+DEFINE_EVENT(f2fs_discard, f2fs_remove_discard,
+
+ TP_PROTO(struct block_device *dev, block_t blkstart, block_t blklen),
+
+ TP_ARGS(dev, blkstart, blklen)
+);
+
TRACE_EVENT(f2fs_issue_reset_zone,
TP_PROTO(struct block_device *dev, block_t blkstart),
diff --git a/include/trace/events/fib6.h b/include/trace/events/fib6.h
index d46e24702765..7e8d48a81b91 100644
--- a/include/trace/events/fib6.h
+++ b/include/trace/events/fib6.h
@@ -13,9 +13,9 @@
TRACE_EVENT(fib6_table_lookup,
TP_PROTO(const struct net *net, const struct rt6_info *rt,
- u32 tb_id, const struct flowi6 *flp),
+ struct fib6_table *table, const struct flowi6 *flp),
- TP_ARGS(net, rt, tb_id, flp),
+ TP_ARGS(net, rt, table, flp),
TP_STRUCT__entry(
__field( u32, tb_id )
@@ -35,7 +35,7 @@ TRACE_EVENT(fib6_table_lookup,
TP_fast_assign(
struct in6_addr *in6;
- __entry->tb_id = tb_id;
+ __entry->tb_id = table->tb6_id;
__entry->oif = flp->flowi6_oif;
__entry->iif = flp->flowi6_iif;
__entry->tos = ip6_tclass(flp->flowlabel);
diff --git a/include/trace/events/fs_dax.h b/include/trace/events/fs_dax.h
index 8a8df5423dca..97b09fcf7e52 100644
--- a/include/trace/events/fs_dax.h
+++ b/include/trace/events/fs_dax.h
@@ -149,7 +149,6 @@ DEFINE_EVENT(dax_pmd_insert_mapping_class, name, \
TP_ARGS(inode, vmf, length, pfn, radix_entry))
DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping);
-DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping_fallback);
DECLARE_EVENT_CLASS(dax_pte_fault_class,
TP_PROTO(struct inode *inode, struct vm_fault *vmf, int result),
@@ -192,6 +191,8 @@ DEFINE_EVENT(dax_pte_fault_class, name, \
DEFINE_PTE_FAULT_EVENT(dax_pte_fault);
DEFINE_PTE_FAULT_EVENT(dax_pte_fault_done);
DEFINE_PTE_FAULT_EVENT(dax_load_hole);
+DEFINE_PTE_FAULT_EVENT(dax_insert_pfn_mkwrite_no_entry);
+DEFINE_PTE_FAULT_EVENT(dax_insert_pfn_mkwrite);
TRACE_EVENT(dax_insert_mapping,
TP_PROTO(struct inode *inode, struct vm_fault *vmf, void *radix_entry),
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 285feeadac39..eb57e3037deb 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -172,24 +172,21 @@ TRACE_EVENT(mm_page_free,
TRACE_EVENT(mm_page_free_batched,
- TP_PROTO(struct page *page, int cold),
+ TP_PROTO(struct page *page),
- TP_ARGS(page, cold),
+ TP_ARGS(page),
TP_STRUCT__entry(
__field( unsigned long, pfn )
- __field( int, cold )
),
TP_fast_assign(
__entry->pfn = page_to_pfn(page);
- __entry->cold = cold;
),
- TP_printk("page=%p pfn=%lu order=0 cold=%d",
+ TP_printk("page=%p pfn=%lu order=0",
pfn_to_page(__entry->pfn),
- __entry->pfn,
- __entry->cold)
+ __entry->pfn)
);
TRACE_EVENT(mm_page_alloc,
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 648cbf603736..dbe1bb058c09 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -32,7 +32,6 @@
{(unsigned long)__GFP_ATOMIC, "__GFP_ATOMIC"}, \
{(unsigned long)__GFP_IO, "__GFP_IO"}, \
{(unsigned long)__GFP_FS, "__GFP_FS"}, \
- {(unsigned long)__GFP_COLD, "__GFP_COLD"}, \
{(unsigned long)__GFP_NOWARN, "__GFP_NOWARN"}, \
{(unsigned long)__GFP_RETRY_MAYFAIL, "__GFP_RETRY_MAYFAIL"}, \
{(unsigned long)__GFP_NOFAIL, "__GFP_NOFAIL"}, \
@@ -46,7 +45,6 @@
{(unsigned long)__GFP_RECLAIMABLE, "__GFP_RECLAIMABLE"}, \
{(unsigned long)__GFP_MOVABLE, "__GFP_MOVABLE"}, \
{(unsigned long)__GFP_ACCOUNT, "__GFP_ACCOUNT"}, \
- {(unsigned long)__GFP_NOTRACK, "__GFP_NOTRACK"}, \
{(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \
{(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \
{(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\
diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h
new file mode 100644
index 000000000000..f5024c560d8f
--- /dev/null
+++ b/include/trace/events/preemptirq.h
@@ -0,0 +1,70 @@
+#ifdef CONFIG_PREEMPTIRQ_EVENTS
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM preemptirq
+
+#if !defined(_TRACE_PREEMPTIRQ_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PREEMPTIRQ_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+#include <linux/string.h>
+#include <asm/sections.h>
+
+DECLARE_EVENT_CLASS(preemptirq_template,
+
+ TP_PROTO(unsigned long ip, unsigned long parent_ip),
+
+ TP_ARGS(ip, parent_ip),
+
+ TP_STRUCT__entry(
+ __field(u32, caller_offs)
+ __field(u32, parent_offs)
+ ),
+
+ TP_fast_assign(
+ __entry->caller_offs = (u32)(ip - (unsigned long)_stext);
+ __entry->parent_offs = (u32)(parent_ip - (unsigned long)_stext);
+ ),
+
+ TP_printk("caller=%pF parent=%pF",
+ (void *)((unsigned long)(_stext) + __entry->caller_offs),
+ (void *)((unsigned long)(_stext) + __entry->parent_offs))
+);
+
+#ifndef CONFIG_PROVE_LOCKING
+DEFINE_EVENT(preemptirq_template, irq_disable,
+ TP_PROTO(unsigned long ip, unsigned long parent_ip),
+ TP_ARGS(ip, parent_ip));
+
+DEFINE_EVENT(preemptirq_template, irq_enable,
+ TP_PROTO(unsigned long ip, unsigned long parent_ip),
+ TP_ARGS(ip, parent_ip));
+#endif
+
+#ifdef CONFIG_DEBUG_PREEMPT
+DEFINE_EVENT(preemptirq_template, preempt_disable,
+ TP_PROTO(unsigned long ip, unsigned long parent_ip),
+ TP_ARGS(ip, parent_ip));
+
+DEFINE_EVENT(preemptirq_template, preempt_enable,
+ TP_PROTO(unsigned long ip, unsigned long parent_ip),
+ TP_ARGS(ip, parent_ip));
+#endif
+
+#endif /* _TRACE_PREEMPTIRQ_H */
+
+#include <trace/define_trace.h>
+
+#else /* !CONFIG_PREEMPTIRQ_EVENTS */
+
+#define trace_irq_enable(...)
+#define trace_irq_disable(...)
+#define trace_preempt_enable(...)
+#define trace_preempt_disable(...)
+#define trace_irq_enable_rcuidle(...)
+#define trace_irq_disable_rcuidle(...)
+#define trace_preempt_enable_rcuidle(...)
+#define trace_preempt_disable_rcuidle(...)
+
+#endif
diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h
index 6d31c0520ef3..ec4dade24466 100644
--- a/include/trace/events/sock.h
+++ b/include/trace/events/sock.h
@@ -48,7 +48,7 @@ TRACE_EVENT(sock_exceed_buf_limit,
strncpy(__entry->name, prot->name, 32);
__entry->sysctl_mem = prot->sysctl_mem;
__entry->allocated = allocated;
- __entry->sysctl_rmem = prot->sysctl_rmem[0];
+ __entry->sysctl_rmem = sk_get_rmem0(sk, prot);
__entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
),
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 25a7739514cd..8c153f68509e 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -71,6 +71,36 @@ TRACE_EVENT(rpc_connect_status,
__entry->status)
);
+TRACE_EVENT(rpc_request,
+ TP_PROTO(const struct rpc_task *task),
+
+ TP_ARGS(task),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(int, version)
+ __field(bool, async)
+ __string(progname, task->tk_client->cl_program->name)
+ __string(procname, rpc_proc_name(task))
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->version = task->tk_client->cl_vers;
+ __entry->async = RPC_IS_ASYNC(task);
+ __assign_str(progname, task->tk_client->cl_program->name)
+ __assign_str(procname, rpc_proc_name(task))
+ ),
+
+ TP_printk("task:%u@%u %sv%d %s (%ssync)",
+ __entry->task_id, __entry->client_id,
+ __get_str(progname), __entry->version,
+ __get_str(procname), __entry->async ? "a": ""
+ )
+);
+
DECLARE_EVENT_CLASS(rpc_task_running,
TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
@@ -342,21 +372,21 @@ DECLARE_EVENT_CLASS(rpc_xprt_event,
TP_ARGS(xprt, xid, status),
TP_STRUCT__entry(
- __field(__be32, xid)
+ __field(u32, xid)
__field(int, status)
__string(addr, xprt->address_strings[RPC_DISPLAY_ADDR])
__string(port, xprt->address_strings[RPC_DISPLAY_PORT])
),
TP_fast_assign(
- __entry->xid = xid;
+ __entry->xid = be32_to_cpu(xid);
__entry->status = status;
__assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]);
__assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]);
),
- TP_printk("peer=[%s]:%s xid=0x%x status=%d", __get_str(addr),
- __get_str(port), be32_to_cpu(__entry->xid),
+ TP_printk("peer=[%s]:%s xid=0x%08x status=%d", __get_str(addr),
+ __get_str(port), __entry->xid,
__entry->status)
);
@@ -417,7 +447,7 @@ TRACE_EVENT(xs_tcp_data_recv,
TP_STRUCT__entry(
__string(addr, xs->xprt.address_strings[RPC_DISPLAY_ADDR])
__string(port, xs->xprt.address_strings[RPC_DISPLAY_PORT])
- __field(__be32, xid)
+ __field(u32, xid)
__field(unsigned long, flags)
__field(unsigned long, copied)
__field(unsigned int, reclen)
@@ -427,15 +457,15 @@ TRACE_EVENT(xs_tcp_data_recv,
TP_fast_assign(
__assign_str(addr, xs->xprt.address_strings[RPC_DISPLAY_ADDR]);
__assign_str(port, xs->xprt.address_strings[RPC_DISPLAY_PORT]);
- __entry->xid = xs->tcp_xid;
+ __entry->xid = be32_to_cpu(xs->tcp_xid);
__entry->flags = xs->tcp_flags;
__entry->copied = xs->tcp_copied;
__entry->reclen = xs->tcp_reclen;
__entry->offset = xs->tcp_offset;
),
- TP_printk("peer=[%s]:%s xid=0x%x flags=%s copied=%lu reclen=%u offset=%lu",
- __get_str(addr), __get_str(port), be32_to_cpu(__entry->xid),
+ TP_printk("peer=[%s]:%s xid=0x%08x flags=%s copied=%lu reclen=%u offset=%lu",
+ __get_str(addr), __get_str(port), __entry->xid,
rpc_show_sock_xprt_flags(__entry->flags),
__entry->copied, __entry->reclen, __entry->offset)
);
@@ -456,21 +486,23 @@ TRACE_EVENT(svc_recv,
TP_ARGS(rqst, status),
TP_STRUCT__entry(
- __field(struct sockaddr *, addr)
- __field(__be32, xid)
+ __field(u32, xid)
__field(int, status)
__field(unsigned long, flags)
+ __dynamic_array(unsigned char, addr, rqst->rq_addrlen)
),
TP_fast_assign(
- __entry->addr = (struct sockaddr *)&rqst->rq_addr;
- __entry->xid = status > 0 ? rqst->rq_xid : 0;
+ __entry->xid = status > 0 ? be32_to_cpu(rqst->rq_xid) : 0;
__entry->status = status;
__entry->flags = rqst->rq_flags;
+ memcpy(__get_dynamic_array(addr),
+ &rqst->rq_addr, rqst->rq_addrlen);
),
- TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s", __entry->addr,
- be32_to_cpu(__entry->xid), __entry->status,
+ TP_printk("addr=%pIScp xid=0x%08x status=%d flags=%s",
+ (struct sockaddr *)__get_dynamic_array(addr),
+ __entry->xid, __entry->status,
show_rqstp_flags(__entry->flags))
);
@@ -481,21 +513,21 @@ DECLARE_EVENT_CLASS(svc_rqst_event,
TP_ARGS(rqst),
TP_STRUCT__entry(
- __field(__be32, xid)
+ __field(u32, xid)
__field(unsigned long, flags)
__dynamic_array(unsigned char, addr, rqst->rq_addrlen)
),
TP_fast_assign(
- __entry->xid = rqst->rq_xid;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
__entry->flags = rqst->rq_flags;
memcpy(__get_dynamic_array(addr),
&rqst->rq_addr, rqst->rq_addrlen);
),
- TP_printk("addr=%pIScp rq_xid=0x%x flags=%s",
+ TP_printk("addr=%pIScp rq_xid=0x%08x flags=%s",
(struct sockaddr *)__get_dynamic_array(addr),
- be32_to_cpu(__entry->xid),
+ __entry->xid,
show_rqstp_flags(__entry->flags))
);
@@ -514,22 +546,23 @@ DECLARE_EVENT_CLASS(svc_rqst_status,
TP_ARGS(rqst, status),
TP_STRUCT__entry(
- __field(struct sockaddr *, addr)
- __field(__be32, xid)
- __field(int, dropme)
+ __field(u32, xid)
__field(int, status)
__field(unsigned long, flags)
+ __dynamic_array(unsigned char, addr, rqst->rq_addrlen)
),
TP_fast_assign(
- __entry->addr = (struct sockaddr *)&rqst->rq_addr;
- __entry->xid = rqst->rq_xid;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
__entry->status = status;
__entry->flags = rqst->rq_flags;
+ memcpy(__get_dynamic_array(addr),
+ &rqst->rq_addr, rqst->rq_addrlen);
),
- TP_printk("addr=%pIScp rq_xid=0x%x status=%d flags=%s",
- __entry->addr, be32_to_cpu(__entry->xid),
+ TP_printk("addr=%pIScp rq_xid=0x%08x status=%d flags=%s",
+ (struct sockaddr *)__get_dynamic_array(addr),
+ __entry->xid,
__entry->status, show_rqstp_flags(__entry->flags))
);
@@ -678,18 +711,19 @@ DECLARE_EVENT_CLASS(svc_deferred_event,
TP_ARGS(dr),
TP_STRUCT__entry(
- __field(__be32, xid)
+ __field(u32, xid)
__dynamic_array(unsigned char, addr, dr->addrlen)
),
TP_fast_assign(
- __entry->xid = *(__be32 *)(dr->args + (dr->xprt_hlen>>2));
+ __entry->xid = be32_to_cpu(*(__be32 *)(dr->args +
+ (dr->xprt_hlen>>2)));
memcpy(__get_dynamic_array(addr), &dr->addr, dr->addrlen);
),
- TP_printk("addr=%pIScp xid=0x%x",
+ TP_printk("addr=%pIScp xid=0x%08x",
(struct sockaddr *)__get_dynamic_array(addr),
- be32_to_cpu(__entry->xid))
+ __entry->xid)
);
DEFINE_EVENT(svc_deferred_event, svc_drop_deferred,
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
new file mode 100644
index 000000000000..07cccca6cbf1
--- /dev/null
+++ b/include/trace/events/tcp.h
@@ -0,0 +1,299 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM tcp
+
+#if !defined(_TRACE_TCP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_TCP_H
+
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/tracepoint.h>
+#include <net/ipv6.h>
+
+#define tcp_state_name(state) { state, #state }
+#define show_tcp_state_name(val) \
+ __print_symbolic(val, \
+ tcp_state_name(TCP_ESTABLISHED), \
+ tcp_state_name(TCP_SYN_SENT), \
+ tcp_state_name(TCP_SYN_RECV), \
+ tcp_state_name(TCP_FIN_WAIT1), \
+ tcp_state_name(TCP_FIN_WAIT2), \
+ tcp_state_name(TCP_TIME_WAIT), \
+ tcp_state_name(TCP_CLOSE), \
+ tcp_state_name(TCP_CLOSE_WAIT), \
+ tcp_state_name(TCP_LAST_ACK), \
+ tcp_state_name(TCP_LISTEN), \
+ tcp_state_name(TCP_CLOSING), \
+ tcp_state_name(TCP_NEW_SYN_RECV))
+
+/*
+ * tcp event with arguments sk and skb
+ *
+ * Note: this class requires a valid sk pointer; while skb pointer could
+ * be NULL.
+ */
+DECLARE_EVENT_CLASS(tcp_event_sk_skb,
+
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+
+ TP_ARGS(sk, skb),
+
+ TP_STRUCT__entry(
+ __field(const void *, skbaddr)
+ __field(const void *, skaddr)
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __array(__u8, saddr, 4)
+ __array(__u8, daddr, 4)
+ __array(__u8, saddr_v6, 16)
+ __array(__u8, daddr_v6, 16)
+ ),
+
+ TP_fast_assign(
+ struct inet_sock *inet = inet_sk(sk);
+ struct in6_addr *pin6;
+ __be32 *p32;
+
+ __entry->skbaddr = skb;
+ __entry->skaddr = sk;
+
+ __entry->sport = ntohs(inet->inet_sport);
+ __entry->dport = ntohs(inet->inet_dport);
+
+ p32 = (__be32 *) __entry->saddr;
+ *p32 = inet->inet_saddr;
+
+ p32 = (__be32 *) __entry->daddr;
+ *p32 = inet->inet_daddr;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6) {
+ pin6 = (struct in6_addr *)__entry->saddr_v6;
+ *pin6 = sk->sk_v6_rcv_saddr;
+ pin6 = (struct in6_addr *)__entry->daddr_v6;
+ *pin6 = sk->sk_v6_daddr;
+ } else
+#endif
+ {
+ pin6 = (struct in6_addr *)__entry->saddr_v6;
+ ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
+ pin6 = (struct in6_addr *)__entry->daddr_v6;
+ ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
+ }
+ ),
+
+ TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
+ __entry->sport, __entry->dport, __entry->saddr, __entry->daddr,
+ __entry->saddr_v6, __entry->daddr_v6)
+);
+
+DEFINE_EVENT(tcp_event_sk_skb, tcp_retransmit_skb,
+
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+
+ TP_ARGS(sk, skb)
+);
+
+/*
+ * skb of trace_tcp_send_reset is the skb that caused RST. In case of
+ * active reset, skb should be NULL
+ */
+DEFINE_EVENT(tcp_event_sk_skb, tcp_send_reset,
+
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+
+ TP_ARGS(sk, skb)
+);
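Per the note above, callers would be expected to fire this tracepoint along these lines (illustrative call sites, not part of this diff):

	/* RST sent in response to a received segment: pass the offending skb. */
	trace_tcp_send_reset(sk, skb);

	/* Active reset (e.g. aborting a connection locally): no triggering skb. */
	trace_tcp_send_reset(sk, NULL);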
+
+/*
+ * tcp event with arguments sk
+ *
+ * Note: this class requires a valid sk pointer.
+ */
+DECLARE_EVENT_CLASS(tcp_event_sk,
+
+ TP_PROTO(const struct sock *sk),
+
+ TP_ARGS(sk),
+
+ TP_STRUCT__entry(
+ __field(const void *, skaddr)
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __array(__u8, saddr, 4)
+ __array(__u8, daddr, 4)
+ __array(__u8, saddr_v6, 16)
+ __array(__u8, daddr_v6, 16)
+ ),
+
+ TP_fast_assign(
+ struct inet_sock *inet = inet_sk(sk);
+ struct in6_addr *pin6;
+ __be32 *p32;
+
+ __entry->skaddr = sk;
+
+ __entry->sport = ntohs(inet->inet_sport);
+ __entry->dport = ntohs(inet->inet_dport);
+
+ p32 = (__be32 *) __entry->saddr;
+ *p32 = inet->inet_saddr;
+
+ p32 = (__be32 *) __entry->daddr;
+ *p32 = inet->inet_daddr;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6) {
+ pin6 = (struct in6_addr *)__entry->saddr_v6;
+ *pin6 = sk->sk_v6_rcv_saddr;
+ pin6 = (struct in6_addr *)__entry->daddr_v6;
+ *pin6 = sk->sk_v6_daddr;
+ } else
+#endif
+ {
+ pin6 = (struct in6_addr *)__entry->saddr_v6;
+ ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
+ pin6 = (struct in6_addr *)__entry->daddr_v6;
+ ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
+ }
+ ),
+
+ TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
+ __entry->sport, __entry->dport,
+ __entry->saddr, __entry->daddr,
+ __entry->saddr_v6, __entry->daddr_v6)
+);
+
+DEFINE_EVENT(tcp_event_sk, tcp_receive_reset,
+
+ TP_PROTO(const struct sock *sk),
+
+ TP_ARGS(sk)
+);
+
+DEFINE_EVENT(tcp_event_sk, tcp_destroy_sock,
+
+ TP_PROTO(const struct sock *sk),
+
+ TP_ARGS(sk)
+);
+
+TRACE_EVENT(tcp_set_state,
+
+ TP_PROTO(const struct sock *sk, const int oldstate, const int newstate),
+
+ TP_ARGS(sk, oldstate, newstate),
+
+ TP_STRUCT__entry(
+ __field(const void *, skaddr)
+ __field(int, oldstate)
+ __field(int, newstate)
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __array(__u8, saddr, 4)
+ __array(__u8, daddr, 4)
+ __array(__u8, saddr_v6, 16)
+ __array(__u8, daddr_v6, 16)
+ ),
+
+ TP_fast_assign(
+ struct inet_sock *inet = inet_sk(sk);
+ struct in6_addr *pin6;
+ __be32 *p32;
+
+ __entry->skaddr = sk;
+ __entry->oldstate = oldstate;
+ __entry->newstate = newstate;
+
+ __entry->sport = ntohs(inet->inet_sport);
+ __entry->dport = ntohs(inet->inet_dport);
+
+ p32 = (__be32 *) __entry->saddr;
+ *p32 = inet->inet_saddr;
+
+ p32 = (__be32 *) __entry->daddr;
+ *p32 = inet->inet_daddr;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6) {
+ pin6 = (struct in6_addr *)__entry->saddr_v6;
+ *pin6 = sk->sk_v6_rcv_saddr;
+ pin6 = (struct in6_addr *)__entry->daddr_v6;
+ *pin6 = sk->sk_v6_daddr;
+ } else
+#endif
+ {
+ pin6 = (struct in6_addr *)__entry->saddr_v6;
+ ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
+ pin6 = (struct in6_addr *)__entry->daddr_v6;
+ ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
+ }
+ ),
+
+ TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s",
+ __entry->sport, __entry->dport,
+ __entry->saddr, __entry->daddr,
+ __entry->saddr_v6, __entry->daddr_v6,
+ show_tcp_state_name(__entry->oldstate),
+ show_tcp_state_name(__entry->newstate))
+);
+
+TRACE_EVENT(tcp_retransmit_synack,
+
+ TP_PROTO(const struct sock *sk, const struct request_sock *req),
+
+ TP_ARGS(sk, req),
+
+ TP_STRUCT__entry(
+ __field(const void *, skaddr)
+ __field(const void *, req)
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __array(__u8, saddr, 4)
+ __array(__u8, daddr, 4)
+ __array(__u8, saddr_v6, 16)
+ __array(__u8, daddr_v6, 16)
+ ),
+
+ TP_fast_assign(
+ struct inet_request_sock *ireq = inet_rsk(req);
+ struct in6_addr *pin6;
+ __be32 *p32;
+
+ __entry->skaddr = sk;
+ __entry->req = req;
+
+ __entry->sport = ireq->ir_num;
+ __entry->dport = ntohs(ireq->ir_rmt_port);
+
+ p32 = (__be32 *) __entry->saddr;
+ *p32 = ireq->ir_loc_addr;
+
+ p32 = (__be32 *) __entry->daddr;
+ *p32 = ireq->ir_rmt_addr;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6) {
+ pin6 = (struct in6_addr *)__entry->saddr_v6;
+ *pin6 = ireq->ir_v6_loc_addr;
+ pin6 = (struct in6_addr *)__entry->daddr_v6;
+ *pin6 = ireq->ir_v6_rmt_addr;
+ } else
+#endif
+ {
+ pin6 = (struct in6_addr *)__entry->saddr_v6;
+ ipv6_addr_set_v4mapped(ireq->ir_loc_addr, pin6);
+ pin6 = (struct in6_addr *)__entry->daddr_v6;
+ ipv6_addr_set_v4mapped(ireq->ir_rmt_addr, pin6);
+ }
+ ),
+
+ TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
+ __entry->sport, __entry->dport,
+ __entry->saddr, __entry->daddr,
+ __entry->saddr_v6, __entry->daddr_v6)
+);
+
+#endif /* _TRACE_TCP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/thermal.h b/include/trace/events/thermal.h
index 466c09d882ad..78946640fe03 100644
--- a/include/trace/events/thermal.h
+++ b/include/trace/events/thermal.h
@@ -91,6 +91,7 @@ TRACE_EVENT(thermal_zone_trip,
show_tzt_type(__entry->trip_type))
);
+#ifdef CONFIG_CPU_THERMAL
TRACE_EVENT(thermal_power_cpu_get_power,
TP_PROTO(const struct cpumask *cpus, unsigned long freq, u32 *load,
size_t load_len, u32 dynamic_power, u32 static_power),
@@ -148,7 +149,9 @@ TRACE_EVENT(thermal_power_cpu_limit,
__get_bitmask(cpumask), __entry->freq, __entry->cdev_state,
__entry->power)
);
+#endif /* CONFIG_CPU_THERMAL */
+#ifdef CONFIG_DEVFREQ_THERMAL
TRACE_EVENT(thermal_power_devfreq_get_power,
TP_PROTO(struct thermal_cooling_device *cdev,
struct devfreq_dev_status *status, unsigned long freq,
@@ -204,6 +207,7 @@ TRACE_EVENT(thermal_power_devfreq_limit,
__get_str(type), __entry->freq, __entry->cdev_state,
__entry->power)
);
+#endif /* CONFIG_DEVFREQ_THERMAL */
#endif /* _TRACE_THERMAL_H */
/* This part must be outside protection */
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index dc23cf032403..d70b53e65f43 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -134,6 +134,7 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_direct_reclaim_b
TP_ARGS(order, may_writepage, gfp_flags, classzone_idx)
);
+#ifdef CONFIG_MEMCG
DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_reclaim_begin,
TP_PROTO(int order, int may_writepage, gfp_t gfp_flags, int classzone_idx),
@@ -147,6 +148,7 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_softlimit_
TP_ARGS(order, may_writepage, gfp_flags, classzone_idx)
);
+#endif /* CONFIG_MEMCG */
DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_end_template,
@@ -172,6 +174,7 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_direct_reclaim_end
TP_ARGS(nr_reclaimed)
);
+#ifdef CONFIG_MEMCG
DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_reclaim_end,
TP_PROTO(unsigned long nr_reclaimed),
@@ -185,6 +188,7 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_re
TP_ARGS(nr_reclaimed)
);
+#endif /* CONFIG_MEMCG */
TRACE_EVENT(mm_shrink_slab_start,
TP_PROTO(struct shrinker *shr, struct shrink_control *sc,
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 2e1fa7910306..32db72c7c055 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -287,7 +287,6 @@ DEFINE_EVENT(writeback_class, name, \
TP_PROTO(struct bdi_writeback *wb), \
TP_ARGS(wb))
-DEFINE_WRITEBACK_EVENT(writeback_nowork);
DEFINE_WRITEBACK_EVENT(writeback_wake_background);
TRACE_EVENT(writeback_bdi_register,
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
index 810e94160c12..4cd0f05d0113 100644
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -137,14 +137,90 @@ DEFINE_EVENT_PRINT(xdp_redirect_template, xdp_redirect_map_err,
__entry->map_id, __entry->map_index)
);
+#define devmap_ifindex(fwd, map) \
+ (!fwd ? 0 : \
+ (!map ? 0 : \
+ ((map->map_type == BPF_MAP_TYPE_DEVMAP) ? \
+ ((struct net_device *)fwd)->ifindex : 0)))
+
#define _trace_xdp_redirect_map(dev, xdp, fwd, map, idx) \
- trace_xdp_redirect_map(dev, xdp, fwd ? fwd->ifindex : 0, \
+ trace_xdp_redirect_map(dev, xdp, devmap_ifindex(fwd, map), \
0, map, idx)
#define _trace_xdp_redirect_map_err(dev, xdp, fwd, map, idx, err) \
- trace_xdp_redirect_map_err(dev, xdp, fwd ? fwd->ifindex : 0, \
+ trace_xdp_redirect_map_err(dev, xdp, devmap_ifindex(fwd, map), \
err, map, idx)
+TRACE_EVENT(xdp_cpumap_kthread,
+
+ TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
+ int sched),
+
+ TP_ARGS(map_id, processed, drops, sched),
+
+ TP_STRUCT__entry(
+ __field(int, map_id)
+ __field(u32, act)
+ __field(int, cpu)
+ __field(unsigned int, drops)
+ __field(unsigned int, processed)
+ __field(int, sched)
+ ),
+
+ TP_fast_assign(
+ __entry->map_id = map_id;
+ __entry->act = XDP_REDIRECT;
+ __entry->cpu = smp_processor_id();
+ __entry->drops = drops;
+ __entry->processed = processed;
+ __entry->sched = sched;
+ ),
+
+ TP_printk("kthread"
+ " cpu=%d map_id=%d action=%s"
+ " processed=%u drops=%u"
+ " sched=%d",
+ __entry->cpu, __entry->map_id,
+ __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
+ __entry->processed, __entry->drops,
+ __entry->sched)
+);
+
+TRACE_EVENT(xdp_cpumap_enqueue,
+
+ TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
+ int to_cpu),
+
+ TP_ARGS(map_id, processed, drops, to_cpu),
+
+ TP_STRUCT__entry(
+ __field(int, map_id)
+ __field(u32, act)
+ __field(int, cpu)
+ __field(unsigned int, drops)
+ __field(unsigned int, processed)
+ __field(int, to_cpu)
+ ),
+
+ TP_fast_assign(
+ __entry->map_id = map_id;
+ __entry->act = XDP_REDIRECT;
+ __entry->cpu = smp_processor_id();
+ __entry->drops = drops;
+ __entry->processed = processed;
+ __entry->to_cpu = to_cpu;
+ ),
+
+ TP_printk("enqueue"
+ " cpu=%d map_id=%d action=%s"
+ " processed=%u drops=%u"
+ " to_cpu=%d",
+ __entry->cpu, __entry->map_id,
+ __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
+ __entry->processed, __entry->drops,
+ __entry->to_cpu)
+);
+
#endif /* _TRACE_XDP_H */
#include <trace/define_trace.h>
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index a7c8b452aab9..b8adf05c534e 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -148,7 +148,6 @@ DECLARE_EVENT_CLASS(xen_mmu__set_pte,
TP_ARGS(ptep, pteval))
DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte);
-DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte_atomic);
TRACE_EVENT(xen_mmu_set_pte_at,
TP_PROTO(struct mm_struct *mm, unsigned long addr,
@@ -170,21 +169,6 @@ TRACE_EVENT(xen_mmu_set_pte_at,
(int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
);
-TRACE_EVENT(xen_mmu_pte_clear,
- TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep),
- TP_ARGS(mm, addr, ptep),
- TP_STRUCT__entry(
- __field(struct mm_struct *, mm)
- __field(unsigned long, addr)
- __field(pte_t *, ptep)
- ),
- TP_fast_assign(__entry->mm = mm;
- __entry->addr = addr;
- __entry->ptep = ptep),
- TP_printk("mm %p addr %lx ptep %p",
- __entry->mm, __entry->addr, __entry->ptep)
- );
-
TRACE_DEFINE_SIZEOF(pmdval_t);
TRACE_EVENT(xen_mmu_set_pmd,
@@ -202,6 +186,24 @@ TRACE_EVENT(xen_mmu_set_pmd,
(int)sizeof(pmdval_t) * 2, (unsigned long long)__entry->pmdval)
);
+#ifdef CONFIG_X86_PAE
+DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte_atomic);
+
+TRACE_EVENT(xen_mmu_pte_clear,
+ TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep),
+ TP_ARGS(mm, addr, ptep),
+ TP_STRUCT__entry(
+ __field(struct mm_struct *, mm)
+ __field(unsigned long, addr)
+ __field(pte_t *, ptep)
+ ),
+ TP_fast_assign(__entry->mm = mm;
+ __entry->addr = addr;
+ __entry->ptep = ptep),
+ TP_printk("mm %p addr %lx ptep %p",
+ __entry->mm, __entry->addr, __entry->ptep)
+ );
+
TRACE_EVENT(xen_mmu_pmd_clear,
TP_PROTO(pmd_t *pmdp),
TP_ARGS(pmdp),
@@ -211,6 +213,7 @@ TRACE_EVENT(xen_mmu_pmd_clear,
TP_fast_assign(__entry->pmdp = pmdp),
TP_printk("pmdp %p", __entry->pmdp)
);
+#endif
#if CONFIG_PGTABLE_LEVELS >= 4