author    Linus Torvalds <torvalds@linux-foundation.org>    2020-12-17 12:15:03 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2020-12-17 12:15:03 -0800
commit    74f602dc96dd854c7b2034947798c1e2a6b84066 (patch)
tree      3fa9f58c0e36e2caa7478d4d47bb0263e267f1ac /include/trace
parent    be695ee29e8fc0af266d9f1882868c47da01a790 (diff)
parent    52104f274e2d7f134d34bab11cada8913d4544e2 (diff)
Merge tag 'nfs-for-5.11-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs
Pull NFS client updates from Trond Myklebust:
 "Highlights include:

  Features:
   - NFSv3: Add emulation of lookupp() to improve open_by_filehandle()
     support
   - A series of patches to improve readdir performance, particularly
     with large directories
   - Basic support for using NFS/RDMA with the pNFS files and flexfiles
     drivers
   - Micro-optimisations for RDMA
   - RDMA tracing improvements

  Bugfixes:
   - Fix a long standing bug with xs_read_xdr_buf() when receiving
     partial pages (Dan Aloni)
   - Various fixes for getxattr and listxattr, when used over non-TCP
     transports
   - Fixes for containerised NFS from Sargun Dhillon
   - Switch nfsiod to be an UNBOUND workqueue (Neil Brown)
   - READDIR should not ask for security label information if there is
     no LSM policy (Olga Kornievskaia)
   - Avoid using interval-based rebinding with TCP in lockd (Calum
     Mackay)
   - A series of RPC and NFS layer fixes to support the NFSv4.2
     READ_PLUS code
   - A couple of fixes for pnfs/flexfiles read failover

  Cleanups:
   - Various cleanups for the SUNRPC xdr code in conjunction with the
     READ_PLUS fixes"

* tag 'nfs-for-5.11-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs: (90 commits)
  NFS/pNFS: Fix a typo in ff_layout_resend_pnfs_read()
  pNFS/flexfiles: Avoid spurious layout returns in ff_layout_choose_ds_for_read
  NFSv4/pnfs: Add tracing for the deviceid cache
  fs/lockd: convert comma to semicolon
  NFSv4.2: fix error return on memory allocation failure
  NFSv4.2/pnfs: Don't use READ_PLUS with pNFS yet
  NFSv4.2: Deal with potential READ_PLUS data extent buffer overflow
  NFSv4.2: Don't error when exiting early on a READ_PLUS buffer overflow
  NFSv4.2: Handle hole lengths that exceed the READ_PLUS read buffer
  NFSv4.2: decode_read_plus_hole() needs to check the extent offset
  NFSv4.2: decode_read_plus_data() must skip padding after data segment
  NFSv4.2: Ensure we always reset the result->count in decode_read_plus()
  SUNRPC: When expanding the buffer, we may need grow the sparse pages
  SUNRPC: Cleanup - constify a number of xdr_buf helpers
  SUNRPC: Clean up open coded setting of the xdr_stream 'nwords' field
  SUNRPC: _copy_to/from_pages() now check for zero length
  SUNRPC: Cleanup xdr_shrink_bufhead()
  SUNRPC: Fix xdr_expand_hole()
  SUNRPC: Fixes for xdr_align_data()
  SUNRPC: _shift_data_left/right_pages should check the shift length
  ...
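[Editor's illustrative sketch, not part of the patch: the RDMA tracing
improvements in the diff below repeatedly apply one pattern -- event
classes stop recording raw kernel pointers and instead capture
human-readable data with the tracepoint string helpers, shared across
events via DECLARE_EVENT_CLASS/DEFINE_EVENT. A minimal, self-contained
example of that pattern follows; the "sample" system and event names
are hypothetical.]

/* Hypothetical trace header sketching the pattern used in the diff. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sample

#if !defined(_TRACE_SAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SAMPLE_H

#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(sample_peer_class,
	TP_PROTO(const char *addr, u32 xid),

	TP_ARGS(addr, xid),

	TP_STRUCT__entry(
		__field(u32, xid)
		/* __string() reserves ring-buffer space for a copy of the
		 * string, so the record stays meaningful after the source
		 * structure is freed -- unlike a recorded raw pointer.
		 */
		__string(addr, addr)
	),

	TP_fast_assign(
		__entry->xid = xid;
		__assign_str(addr, addr);
	),

	TP_printk("peer=%s xid=0x%08x", __get_str(addr), __entry->xid)
);

/* Each DEFINE_EVENT stamps out one tracepoint sharing the class layout */
DEFINE_EVENT(sample_peer_class, sample_peer_connect,
	TP_PROTO(const char *addr, u32 xid),
	TP_ARGS(addr, xid));

#endif /* _TRACE_SAMPLE_H */

#include <trace/define_trace.h>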
Diffstat (limited to 'include/trace')
-rw-r--r--  include/trace/events/rpcrdma.h  450
1 file changed, 212 insertions(+), 238 deletions(-)
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index 896aafc37b09..76e85e16854b 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -60,7 +60,7 @@ DECLARE_EVENT_CLASS(rpcrdma_completion_class,
), \
TP_ARGS(wc, cid))
-DECLARE_EVENT_CLASS(xprtrdma_reply_event,
+DECLARE_EVENT_CLASS(xprtrdma_reply_class,
TP_PROTO(
const struct rpcrdma_rep *rep
),
@@ -68,29 +68,30 @@ DECLARE_EVENT_CLASS(xprtrdma_reply_event,
TP_ARGS(rep),
TP_STRUCT__entry(
- __field(const void *, rep)
- __field(const void *, r_xprt)
__field(u32, xid)
__field(u32, version)
__field(u32, proc)
+ __string(addr, rpcrdma_addrstr(rep->rr_rxprt))
+ __string(port, rpcrdma_portstr(rep->rr_rxprt))
),
TP_fast_assign(
- __entry->rep = rep;
- __entry->r_xprt = rep->rr_rxprt;
__entry->xid = be32_to_cpu(rep->rr_xid);
__entry->version = be32_to_cpu(rep->rr_vers);
__entry->proc = be32_to_cpu(rep->rr_proc);
+ __assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt));
+ __assign_str(port, rpcrdma_portstr(rep->rr_rxprt));
),
- TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
- __entry->r_xprt, __entry->xid, __entry->rep,
- __entry->version, __entry->proc
+ TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u",
+ __get_str(addr), __get_str(port),
+ __entry->xid, __entry->version, __entry->proc
)
);
#define DEFINE_REPLY_EVENT(name) \
- DEFINE_EVENT(xprtrdma_reply_event, name, \
+ DEFINE_EVENT(xprtrdma_reply_class, \
+ xprtrdma_reply_##name##_err, \
TP_PROTO( \
const struct rpcrdma_rep *rep \
), \
@@ -261,54 +262,67 @@ DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
), \
TP_ARGS(task, mr, nsegs))
-DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
+TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
+TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
+TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
+TRACE_DEFINE_ENUM(DMA_NONE);
+
+#define xprtrdma_show_direction(x) \
+ __print_symbolic(x, \
+ { DMA_BIDIRECTIONAL, "BIDIR" }, \
+ { DMA_TO_DEVICE, "TO_DEVICE" }, \
+ { DMA_FROM_DEVICE, "FROM_DEVICE" }, \
+ { DMA_NONE, "NONE" })
+
+DECLARE_EVENT_CLASS(xprtrdma_mr_class,
TP_PROTO(
- const struct ib_wc *wc,
- const struct rpcrdma_frwr *frwr
+ const struct rpcrdma_mr *mr
),
- TP_ARGS(wc, frwr),
+ TP_ARGS(mr),
TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
__field(u32, mr_id)
- __field(unsigned int, status)
- __field(unsigned int, vendor_err)
+ __field(int, nents)
+ __field(u32, handle)
+ __field(u32, length)
+ __field(u64, offset)
+ __field(u32, dir)
),
TP_fast_assign(
- __entry->mr_id = frwr->fr_mr->res.id;
- __entry->status = wc->status;
- __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
+ const struct rpcrdma_req *req = mr->mr_req;
+ const struct rpc_task *task = req->rl_slot.rq_task;
+
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->mr_id = mr->frwr.fr_mr->res.id;
+ __entry->nents = mr->mr_nents;
+ __entry->handle = mr->mr_handle;
+ __entry->length = mr->mr_length;
+ __entry->offset = mr->mr_offset;
+ __entry->dir = mr->mr_dir;
),
- TP_printk(
- "mr.id=%u: %s (%u/0x%x)",
- __entry->mr_id, rdma_show_wc_status(__entry->status),
- __entry->status, __entry->vendor_err
+ TP_printk("task:%u@%u mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
+ __entry->task_id, __entry->client_id,
+ __entry->mr_id, __entry->nents, __entry->length,
+ (unsigned long long)__entry->offset, __entry->handle,
+ xprtrdma_show_direction(__entry->dir)
)
);
-#define DEFINE_FRWR_DONE_EVENT(name) \
- DEFINE_EVENT(xprtrdma_frwr_done, name, \
+#define DEFINE_MR_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_mr_class, \
+ xprtrdma_mr_##name, \
TP_PROTO( \
- const struct ib_wc *wc, \
- const struct rpcrdma_frwr *frwr \
+ const struct rpcrdma_mr *mr \
), \
- TP_ARGS(wc, frwr))
-
-TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
-TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
-TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
-TRACE_DEFINE_ENUM(DMA_NONE);
-
-#define xprtrdma_show_direction(x) \
- __print_symbolic(x, \
- { DMA_BIDIRECTIONAL, "BIDIR" }, \
- { DMA_TO_DEVICE, "TO_DEVICE" }, \
- { DMA_FROM_DEVICE, "FROM_DEVICE" }, \
- { DMA_NONE, "NONE" })
+ TP_ARGS(mr))
-DECLARE_EVENT_CLASS(xprtrdma_mr,
+DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class,
TP_PROTO(
const struct rpcrdma_mr *mr
),
@@ -340,45 +354,47 @@ DECLARE_EVENT_CLASS(xprtrdma_mr,
)
);
-#define DEFINE_MR_EVENT(name) \
- DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \
- TP_PROTO( \
- const struct rpcrdma_mr *mr \
- ), \
+#define DEFINE_ANON_MR_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_anonymous_mr_class, \
+ xprtrdma_mr_##name, \
+ TP_PROTO( \
+ const struct rpcrdma_mr *mr \
+ ), \
TP_ARGS(mr))
-DECLARE_EVENT_CLASS(xprtrdma_cb_event,
+DECLARE_EVENT_CLASS(xprtrdma_callback_class,
TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
const struct rpc_rqst *rqst
),
- TP_ARGS(rqst),
+ TP_ARGS(r_xprt, rqst),
TP_STRUCT__entry(
- __field(const void *, rqst)
- __field(const void *, rep)
- __field(const void *, req)
__field(u32, xid)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
- __entry->rqst = rqst;
- __entry->req = rpcr_to_rdmar(rqst);
- __entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
__entry->xid = be32_to_cpu(rqst->rq_xid);
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
- __entry->xid, __entry->rqst, __entry->req, __entry->rep
+ TP_printk("peer=[%s]:%s xid=0x%08x",
+ __get_str(addr), __get_str(port), __entry->xid
)
);
-#define DEFINE_CB_EVENT(name) \
- DEFINE_EVENT(xprtrdma_cb_event, name, \
+#define DEFINE_CALLBACK_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_callback_class, \
+ xprtrdma_cb_##name, \
TP_PROTO( \
+ const struct rpcrdma_xprt *r_xprt, \
const struct rpc_rqst *rqst \
), \
- TP_ARGS(rqst))
+ TP_ARGS(r_xprt, rqst))
/**
** Connection events
@@ -549,61 +565,33 @@ TRACE_EVENT(xprtrdma_createmrs,
)
);
-TRACE_EVENT(xprtrdma_mr_get,
- TP_PROTO(
- const struct rpcrdma_req *req
- ),
-
- TP_ARGS(req),
-
- TP_STRUCT__entry(
- __field(const void *, req)
- __field(unsigned int, task_id)
- __field(unsigned int, client_id)
- __field(u32, xid)
- ),
-
- TP_fast_assign(
- const struct rpc_rqst *rqst = &req->rl_slot;
-
- __entry->req = req;
- __entry->task_id = rqst->rq_task->tk_pid;
- __entry->client_id = rqst->rq_task->tk_client->cl_clid;
- __entry->xid = be32_to_cpu(rqst->rq_xid);
- ),
-
- TP_printk("task:%u@%u xid=0x%08x req=%p",
- __entry->task_id, __entry->client_id, __entry->xid,
- __entry->req
- )
-);
-
-TRACE_EVENT(xprtrdma_nomrs,
+TRACE_EVENT(xprtrdma_nomrs_err,
TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
const struct rpcrdma_req *req
),
- TP_ARGS(req),
+ TP_ARGS(r_xprt, req),
TP_STRUCT__entry(
- __field(const void *, req)
__field(unsigned int, task_id)
__field(unsigned int, client_id)
- __field(u32, xid)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
const struct rpc_rqst *rqst = &req->rl_slot;
- __entry->req = req;
__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client->cl_clid;
- __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("task:%u@%u xid=0x%08x req=%p",
- __entry->task_id, __entry->client_id, __entry->xid,
- __entry->req
+ TP_printk("peer=[%s]:%s task:%u@%u",
+ __get_str(addr), __get_str(port),
+ __entry->task_id, __entry->client_id
)
);
@@ -735,8 +723,8 @@ TRACE_EVENT(xprtrdma_post_send,
TP_ARGS(req),
TP_STRUCT__entry(
- __field(const void *, req)
- __field(const void *, sc)
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(int, num_sge)
@@ -745,20 +733,21 @@ TRACE_EVENT(xprtrdma_post_send,
TP_fast_assign(
const struct rpc_rqst *rqst = &req->rl_slot;
+ const struct rpcrdma_sendctx *sc = req->rl_sendctx;
+ __entry->cq_id = sc->sc_cid.ci_queue_id;
+ __entry->completion_id = sc->sc_cid.ci_completion_id;
__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client ?
rqst->rq_task->tk_client->cl_clid : -1;
- __entry->req = req;
- __entry->sc = req->rl_sendctx;
__entry->num_sge = req->rl_wr.num_sge;
__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
),
- TP_printk("task:%u@%u req=%p sc=%p (%d SGE%s) %s",
+ TP_printk("task:%u@%u cq.id=%u cid=%d (%d SGE%s) %s",
__entry->task_id, __entry->client_id,
- __entry->req, __entry->sc, __entry->num_sge,
- (__entry->num_sge == 1 ? "" : "s"),
+ __entry->cq_id, __entry->completion_id,
+ __entry->num_sge, (__entry->num_sge == 1 ? "" : "s"),
(__entry->signaled ? "signaled" : "")
)
);
@@ -771,15 +760,17 @@ TRACE_EVENT(xprtrdma_post_recv,
TP_ARGS(rep),
TP_STRUCT__entry(
- __field(const void *, rep)
+ __field(u32, cq_id)
+ __field(int, completion_id)
),
TP_fast_assign(
- __entry->rep = rep;
+ __entry->cq_id = rep->rr_cid.ci_queue_id;
+ __entry->completion_id = rep->rr_cid.ci_completion_id;
),
- TP_printk("rep=%p",
- __entry->rep
+ TP_printk("cq.id=%d cid=%d",
+ __entry->cq_id, __entry->completion_id
)
);
@@ -816,7 +807,7 @@ TRACE_EVENT(xprtrdma_post_recvs,
)
);
-TRACE_EVENT(xprtrdma_post_linv,
+TRACE_EVENT(xprtrdma_post_linv_err,
TP_PROTO(
const struct rpcrdma_req *req,
int status
@@ -825,19 +816,21 @@ TRACE_EVENT(xprtrdma_post_linv,
TP_ARGS(req, status),
TP_STRUCT__entry(
- __field(const void *, req)
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
__field(int, status)
- __field(u32, xid)
),
TP_fast_assign(
- __entry->req = req;
+ const struct rpc_task *task = req->rl_slot.rq_task;
+
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
__entry->status = status;
- __entry->xid = be32_to_cpu(req->rl_slot.rq_xid);
),
- TP_printk("req=%p xid=0x%08x status=%d",
- __entry->req, __entry->xid, __entry->status
+ TP_printk("task:%u@%u status=%d",
+ __entry->task_id, __entry->client_id, __entry->status
)
);
@@ -845,75 +838,12 @@ TRACE_EVENT(xprtrdma_post_linv,
** Completion events
**/
-TRACE_EVENT(xprtrdma_wc_send,
- TP_PROTO(
- const struct rpcrdma_sendctx *sc,
- const struct ib_wc *wc
- ),
-
- TP_ARGS(sc, wc),
-
- TP_STRUCT__entry(
- __field(const void *, req)
- __field(const void *, sc)
- __field(unsigned int, unmap_count)
- __field(unsigned int, status)
- __field(unsigned int, vendor_err)
- ),
-
- TP_fast_assign(
- __entry->req = sc->sc_req;
- __entry->sc = sc;
- __entry->unmap_count = sc->sc_unmap_count;
- __entry->status = wc->status;
- __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
- ),
-
- TP_printk("req=%p sc=%p unmapped=%u: %s (%u/0x%x)",
- __entry->req, __entry->sc, __entry->unmap_count,
- rdma_show_wc_status(__entry->status),
- __entry->status, __entry->vendor_err
- )
-);
-
-TRACE_EVENT(xprtrdma_wc_receive,
- TP_PROTO(
- const struct ib_wc *wc
- ),
-
- TP_ARGS(wc),
-
- TP_STRUCT__entry(
- __field(const void *, rep)
- __field(u32, byte_len)
- __field(unsigned int, status)
- __field(u32, vendor_err)
- ),
-
- TP_fast_assign(
- __entry->rep = container_of(wc->wr_cqe, struct rpcrdma_rep,
- rr_cqe);
- __entry->status = wc->status;
- if (wc->status) {
- __entry->byte_len = 0;
- __entry->vendor_err = wc->vendor_err;
- } else {
- __entry->byte_len = wc->byte_len;
- __entry->vendor_err = 0;
- }
- ),
-
- TP_printk("rep=%p %u bytes: %s (%u/0x%x)",
- __entry->rep, __entry->byte_len,
- rdma_show_wc_status(__entry->status),
- __entry->status, __entry->vendor_err
- )
-);
-
-DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
-DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
-DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
-DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_done);
+DEFINE_COMPLETION_EVENT(xprtrdma_wc_receive);
+DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
+DEFINE_COMPLETION_EVENT(xprtrdma_wc_fastreg);
+DEFINE_COMPLETION_EVENT(xprtrdma_wc_li);
+DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_wake);
+DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_done);
TRACE_EVENT(xprtrdma_frwr_alloc,
TP_PROTO(
@@ -1036,9 +966,9 @@ TRACE_EVENT(xprtrdma_frwr_maperr,
DEFINE_MR_EVENT(localinv);
DEFINE_MR_EVENT(map);
-DEFINE_MR_EVENT(unmap);
-DEFINE_MR_EVENT(reminv);
-DEFINE_MR_EVENT(recycle);
+
+DEFINE_ANON_MR_EVENT(unmap);
+DEFINE_ANON_MR_EVENT(recycle);
TRACE_EVENT(xprtrdma_dma_maperr,
TP_PROTO(
@@ -1066,17 +996,14 @@ TRACE_EVENT(xprtrdma_reply,
TP_PROTO(
const struct rpc_task *task,
const struct rpcrdma_rep *rep,
- const struct rpcrdma_req *req,
unsigned int credits
),
- TP_ARGS(task, rep, req, credits),
+ TP_ARGS(task, rep, credits),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
- __field(const void *, rep)
- __field(const void *, req)
__field(u32, xid)
__field(unsigned int, credits)
),
@@ -1084,49 +1011,102 @@ TRACE_EVENT(xprtrdma_reply,
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
- __entry->rep = rep;
- __entry->req = req;
__entry->xid = be32_to_cpu(rep->rr_xid);
__entry->credits = credits;
),
- TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
+ TP_printk("task:%u@%u xid=0x%08x credits=%u",
__entry->task_id, __entry->client_id, __entry->xid,
- __entry->credits, __entry->rep, __entry->req
+ __entry->credits
)
);
-TRACE_EVENT(xprtrdma_defer_cmp,
+DEFINE_REPLY_EVENT(vers);
+DEFINE_REPLY_EVENT(rqst);
+DEFINE_REPLY_EVENT(short);
+DEFINE_REPLY_EVENT(hdr);
+
+TRACE_EVENT(xprtrdma_err_vers,
TP_PROTO(
- const struct rpcrdma_rep *rep
+ const struct rpc_rqst *rqst,
+ __be32 *min,
+ __be32 *max
),
- TP_ARGS(rep),
+ TP_ARGS(rqst, min, max),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
- __field(const void *, rep)
__field(u32, xid)
+ __field(u32, min)
+ __field(u32, max)
),
TP_fast_assign(
- __entry->task_id = rep->rr_rqst->rq_task->tk_pid;
- __entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
- __entry->rep = rep;
- __entry->xid = be32_to_cpu(rep->rr_xid);
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->min = be32_to_cpup(min);
+ __entry->max = be32_to_cpup(max);
),
- TP_printk("task:%u@%u xid=0x%08x rep=%p",
+ TP_printk("task:%u@%u xid=0x%08x versions=[%u, %u]",
__entry->task_id, __entry->client_id, __entry->xid,
- __entry->rep
+ __entry->min, __entry->max
+ )
+);
+
+TRACE_EVENT(xprtrdma_err_chunk,
+ TP_PROTO(
+ const struct rpc_rqst *rqst
+ ),
+
+ TP_ARGS(rqst),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x",
+ __entry->task_id, __entry->client_id, __entry->xid
)
);
-DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
-DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
-DEFINE_REPLY_EVENT(xprtrdma_reply_short);
-DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);
+TRACE_EVENT(xprtrdma_err_unrecognized,
+ TP_PROTO(
+ const struct rpc_rqst *rqst,
+ __be32 *procedure
+ ),
+
+ TP_ARGS(rqst, procedure),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ __field(u32, procedure)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->procedure = be32_to_cpup(procedure);
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x procedure=%u",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->procedure
+ )
+);
TRACE_EVENT(xprtrdma_fixup,
TP_PROTO(
@@ -1187,6 +1167,28 @@ TRACE_EVENT(xprtrdma_decode_seg,
)
);
+TRACE_EVENT(xprtrdma_mrs_zap,
+ TP_PROTO(
+ const struct rpc_task *task
+ ),
+
+ TP_ARGS(task),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ ),
+
+ TP_printk("task:%u@%u",
+ __entry->task_id, __entry->client_id
+ )
+);
+
/**
** Callback events
**/
@@ -1219,36 +1221,8 @@ TRACE_EVENT(xprtrdma_cb_setup,
)
);
-DEFINE_CB_EVENT(xprtrdma_cb_call);
-DEFINE_CB_EVENT(xprtrdma_cb_reply);
-
-TRACE_EVENT(xprtrdma_leaked_rep,
- TP_PROTO(
- const struct rpc_rqst *rqst,
- const struct rpcrdma_rep *rep
- ),
-
- TP_ARGS(rqst, rep),
-
- TP_STRUCT__entry(
- __field(unsigned int, task_id)
- __field(unsigned int, client_id)
- __field(u32, xid)
- __field(const void *, rep)
- ),
-
- TP_fast_assign(
- __entry->task_id = rqst->rq_task->tk_pid;
- __entry->client_id = rqst->rq_task->tk_client->cl_clid;
- __entry->xid = be32_to_cpu(rqst->rq_xid);
- __entry->rep = rep;
- ),
-
- TP_printk("task:%u@%u xid=0x%08x rep=%p",
- __entry->task_id, __entry->client_id, __entry->xid,
- __entry->rep
- )
-);
+DEFINE_CALLBACK_EVENT(call);
+DEFINE_CALLBACK_EVENT(reply);
/**
** Server-side RPC/RDMA events