author     Maor Gottlieb <maorg@mellanox.com>        2020-06-21 13:41:47 +0300
committer  Jason Gunthorpe <jgg@nvidia.com>          2020-07-03 09:16:25 -0300
commit     d473f4dc2f95c8c856b1659ced3502802b7d2fbe
tree       7eaf69c0231b8059552fe3033d8025b78616a4ec
parent     0cb42c0265837fafa2b4f302c8a7fed2631d7869
RDMA/mlx5: Introduce ODP prefetch counter
For debugging purposes it is easier to understand whether prefetch works correctly when it has its own counter. Introduce an ODP prefetch counter and count, per MR, the total number of prefetched pages.

In addition, remove a comment which is no longer relevant and was not in the correct place anyway.

Link: https://lore.kernel.org/r/20200621104147.53795-1-leon@kernel.org
Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
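For reference, per-MR accounting in this driver goes through the mlx5_update_odp_stats() helper. Below is a minimal sketch of the pattern, assuming the helper expands to an atomic64_add() on the MR's odp_stats field (the exact definition lives in drivers/infiniband/hw/mlx5/mlx5_ib.h and may differ slightly):

/* Sketch only: assumed expansion of the per-MR ODP statistics update. */
#define mlx5_update_odp_stats(mr, counter_name, value) \
        atomic64_add(value, &((mr)->odp_stats.counter_name))

/* Used in the hunks below to add the number of pages returned by
 * pagefault_mr() to the new prefetch counter of that MR. */
mlx5_update_odp_stats(mr, prefetch, ret);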
-rw-r--r--   drivers/infiniband/hw/mlx5/odp.c       | 19
-rw-r--r--   drivers/infiniband/hw/mlx5/restrack.c  |  3
-rw-r--r--   include/rdma/ib_verbs.h                |  1
3 files changed, 14 insertions(+), 9 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 7d2ec9ee5097..ee88b32d143d 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -913,11 +913,6 @@ next_mr:
         if (ret < 0)
                 goto srcu_unlock;

-        /*
-         * When prefetching a page, page fault is generated
-         * in order to bring the page to the main memory.
-         * In the current flow, page faults are being counted.
-         */
         mlx5_update_odp_stats(mr, faults, ret);

         npages += ret;
@@ -1755,12 +1750,17 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
         struct prefetch_mr_work *work =
                 container_of(w, struct prefetch_mr_work, work);
         u32 bytes_mapped = 0;
+        int ret;
         u32 i;

-        for (i = 0; i < work->num_sge; ++i)
-                pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
-                             work->frags[i].length, &bytes_mapped,
-                             work->pf_flags);
+        for (i = 0; i < work->num_sge; ++i) {
+                ret = pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
+                                   work->frags[i].length, &bytes_mapped,
+                                   work->pf_flags);
+                if (ret <= 0)
+                        continue;
+                mlx5_update_odp_stats(work->frags[i].mr, prefetch, ret);
+        }

         destroy_prefetch_work(work);
 }
@@ -1818,6 +1818,7 @@ static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
                                    &bytes_mapped, pf_flags);
                 if (ret < 0)
                         goto out;
+                mlx5_update_odp_stats(mr, prefetch, ret);
         }

         ret = 0;
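A note on the two call sites above: pagefault_mr() returns the number of pages it mapped (>= 0) or a negative errno. The asynchronous work handler is best-effort, so it simply skips non-positive results, whereas mlx5_ib_prefetch_sg_list() aborts on error and therefore only reaches the counter update with ret >= 0. A small sketch of that counting convention (prefetch_one_range() is a hypothetical stand-in, not a driver function):

/* Sketch: count only pages that were actually made resident.
 * prefetch_one_range() stands in for pagefault_mr() and returns the
 * number of pages mapped (>= 0) or a negative errno on failure. */
static void count_prefetched_pages(struct mlx5_ib_mr *mr, u64 io_virt,
                                   size_t length, u32 flags)
{
        u32 bytes_mapped = 0;
        int ret;

        ret = prefetch_one_range(mr, io_virt, length, &bytes_mapped, flags);
        if (ret <= 0)
                return; /* error or nothing mapped: leave the counter alone */

        mlx5_update_odp_stats(mr, prefetch, ret);
}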
diff --git a/drivers/infiniband/hw/mlx5/restrack.c b/drivers/infiniband/hw/mlx5/restrack.c
index 224a63975822..32c6d0397946 100644
--- a/drivers/infiniband/hw/mlx5/restrack.c
+++ b/drivers/infiniband/hw/mlx5/restrack.c
@@ -99,6 +99,9 @@ int mlx5_ib_fill_stat_mr_entry(struct sk_buff *msg,
                     msg, "page_invalidations",
                     atomic64_read(&mr->odp_stats.invalidations)))
                 goto err_table;
+        if (rdma_nl_stat_hwcounter_entry(msg, "page_prefetch",
+                                         atomic64_read(&mr->odp_stats.prefetch)))
+                goto err_table;

         nla_nest_end(msg, table_attr);
         return 0;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 1e902a8f1713..f6b51a709818 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2271,6 +2271,7 @@ struct rdma_netdev_alloc_params {
 struct ib_odp_counters {
         atomic64_t faults;
         atomic64_t invalidations;
+        atomic64_t prefetch;
 };

 struct ib_counters {