author    Guangguan Wang <guangguan.wang@linux.alibaba.com>  2022-07-14 17:44:01 +0800
committer David S. Miller <davem@davemloft.net>              2022-07-18 11:19:17 +0100
commit    0ef69e788411cba2af017db731a9fc62d255e9ac (patch)
tree      4eef442d258216d382747071b839592992519931 /net/smc/smc_ib.h
parent    6d52e2de6415b7a035b3e8dc4ccffd0da25bbfb9 (diff)
net/smc: optimize for smc_sndbuf_sync_sg_for_device and smc_rmb_sync_sg_for_cpu
Some CPUs, such as Xeon, can guarantee DMA cache coherency, so there is no need to call the DMA sync APIs to flush caches on them. To avoid calling the DMA sync APIs on the IO path, use dma_need_sync to check whether an smc_buf_desc needs DMA sync when the smc_buf_desc is created.

Signed-off-by: Guangguan Wang <guangguan.wang@linux.alibaba.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
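The helper declared below is implemented in net/smc/smc_ib.c, which this page does not show. The following is only a minimal sketch of how such a check could be written on top of the existing dma_need_sync() helper from linux/dma-mapping.h; the field names used here (buf_slot->sgt[], lnk->link_idx, lnk->smcibdev->ibdev) are taken from the surrounding SMC code and should be treated as assumptions:

/* Sketch only -- not the authoritative implementation. */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

bool smc_ib_is_sg_need_sync(struct smc_link *lnk,
			    struct smc_buf_desc *buf_slot)
{
	struct scatterlist *sg;
	unsigned int i;

	/* Walk the DMA-mapped scatterlist of this link's buffer and ask the
	 * DMA layer whether any segment actually needs cache maintenance.
	 */
	for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
		    buf_slot->sgt[lnk->link_idx].nents, i) {
		if (!sg_dma_len(sg))
			break;
		if (dma_need_sync(lnk->smcibdev->ibdev->dma_device,
				  sg_dma_address(sg)))
			return true;	/* at least one segment needs syncing */
	}
	return false;	/* coherent DMA: sync calls can be skipped on the IO path */
}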
Diffstat (limited to 'net/smc/smc_ib.h')
-rw-r--r--  net/smc/smc_ib.h  |  2 ++
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
index 5d8b49c57f50..034295676e88 100644
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -102,6 +102,8 @@ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev);
 int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
 			     struct smc_buf_desc *buf_slot, u8 link_idx);
 void smc_ib_put_memory_region(struct ib_mr *mr);
+bool smc_ib_is_sg_need_sync(struct smc_link *lnk,
+			    struct smc_buf_desc *buf_slot);
 void smc_ib_sync_sg_for_cpu(struct smc_link *lnk,
 			    struct smc_buf_desc *buf_slot,
 			    enum dma_data_direction data_direction);
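The commit message implies that the result of this check is recorded when the smc_buf_desc is created and consulted on the IO path, so the per-packet sync calls can bail out early on coherent hardware. A hypothetical caller-side sketch follows; the flag name (is_dma_need_sync) and its per-link bit layout are assumptions, not shown in this hunk:

/* At smc_buf_desc creation, after DMA-mapping the buffer for a link: */
if (smc_ib_is_sg_need_sync(lnk, buf_desc))
	buf_desc->is_dma_need_sync |= 1U << lnk->link_idx;

/* On the IO path, e.g. at the top of smc_ib_sync_sg_for_cpu(): */
if (!(buf_slot->is_dma_need_sync & (1U << lnk->link_idx)))
	return;		/* coherent DMA: nothing to flush */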