author     Christoph Hellwig <hch@lst.de>          2020-06-29 15:03:56 +0200
committer  Daniel Borkmann <daniel@iogearbox.net>  2020-06-30 15:44:03 +0200
commit     3aa91625007807bfca4155df1867a5c924a08662 (patch)
tree       200b5d80ea2b377dcdc560056f1fc247dc18024f
parent     2bdeb3ed547d8822b2566797afa6c2584abdb119 (diff)
dma-mapping: Add a new dma_need_sync API
Add a new API to check if calls to dma_sync_single_for_{device,cpu} are
required for a given DMA streaming mapping.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20200629130359.2690853-2-hch@lst.de
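As a rough illustration of the intended use (not part of this patch; the rx_buf
structure and helpers below are hypothetical), a driver might cache the result
of dma_need_sync() once at mapping time and only issue the sync calls on hot
paths where they actually do something:

/*
 * Hypothetical driver sketch: remember whether this mapping ever needs
 * dma_sync_single_for_{cpu,device}. On cache-coherent setups without a
 * swiotlb bounce buffer, dma_need_sync() returns false and the fast path
 * can skip the sync calls entirely.
 */
#include <linux/dma-mapping.h>

struct rx_buf {
	void		*vaddr;
	dma_addr_t	dma;
	bool		need_sync;	/* cached dma_need_sync() result */
};

static int rx_buf_map(struct device *dev, struct rx_buf *buf, size_t len)
{
	buf->dma = dma_map_single(dev, buf->vaddr, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, buf->dma))
		return -ENOMEM;
	buf->need_sync = dma_need_sync(dev, buf->dma);
	return 0;
}

static void rx_buf_recv(struct device *dev, struct rx_buf *buf, size_t len)
{
	/* Hand ownership back to the CPU only when the sync is not a no-op. */
	if (buf->need_sync)
		dma_sync_single_for_cpu(dev, buf->dma, len, DMA_BIDIRECTIONAL);
	/* ... process the received data ... */
}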
-rw-r--r--  Documentation/core-api/dma-api.rst   8
-rw-r--r--  include/linux/dma-direct.h           1
-rw-r--r--  include/linux/dma-mapping.h          5
-rw-r--r--  kernel/dma/direct.c                  6
-rw-r--r--  kernel/dma/mapping.c                10
5 files changed, 30 insertions(+), 0 deletions(-)
diff --git a/Documentation/core-api/dma-api.rst b/Documentation/core-api/dma-api.rst
index 2d8d2fed7317..f41620439ef3 100644
--- a/Documentation/core-api/dma-api.rst
+++ b/Documentation/core-api/dma-api.rst
@@ -206,6 +206,14 @@ others should not be larger than the returned value.
::
+ bool
+ dma_need_sync(struct device *dev, dma_addr_t dma_addr);
+
+Returns %true if dma_sync_single_for_{device,cpu} calls are required to
+transfer memory ownership. Returns %false if those calls can be skipped.
+
+::
+
unsigned long
dma_get_merge_boundary(struct device *dev);
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 136f984df0d9..8b006730687b 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -87,4 +87,5 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
int dma_direct_supported(struct device *dev, u64 mask);
+bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
#endif /* _LINUX_DMA_DIRECT_H */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 78f677cf45ab..a33ed3954ed4 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -461,6 +461,7 @@ int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
+bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
unsigned long dma_get_merge_boundary(struct device *dev);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
@@ -571,6 +572,10 @@ static inline size_t dma_max_mapping_size(struct device *dev)
{
return 0;
}
+static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+ return false;
+}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
return 0;
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 0a4881e59aa7..ecb922a0bfa0 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -530,3 +530,9 @@ size_t dma_direct_max_mapping_size(struct device *dev)
return swiotlb_max_mapping_size(dev);
return SIZE_MAX;
}
+
+bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+ return !dev_is_dma_coherent(dev) ||
+ is_swiotlb_buffer(dma_to_phys(dev, dma_addr));
+}
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 98e3d873792e..a8c18c9a796f 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -397,6 +397,16 @@ size_t dma_max_mapping_size(struct device *dev)
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);
+bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (dma_is_direct(ops))
+ return dma_direct_need_sync(dev, dma_addr);
+ return ops->sync_single_for_cpu || ops->sync_single_for_device;
+}
+EXPORT_SYMBOL_GPL(dma_need_sync);
+
unsigned long dma_get_merge_boundary(struct device *dev)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
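For completeness, the device-direction counterpart of the hypothetical sketch
above (again, not part of this patch). Note that for devices behind a
non-direct dma_map_ops (for example an IOMMU), dma_need_sync() as added in
kernel/dma/mapping.c stays conservative: it reports true whenever the ops
implement either sync callback.

/*
 * Device-direction side of the hypothetical fast path: skip
 * dma_sync_single_for_device() when dma_need_sync() reported false at
 * mapping time (coherent device, no swiotlb bounce buffer involved).
 */
static void rx_buf_refill(struct device *dev, struct rx_buf *buf, size_t len)
{
	if (buf->need_sync)
		dma_sync_single_for_device(dev, buf->dma, len,
					   DMA_BIDIRECTIONAL);
	/* ... post the buffer back to the hardware receive ring ... */
}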