author		Christoph Hellwig <hch@lst.de>	2018-12-25 08:50:35 +0100
committer	Christoph Hellwig <hch@lst.de>	2019-01-04 09:02:17 +0100
commit		2e05ea5cdc1ac55d9ef678ed5ea6c38acf7fd2a3 (patch)
tree		e88e6506cf6ad38bd2ad3d764a20290dbf0b6818 /kernel/dma/debug.c
parent		96d4f267e40f9509e8a66e2b39e8b95655617693 (diff)
dma-mapping: implement dma_map_single_attrs using dma_map_page_attrs
And also switch the way we implement the unmap side around to stay
consistent.  This ensures dma-debug works again because it records which
function we used for mapping to ensure it is also used for unmapping, and
also reduces further code duplication.  Last but not least this also
officially allows calling dma_sync_single_* for mappings created using
dma_map_page, which is perfectly fine given that the sync calls only take
a dma_addr_t, but not a virtual address or struct page.

Fixes: 7f0fee242e ("dma-mapping: merge dma_unmap_page_attrs and dma_unmap_single_attrs")
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: LABBE Corentin <clabbe.montjoie@gmail.com>
Diffstat (limited to 'kernel/dma/debug.c')
-rw-r--r--	kernel/dma/debug.c | 17 ++++-------------
1 file changed, 4 insertions, 13 deletions
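
For context, the subject line above refers to wrappers that live in include/linux/dma-mapping.h, which is outside this diffstat. The sketch below approximates the idea rather than quoting the patch: dma_map_single_attrs() is expressed through dma_map_page_attrs() via virt_to_page()/offset_in_page(), and dma_unmap_single_attrs() forwards to dma_unmap_page_attrs(), so both mapping flavours take the same path and dma-debug sees one consistent entry type.

/*
 * Sketch of the header-side wrappers (include/linux/dma-mapping.h).
 * Approximated here, since that file is not part of this diffstat.
 * kmalloc'ed memory is physically contiguous, so a "single" mapping
 * is just a page mapping at the right offset within its page.
 */
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* Unmap also goes through the page variant, so dma-debug records
	 * matching map/unmap calls for both dma_map_single and dma_map_page. */
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}
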
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 164706da2a73..1e0157113d15 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -49,7 +49,6 @@
 
 enum {
 	dma_debug_single,
-	dma_debug_page,
 	dma_debug_sg,
 	dma_debug_coherent,
 	dma_debug_resource,
@@ -1300,8 +1299,7 @@ void debug_dma_map_single(struct device *dev, const void *addr,
 EXPORT_SYMBOL(debug_dma_map_single);
 
 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
-			size_t size, int direction, dma_addr_t dma_addr,
-			bool map_single)
+			size_t size, int direction, dma_addr_t dma_addr)
 {
 	struct dma_debug_entry *entry;
 
@@ -1316,7 +1314,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 		return;
 
 	entry->dev = dev;
-	entry->type = dma_debug_page;
+	entry->type = dma_debug_single;
 	entry->pfn = page_to_pfn(page);
 	entry->offset = offset,
 	entry->dev_addr = dma_addr;
@@ -1324,9 +1322,6 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 	entry->direction = direction;
 	entry->map_err_type = MAP_ERR_NOT_CHECKED;
 
-	if (map_single)
-		entry->type = dma_debug_single;
-
 	check_for_stack(dev, page, offset);
 
 	if (!PageHighMem(page)) {
@@ -1378,10 +1373,10 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 EXPORT_SYMBOL(debug_dma_mapping_error);
 
 void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
-			  size_t size, int direction, bool map_single)
+			  size_t size, int direction)
 {
 	struct dma_debug_entry ref = {
-		.type = dma_debug_page,
+		.type = dma_debug_single,
 		.dev = dev,
 		.dev_addr = addr,
 		.size = size,
@@ -1390,10 +1385,6 @@ void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
 
 	if (unlikely(dma_debug_disabled()))
 		return;
-
-	if (map_single)
-		ref.type = dma_debug_single;
-
 	check_unmap(&ref);
 }
 EXPORT_SYMBOL(debug_dma_unmap_page);
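
A hypothetical driver snippet (not taken from this commit) showing the pattern the commit message now officially allows: a buffer mapped with dma_map_page() may be handed back to the CPU with dma_sync_single_for_cpu(), since the sync calls only take a dma_addr_t and dma-debug now records both mapping flavours as dma_debug_single.

#include <linux/dma-mapping.h>

/*
 * Hypothetical example: map a page for device-to-memory DMA, sync it
 * back for the CPU, then unmap it.  With this patch dma-debug records
 * the mapping as dma_debug_single, so mixing dma_map_page() with the
 * dma_sync_single_* helpers no longer triggers a type mismatch.
 */
static int demo_page_dma(struct device *dev, struct page *page)
{
	dma_addr_t dma;

	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... let the device fill the buffer ... */

	/* Hand ownership of the buffer back to the CPU before reading it. */
	dma_sync_single_for_cpu(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);

	/* ... consume the data on the CPU ... */

	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
	return 0;
}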