author     Sean Anderson <sean.anderson@linux.dev>    2024-10-18 11:00:37 -0400
committer  Christoph Hellwig <hch@lst.de>             2024-10-29 08:54:06 +0100
commit     68b6dbf1f441c4eba3b8511728a41cf9b01dca35
tree       4eb452797a551e5c56b54374e381fafb0560acd5 /kernel/dma
parent     c4484ab86ee00f2d9236e2851621ea02c105f4cc
dma-mapping: trace more error paths
It can be surprising to the user if DMA functions are only traced on
success. On failure, it can be unclear what the source of the problem
is. Fix this by tracing all functions even when they fail. Cases where
we BUG/WARN are skipped, since those should be sufficiently noisy
already.

Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Diffstat (limited to 'kernel/dma')
-rw-r--r--  kernel/dma/mapping.c | 25
1 file changed, 18 insertions(+), 7 deletions(-)
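To make the user-visible effect concrete, here is a minimal sketch of a driver fragment (hypothetical: example_setup_buffer, the SZ_4K size, and the surrounding driver context are illustrative and not part of this patch). Before this change, a failing dma_alloc_attrs() call emitted no dma_alloc event at all; with it, the tracepoint fires on both outcomes, so the failure shows up in the trace:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/sizes.h>

static int example_setup_buffer(struct device *dev)
{
	dma_addr_t dma_handle;
	void *buf;

	/*
	 * With this patch, dma_alloc_attrs() fires the dma_alloc
	 * tracepoint whether or not the allocation succeeds; on
	 * failure the event records a NULL virtual address for
	 * this device, where previously nothing was logged.
	 */
	buf = dma_alloc_attrs(dev, SZ_4K, &dma_handle, GFP_KERNEL, 0);
	if (!buf)
		return -ENOMEM;	/* the failure is already in the trace */

	dma_free_attrs(dev, SZ_4K, buf, dma_handle, 0);
	return 0;
}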
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index b8a6bc492fae..636dbb0629a4 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -223,6 +223,7 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
 		debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
 	} else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
 				ents != -EIO && ents != -EREMOTEIO)) {
+		trace_dma_map_sg_err(dev, sg, nents, ents, dir, attrs);
 		return -EIO;
 	}
 
@@ -604,20 +605,26 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	if (WARN_ON_ONCE(flag & __GFP_COMP))
 		return NULL;
 
-	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
+	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) {
+		trace_dma_alloc(dev, cpu_addr, *dma_handle, size,
+				DMA_BIDIRECTIONAL, flag, attrs);
 		return cpu_addr;
+	}
 
 	/* let the implementation decide on the zone to allocate from: */
 	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
 
-	if (dma_alloc_direct(dev, ops))
+	if (dma_alloc_direct(dev, ops)) {
 		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
-	else if (use_dma_iommu(dev))
+	} else if (use_dma_iommu(dev)) {
 		cpu_addr = iommu_dma_alloc(dev, size, dma_handle, flag, attrs);
-	else if (ops->alloc)
+	} else if (ops->alloc) {
 		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
-	else
+	} else {
+		trace_dma_alloc(dev, NULL, 0, size, DMA_BIDIRECTIONAL, flag,
+				attrs);
 		return NULL;
+	}
 
 	trace_dma_alloc(dev, cpu_addr, *dma_handle, size, DMA_BIDIRECTIONAL,
 			flag, attrs);
@@ -642,11 +649,11 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	 */
 	WARN_ON(irqs_disabled());
 
+	trace_dma_free(dev, cpu_addr, dma_handle, size, DMA_BIDIRECTIONAL,
+		       attrs);
 	if (!cpu_addr)
 		return;
 
-	trace_dma_free(dev, cpu_addr, dma_handle, size, DMA_BIDIRECTIONAL,
-		       attrs);
 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
 	if (dma_alloc_direct(dev, ops))
 		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
@@ -688,6 +695,8 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
 		trace_dma_alloc_pages(dev, page_to_virt(page), *dma_handle,
 				      size, dir, gfp, 0);
 		debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
+	} else {
+		trace_dma_alloc_pages(dev, NULL, 0, size, dir, gfp, 0);
 	}
 	return page;
 }
@@ -772,6 +781,8 @@ struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
 		sgt->nents = 1;
 		trace_dma_alloc_sgt(dev, sgt, size, dir, gfp, attrs);
 		debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
+	} else {
+		trace_dma_alloc_sgt_err(dev, NULL, 0, size, gfp, dir, attrs);
 	}
 	return sgt;
 }
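Usage note (this describes the existing tracing setup, which the patch itself does not change, and assumes the standard tracefs mount point): these tracepoints belong to the dma trace event group defined in include/trace/events/dma.h, so the new failure paths can be observed by writing 1 to /sys/kernel/tracing/events/dma/enable and reading /sys/kernel/tracing/trace_pipe. Failing calls then appear as events such as dma_map_sg_err, or as dma_alloc/dma_alloc_pages events with a NULL virtual address, where previously they left no record.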