Diffstat (limited to 'kernel/dma/debug.c')
-rw-r--r--  kernel/dma/debug.c | 373
1 file changed, 195 insertions, 178 deletions
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 18c93c2276ca..138ede653de4 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -23,6 +23,7 @@
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/swiotlb.h>
#include <asm/sections.h>
#include "debug.h"
@@ -38,7 +39,8 @@ enum {
dma_debug_single,
dma_debug_sg,
dma_debug_coherent,
- dma_debug_resource,
+ dma_debug_noncoherent,
+ dma_debug_phy,
};
enum map_err_types {
@@ -53,15 +55,16 @@ enum map_err_types {
* struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
* @list: node on pre-allocated free_entries list
* @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
+ * @dev_addr: DMA address
* @size: length of the mapping
* @type: single, page, sg, coherent
* @direction: enum dma_data_direction
* @sg_call_ents: 'nents' from dma_map_sg
* @sg_mapped_ents: 'mapped_ents' from dma_map_sg
- * @pfn: page frame of the start address
- * @offset: offset of mapping relative to pfn
+ * @paddr: physical start address of the mapping
* @map_err_type: track whether dma_mapping_error() was checked
- * @stacktrace: support backtraces when a violation is detected
+ * @stack_len: number of backtrace entries in @stack_entries
+ * @stack_entries: stack trace captured when the entry was created
*/
struct dma_debug_entry {
struct list_head list;
@@ -72,8 +75,7 @@ struct dma_debug_entry {
int direction;
int sg_call_ents;
int sg_mapped_ents;
- unsigned long pfn;
- size_t offset;
+ phys_addr_t paddr;
enum map_err_types map_err_type;
#ifdef CONFIG_STACKTRACE
unsigned int stack_len;
@@ -138,9 +140,10 @@ static const char *const maperr2str[] = {
static const char *type2name[] = {
[dma_debug_single] = "single",
- [dma_debug_sg] = "scather-gather",
+ [dma_debug_sg] = "scatter-gather",
[dma_debug_coherent] = "coherent",
- [dma_debug_resource] = "resource",
+ [dma_debug_noncoherent] = "noncoherent",
+ [dma_debug_phy] = "phy",
};
static const char *dir2name[] = {
@@ -387,45 +390,6 @@ static void hash_bucket_del(struct dma_debug_entry *entry)
list_del(&entry->list);
}
-static unsigned long long phys_addr(struct dma_debug_entry *entry)
-{
- if (entry->type == dma_debug_resource)
- return __pfn_to_phys(entry->pfn) + entry->offset;
-
- return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
-}
-
-/*
- * Dump mapping entries for debugging purposes
- */
-void debug_dma_dump_mappings(struct device *dev)
-{
- int idx;
-
- for (idx = 0; idx < HASH_SIZE; idx++) {
- struct hash_bucket *bucket = &dma_entry_hash[idx];
- struct dma_debug_entry *entry;
- unsigned long flags;
-
- spin_lock_irqsave(&bucket->lock, flags);
-
- list_for_each_entry(entry, &bucket->list, list) {
- if (!dev || dev == entry->dev) {
- dev_info(entry->dev,
- "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
- type2name[entry->type], idx,
- phys_addr(entry), entry->pfn,
- entry->dev_addr, entry->size,
- dir2name[entry->direction],
- maperr2str[entry->map_err_type]);
- }
- }
-
- spin_unlock_irqrestore(&bucket->lock, flags);
- cond_resched();
- }
-}
-
/*
* For each mapping (initial cacheline in the case of
* dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
@@ -445,8 +409,11 @@ void debug_dma_dump_mappings(struct device *dev)
* dma_active_cacheline entry to track per event. dma_map_sg(), on the
* other hand, consumes a single dma_debug_entry, but inserts 'nents'
* entries into the tree.
+ *
+ * Use __GFP_NOWARN because the printk from an OOM, to netconsole, could end
+ * up right back in the DMA debugging code, leading to a deadlock.
*/
-static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC);
+static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC | __GFP_NOWARN);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
@@ -454,8 +421,8 @@ static DEFINE_SPINLOCK(radix_lock);
static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
{
- return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
- (entry->offset >> L1_CACHE_SHIFT);
+ return ((entry->paddr >> PAGE_SHIFT) << CACHELINE_PER_PAGE_SHIFT) +
+ (offset_in_page(entry->paddr) >> L1_CACHE_SHIFT);
}
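
The helper above turns a physical address into a machine-wide cacheline index: the page number scaled by cachelines-per-page, plus the cacheline index within the page. A standalone sketch of the arithmetic, assuming 4 KiB pages and 64-byte L1 cachelines (both values are configuration-dependent in the kernel):

#include <stdint.h>
#include <stdio.h>

/* Sketch only: PAGE_SHIFT and L1_CACHE_SHIFT are assumed values here. */
#define PAGE_SHIFT		12	/* 4 KiB pages */
#define L1_CACHE_SHIFT		6	/* 64-byte cachelines */
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)

static uint64_t cacheline_number(uint64_t paddr)
{
	/* page number * 64 cachelines per page + index within the page */
	return ((paddr >> PAGE_SHIFT) << CACHELINE_PER_PAGE_SHIFT) +
	       ((paddr & ((1ULL << PAGE_SHIFT) - 1)) >> L1_CACHE_SHIFT);
}

int main(void)
{
	/* paddr 0x1040: page 1, offset 0x40 -> cacheline 64 + 1 = 65 */
	printf("cln=%llu\n", (unsigned long long)cacheline_number(0x1040));
	return 0;
}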
static int active_cacheline_read_overlap(phys_addr_t cln)
@@ -547,6 +514,70 @@ static void active_cacheline_remove(struct dma_debug_entry *entry)
}
/*
+ * Dump mapping entries to the kernel log for debugging purposes
+ */
+void debug_dma_dump_mappings(struct device *dev)
+{
+ int idx;
+ phys_addr_t cln;
+
+ for (idx = 0; idx < HASH_SIZE; idx++) {
+ struct hash_bucket *bucket = &dma_entry_hash[idx];
+ struct dma_debug_entry *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bucket->lock, flags);
+ list_for_each_entry(entry, &bucket->list, list) {
+ if (!dev || dev == entry->dev) {
+ cln = to_cacheline_number(entry);
+ dev_info(entry->dev,
+ "%s idx %d P=%pa D=%llx L=%llx cln=%pa %s %s\n",
+ type2name[entry->type], idx,
+ &entry->paddr, entry->dev_addr,
+ entry->size, &cln,
+ dir2name[entry->direction],
+ maperr2str[entry->map_err_type]);
+ }
+ }
+ spin_unlock_irqrestore(&bucket->lock, flags);
+
+ cond_resched();
+ }
+}
+
+/*
+ * Dump mapping entries to user space via debugfs
+ */
+static int dump_show(struct seq_file *seq, void *v)
+{
+ int idx;
+ phys_addr_t cln;
+
+ for (idx = 0; idx < HASH_SIZE; idx++) {
+ struct hash_bucket *bucket = &dma_entry_hash[idx];
+ struct dma_debug_entry *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bucket->lock, flags);
+ list_for_each_entry(entry, &bucket->list, list) {
+ cln = to_cacheline_number(entry);
+ seq_printf(seq,
+ "%s %s %s idx %d P=%pa D=%llx L=%llx cln=%pa %s %s\n",
+ dev_driver_string(entry->dev),
+ dev_name(entry->dev),
+ type2name[entry->type], idx,
+ &entry->paddr, entry->dev_addr,
+ entry->size, &cln,
+ dir2name[entry->direction],
+ maperr2str[entry->map_err_type]);
+ }
+ spin_unlock_irqrestore(&bucket->lock, flags);
+ }
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dump);
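
DEFINE_SHOW_ATTRIBUTE() is the seq_file convenience macro that turns dump_show() into a complete debugfs attribute; it expands to roughly the following (the real macro lives in include/linux/seq_file.h), and dma_debug_fs_init() further down registers the generated dump_fops with debugfs_create_file():

/* Rough expansion of DEFINE_SHOW_ATTRIBUTE(dump), for reference only. */
static int dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, dump_show, inode->i_private);
}

static const struct file_operations dump_fops = {
	.owner		= THIS_MODULE,
	.open		= dump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};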
+
+/*
* Wrapper function for adding an entry to the hash.
* This function takes care of locking itself.
*/
@@ -564,7 +595,9 @@ static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
if (rc == -ENOMEM) {
pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
global_disable = true;
- } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
+ } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ !(IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
+ is_swiotlb_active(entry->dev))) {
err_printk(entry->dev, entry,
"cacheline tracking EEXIST, overlapping mappings aren't supported\n");
}
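
The swiotlb check suppresses a false positive: with CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC, kmalloc() may hand out buffers smaller than a cacheline, and unaligned buffers are bounced through swiotlb when mapped, so two independent mappings can legitimately share a cacheline and the EEXIST overlap warning would be spurious.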
@@ -603,15 +636,19 @@ static struct dma_debug_entry *__dma_entry_alloc(void)
return entry;
}
-static void __dma_entry_alloc_check_leak(void)
+/*
+ * This should be called outside of free_entries_lock scope to avoid potential
+ * deadlocks with serial consoles that use DMA.
+ */
+static void __dma_entry_alloc_check_leak(u32 nr_entries)
{
- u32 tmp = nr_total_entries % nr_prealloc_entries;
+ u32 tmp = nr_entries % nr_prealloc_entries;
/* Shout each time we tick over some multiple of the initial pool */
if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
- nr_total_entries,
- (nr_total_entries / nr_prealloc_entries));
+ nr_entries,
+ (nr_entries / nr_prealloc_entries));
}
}
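
Note the "(%u00%%)" in the format string: it prints the integer quotient and appends a literal "00%", so growth is reported in whole multiples of the initial pool. With the default preallocation of 65536 entries, for example, crossing 196608 total entries logs "dma_debug_entry pool grown to 196608 (300%)", since 196608 / 65536 = 3.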
@@ -622,8 +659,10 @@ static void __dma_entry_alloc_check_leak(void)
*/
static struct dma_debug_entry *dma_entry_alloc(void)
{
+ bool alloc_check_leak = false;
struct dma_debug_entry *entry;
unsigned long flags;
+ u32 nr_entries;
spin_lock_irqsave(&free_entries_lock, flags);
if (num_free_entries == 0) {
@@ -633,13 +672,17 @@ static struct dma_debug_entry *dma_entry_alloc(void)
pr_err("debugging out of memory - disabling\n");
return NULL;
}
- __dma_entry_alloc_check_leak();
+ alloc_check_leak = true;
+ nr_entries = nr_total_entries;
}
entry = __dma_entry_alloc();
spin_unlock_irqrestore(&free_entries_lock, flags);
+ if (alloc_check_leak)
+ __dma_entry_alloc_check_leak(nr_entries);
+
#ifdef CONFIG_STACKTRACE
entry->stack_len = stack_trace_save(entry->stack_entries,
ARRAY_SIZE(entry->stack_entries),
@@ -764,33 +807,6 @@ static const struct file_operations filter_fops = {
.llseek = default_llseek,
};
-static int dump_show(struct seq_file *seq, void *v)
-{
- int idx;
-
- for (idx = 0; idx < HASH_SIZE; idx++) {
- struct hash_bucket *bucket = &dma_entry_hash[idx];
- struct dma_debug_entry *entry;
- unsigned long flags;
-
- spin_lock_irqsave(&bucket->lock, flags);
- list_for_each_entry(entry, &bucket->list, list) {
- seq_printf(seq,
- "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx %s %s\n",
- dev_name(entry->dev),
- dev_driver_string(entry->dev),
- type2name[entry->type], idx,
- phys_addr(entry), entry->pfn,
- entry->dev_addr, entry->size,
- dir2name[entry->direction],
- maperr2str[entry->map_err_type]);
- }
- spin_unlock_irqrestore(&bucket->lock, flags);
- }
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(dump);
-
static int __init dma_debug_fs_init(void)
{
struct dentry *dentry = debugfs_create_dir("dma-api", NULL);
@@ -859,7 +875,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
return 0;
}
-void dma_debug_add_bus(struct bus_type *bus)
+void dma_debug_add_bus(const struct bus_type *bus)
{
struct notifier_block *nb;
@@ -982,16 +998,17 @@ static void check_unmap(struct dma_debug_entry *ref)
"[mapped as %s] [unmapped as %s]\n",
ref->dev_addr, ref->size,
type2name[entry->type], type2name[ref->type]);
- } else if ((entry->type == dma_debug_coherent) &&
- (phys_addr(ref) != phys_addr(entry))) {
+ } else if ((entry->type == dma_debug_coherent ||
+ entry->type == dma_debug_noncoherent) &&
+ ref->paddr != entry->paddr) {
err_printk(ref->dev, entry, "device driver frees "
"DMA memory with different CPU address "
"[device address=0x%016llx] [size=%llu bytes] "
- "[cpu alloc address=0x%016llx] "
- "[cpu free address=0x%016llx]",
+ "[cpu alloc address=0x%pa] "
+ "[cpu free address=0x%pa]",
ref->dev_addr, ref->size,
- phys_addr(entry),
- phys_addr(ref));
+ &entry->paddr,
+ &ref->paddr);
}
if (ref->sg_call_ents && ref->type == dma_debug_sg &&
@@ -1031,22 +1048,25 @@ static void check_unmap(struct dma_debug_entry *ref)
}
hash_bucket_del(entry);
- dma_entry_free(entry);
-
put_hash_bucket(bucket, flags);
+
+ /*
+ * Free the entry outside of bucket_lock to avoid ABBA deadlocks
+ * between that and radix_lock.
+ */
+ dma_entry_free(entry);
}
-static void check_for_stack(struct device *dev,
- struct page *page, size_t offset)
+static void check_for_stack(struct device *dev, phys_addr_t phys)
{
void *addr;
struct vm_struct *stack_vm_area = task_stack_vm_area(current);
if (!stack_vm_area) {
/* Stack is direct-mapped. */
- if (PageHighMem(page))
+ if (PhysHighMem(phys))
return;
- addr = page_address(page) + offset;
+ addr = phys_to_virt(phys);
if (object_is_on_stack(addr))
err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
} else {
@@ -1054,10 +1074,12 @@ static void check_for_stack(struct device *dev,
int i;
for (i = 0; i < stack_vm_area->nr_pages; i++) {
- if (page != stack_vm_area->pages[i])
+ if (__phys_to_pfn(phys) !=
+ page_to_pfn(stack_vm_area->pages[i]))
continue;
- addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
+ addr = (u8 *)current->stack + i * PAGE_SIZE +
+ (phys % PAGE_SIZE);
err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
break;
}
@@ -1148,7 +1170,6 @@ out:
static void check_sg_segment(struct device *dev, struct scatterlist *sg)
{
-#ifdef CONFIG_DMA_API_DEBUG_SG
unsigned int max_seg = dma_get_max_seg_size(dev);
u64 start, end, boundary = dma_get_seg_boundary(dev);
@@ -1169,7 +1190,6 @@ static void check_sg_segment(struct device *dev, struct scatterlist *sg)
if ((start ^ end) & ~boundary)
err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
start, end, boundary);
-#endif
}
void debug_dma_map_single(struct device *dev, const void *addr,
@@ -1188,9 +1208,8 @@ void debug_dma_map_single(struct device *dev, const void *addr,
}
EXPORT_SYMBOL(debug_dma_map_single);
-void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
- size_t size, int direction, dma_addr_t dma_addr,
- unsigned long attrs)
+void debug_dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
+ int direction, dma_addr_t dma_addr, unsigned long attrs)
{
struct dma_debug_entry *entry;
@@ -1205,20 +1224,18 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
return;
entry->dev = dev;
- entry->type = dma_debug_single;
- entry->pfn = page_to_pfn(page);
- entry->offset = offset;
+ entry->type = dma_debug_phy;
+ entry->paddr = phys;
entry->dev_addr = dma_addr;
entry->size = size;
entry->direction = direction;
entry->map_err_type = MAP_ERR_NOT_CHECKED;
- check_for_stack(dev, page, offset);
+ if (!(attrs & DMA_ATTR_MMIO)) {
+ check_for_stack(dev, phys);
- if (!PageHighMem(page)) {
- void *addr = page_address(page) + offset;
-
- check_for_illegal_area(dev, addr, size);
+ if (!PhysHighMem(phys))
+ check_for_illegal_area(dev, phys_to_virt(phys), size);
}
add_dma_entry(entry, attrs);
@@ -1262,13 +1279,13 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
}
EXPORT_SYMBOL(debug_dma_mapping_error);
-void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
+void debug_dma_unmap_phys(struct device *dev, dma_addr_t dma_addr,
size_t size, int direction)
{
struct dma_debug_entry ref = {
- .type = dma_debug_single,
+ .type = dma_debug_phy,
.dev = dev,
- .dev_addr = addr,
+ .dev_addr = dma_addr,
.size = size,
.direction = direction,
};
@@ -1290,7 +1307,7 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
return;
for_each_sg(sg, s, nents, i) {
- check_for_stack(dev, sg_page(s), s->offset);
+ check_for_stack(dev, sg_phys(s));
if (!PageHighMem(sg_page(s)))
check_for_illegal_area(dev, sg_virt(s), s->length);
}
@@ -1302,8 +1319,7 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
entry->type = dma_debug_sg;
entry->dev = dev;
- entry->pfn = page_to_pfn(sg_page(s));
- entry->offset = s->offset;
+ entry->paddr = sg_phys(s);
entry->size = sg_dma_len(s);
entry->dev_addr = sg_dma_address(s);
entry->direction = direction;
@@ -1349,8 +1365,7 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
struct dma_debug_entry ref = {
.type = dma_debug_sg,
.dev = dev,
- .pfn = page_to_pfn(sg_page(s)),
- .offset = s->offset,
+ .paddr = sg_phys(s),
.dev_addr = sg_dma_address(s),
.size = sg_dma_len(s),
.direction = dir,
@@ -1367,6 +1382,18 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
}
}
+static phys_addr_t virt_to_paddr(void *virt)
+{
+ struct page *page;
+
+ if (is_vmalloc_addr(virt))
+ page = vmalloc_to_page(virt);
+ else
+ page = virt_to_page(virt);
+
+ return page_to_phys(page) + offset_in_page(virt);
+}
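
This helper consolidates the open-coded pfn lookups that debug_dma_alloc_coherent() and debug_dma_free_coherent() carried before (see the removed hunks below): a coherent buffer's virtual address may sit in the linear map or, when the allocation was remapped, in vmalloc space, and virt_to_page() is only valid for the former, hence the is_vmalloc_addr() split.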
+
void debug_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t dma_addr, void *virt,
unsigned long attrs)
@@ -1389,27 +1416,21 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
entry->type = dma_debug_coherent;
entry->dev = dev;
- entry->offset = offset_in_page(virt);
+ entry->paddr = virt_to_paddr(virt);
entry->size = size;
entry->dev_addr = dma_addr;
entry->direction = DMA_BIDIRECTIONAL;
- if (is_vmalloc_addr(virt))
- entry->pfn = vmalloc_to_pfn(virt);
- else
- entry->pfn = page_to_pfn(virt_to_page(virt));
-
add_dma_entry(entry, attrs);
}
void debug_dma_free_coherent(struct device *dev, size_t size,
- void *virt, dma_addr_t addr)
+ void *virt, dma_addr_t dma_addr)
{
struct dma_debug_entry ref = {
.type = dma_debug_coherent,
.dev = dev,
- .offset = offset_in_page(virt),
- .dev_addr = addr,
+ .dev_addr = dma_addr,
.size = size,
.direction = DMA_BIDIRECTIONAL,
};
@@ -1418,52 +1439,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
return;
- if (is_vmalloc_addr(virt))
- ref.pfn = vmalloc_to_pfn(virt);
- else
- ref.pfn = page_to_pfn(virt_to_page(virt));
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- check_unmap(&ref);
-}
-
-void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
- int direction, dma_addr_t dma_addr,
- unsigned long attrs)
-{
- struct dma_debug_entry *entry;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- entry = dma_entry_alloc();
- if (!entry)
- return;
-
- entry->type = dma_debug_resource;
- entry->dev = dev;
- entry->pfn = PHYS_PFN(addr);
- entry->offset = offset_in_page(addr);
- entry->size = size;
- entry->dev_addr = dma_addr;
- entry->direction = direction;
- entry->map_err_type = MAP_ERR_NOT_CHECKED;
-
- add_dma_entry(entry, attrs);
-}
-
-void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
- size_t size, int direction)
-{
- struct dma_debug_entry ref = {
- .type = dma_debug_resource,
- .dev = dev,
- .dev_addr = dma_addr,
- .size = size,
- .direction = direction,
- };
+ ref.paddr = virt_to_paddr(virt);
if (unlikely(dma_debug_disabled()))
return;
@@ -1522,8 +1498,7 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
struct dma_debug_entry ref = {
.type = dma_debug_sg,
.dev = dev,
- .pfn = page_to_pfn(sg_page(s)),
- .offset = s->offset,
+ .paddr = sg_phys(s),
.dev_addr = sg_dma_address(s),
.size = sg_dma_len(s),
.direction = direction,
@@ -1554,8 +1529,7 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
struct dma_debug_entry ref = {
.type = dma_debug_sg,
.dev = dev,
- .pfn = page_to_pfn(sg_page(s)),
- .offset = s->offset,
+ .paddr = sg_phys(s),
.dev_addr = sg_dma_address(s),
.size = sg_dma_len(s),
.direction = direction,
@@ -1571,6 +1545,49 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
}
}
+void debug_dma_alloc_pages(struct device *dev, struct page *page,
+ size_t size, int direction,
+ dma_addr_t dma_addr,
+ unsigned long attrs)
+{
+ struct dma_debug_entry *entry;
+
+ if (unlikely(dma_debug_disabled()))
+ return;
+
+ entry = dma_entry_alloc();
+ if (!entry)
+ return;
+
+ entry->type = dma_debug_noncoherent;
+ entry->dev = dev;
+ entry->paddr = page_to_phys(page);
+ entry->size = size;
+ entry->dev_addr = dma_addr;
+ entry->direction = direction;
+
+ add_dma_entry(entry, attrs);
+}
+
+void debug_dma_free_pages(struct device *dev, struct page *page,
+ size_t size, int direction,
+ dma_addr_t dma_addr)
+{
+ struct dma_debug_entry ref = {
+ .type = dma_debug_noncoherent,
+ .dev = dev,
+ .paddr = page_to_phys(page),
+ .dev_addr = dma_addr,
+ .size = size,
+ .direction = direction,
+ };
+
+ if (unlikely(dma_debug_disabled()))
+ return;
+
+ check_unmap(&ref);
+}
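
These two hooks give the dma_alloc_pages() family its own "noncoherent" entry type instead of reusing "coherent". A hypothetical driver path, sketched under the assumption that the core mapping code invokes the hooks with matching arguments:

/* Sketch only: error handling trimmed; how the core calls the debug
 * hooks is an assumption, but dma_alloc_pages()/dma_free_pages() are
 * the real APIs (linux/dma-mapping.h). */
#include <linux/dma-mapping.h>

static int example_noncoherent_buffer(struct device *dev, size_t size)
{
	dma_addr_t dma;
	struct page *page;

	page = dma_alloc_pages(dev, size, &dma, DMA_TO_DEVICE, GFP_KERNEL);
	if (!page)
		return -ENOMEM;
	/* CONFIG_DMA_API_DEBUG: a dma_debug_noncoherent entry is recorded */

	dma_free_pages(dev, size, page, dma, DMA_TO_DEVICE);
	/* check_unmap() verifies paddr/size/direction against the entry */
	return 0;
}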
+
static int __init dma_debug_driver_setup(char *str)
{
int i;