From a445e940ea686fc60475564009821010eb213be3 Mon Sep 17 00:00:00 2001
From: Vladimir Murzin
Date: Wed, 30 Oct 2019 10:13:13 +0000
Subject: dma-mapping: fix handling of dma-ranges for reserved memory (again)

Daniele reported that the issue previously fixed in c41f9ea998f3
("drivers: dma-coherent: Account dma_pfn_offset when used with device
tree") reappeared shortly after 43fc509c3efb ("dma-coherent: introduce
interface for default DMA pool"), where the fix was accidentally dropped.

Let's put the fix back in place and respect dma-ranges for reserved
memory.

Fixes: 43fc509c3efb ("dma-coherent: introduce interface for default DMA pool")
Reported-by: Daniele Alessandrelli
Tested-by: Daniele Alessandrelli
Tested-by: Alexandre Torgue
Signed-off-by: Vladimir Murzin
Signed-off-by: Christoph Hellwig
---
 include/linux/dma-mapping.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'include')

diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 4a1c4fca475a..10918c55003f 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -162,7 +162,7 @@ int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
 int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
                            void *cpu_addr, size_t size, int *ret);
 
-void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
+void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle);
 int dma_release_from_global_coherent(int order, void *vaddr);
 int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
                                  size_t size, int *ret);
@@ -172,7 +172,7 @@ int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
 #define dma_release_from_dev_coherent(dev, order, vaddr) (0)
 #define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
 
-static inline void *dma_alloc_from_global_coherent(ssize_t size,
+static inline void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
                                                    dma_addr_t *dma_handle)
 {
        return NULL;
-- cgit

From 4544b9f25e70eae9f70a243de0cc802aa5c8cb69 Mon Sep 17 00:00:00 2001
From: Kees Cook
Date: Tue, 29 Oct 2019 14:34:22 -0700
Subject: dma-mapping: Add vmap checks to dma_map_single()

As we've seen from USB and other areas[1], we need to always do runtime
checks for DMA operating on memory regions that might be remapped. This
adds vmap checks (similar to those already in USB but missing in other
places) into dma_map_single() so all callers benefit from the checking.

[1] https://git.kernel.org/linus/3840c5b78803b2b6cc1ff820100a74a092c40cbb

Suggested-by: Laura Abbott
Signed-off-by: Kees Cook
[hch: fixed the printk message]
Signed-off-by: Christoph Hellwig
---
 include/linux/dma-mapping.h | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'include')

diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 10918c55003f..4d450672b7d6 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -583,6 +583,10 @@ static inline unsigned long dma_get_merge_boundary(struct device *dev)
 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
+       /* DMA must never operate on areas that might be remapped. */
+       if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
+                         "rejecting DMA map of vmalloc memory\n"))
+               return DMA_MAPPING_ERROR;
        debug_dma_map_single(dev, ptr, size);
        return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
                        size, dir, attrs);
-- cgit
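
The check matters because dma_map_single() converts the pointer with
virt_to_page(), which is only meaningful for addresses in the kernel's linear
mapping; a vmalloc'd buffer (or a buffer on a vmap'd stack) lives outside that
mapping and may not even be physically contiguous. A minimal sketch of the
buggy pattern the new check rejects -- the driver function below is made up
purely for illustration:

    #include <linux/dma-mapping.h>
    #include <linux/vmalloc.h>

    /* Illustration only: DMA-mapping vmalloc memory is a driver bug. */
    static int example_tx(struct device *dev, size_t len)
    {
            void *buf = vmalloc(len);       /* not in the linear mapping */
            dma_addr_t handle;

            if (!buf)
                    return -ENOMEM;

            /*
             * Previously virt_to_page(buf) silently produced a bogus page
             * for this address; now dma_map_single() warns once and
             * returns DMA_MAPPING_ERROR instead.
             */
            handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, handle)) {
                    vfree(buf);
                    return -EIO;
            }

            /* ... run the transfer ... */
            dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
            vfree(buf);
            return 0;
    }
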
From acaade1af3587132e7ea585f470a95261e14f60c Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 29 Oct 2019 09:57:09 +0100
Subject: dma-direct: remove __dma_direct_free_pages

We can just call dma_free_contiguous directly instead of wrapping it.

Signed-off-by: Christoph Hellwig
Reviewed-by: Max Filippov
---
 include/linux/dma-direct.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'include')

diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index adf993a3bd58..dec3b3bb121d 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -68,6 +68,5 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs);
 struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
-void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page);
 int dma_direct_supported(struct device *dev, u64 mask);
 #endif /* _LINUX_DMA_DIRECT_H */
-- cgit

From 4e1003aa56a7d60ddb048e43a7a51368fcfe36af Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 29 Oct 2019 09:57:32 +0100
Subject: dma-direct: remove the dma_handle argument to __dma_direct_alloc_pages

The argument isn't used anywhere, so stop passing it.

Signed-off-by: Christoph Hellwig
Reviewed-by: Max Filippov
---
 include/linux/dma-direct.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include')

diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index dec3b3bb121d..ff3d5edc44b9 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -67,6 +67,6 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs);
 struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
-               dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
+               gfp_t gfp, unsigned long attrs);
 int dma_direct_supported(struct device *dev, u64 mask);
 #endif /* _LINUX_DMA_DIRECT_H */
-- cgit

From 34dc0ea6bc960f1f57b2148f01a3f4da23f87013 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 29 Oct 2019 11:01:37 +0100
Subject: dma-direct: provide mmap and get_sgtable method overrides

For dma-direct we know that the DMA address is an encoding of the
physical address that we can trivially decode. Use that fact to provide
implementations that do not need the arch_dma_coherent_to_pfn
architecture hook. Note that we can still only support mmap of
non-coherent memory if the architecture provides a way to set an
uncached bit in the page tables. This must be true for architectures
that use the generic remap helpers, but other architectures can also
manually select it.

Signed-off-by: Christoph Hellwig
Reviewed-by: Max Filippov
---
 include/linux/dma-direct.h      | 7 +++++++
 include/linux/dma-noncoherent.h | 2 --
 2 files changed, 7 insertions(+), 2 deletions(-)

(limited to 'include')

diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index ff3d5edc44b9..bcd953fb1f5a 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -68,5 +68,12 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs);
 struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                gfp_t gfp, unsigned long attrs);
+int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
+               void *cpu_addr, dma_addr_t dma_addr, size_t size,
+               unsigned long attrs);
+bool dma_direct_can_mmap(struct device *dev);
+int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
+               void *cpu_addr, dma_addr_t dma_addr, size_t size,
+               unsigned long attrs);
 int dma_direct_supported(struct device *dev, u64 mask);
 #endif /* _LINUX_DMA_DIRECT_H */
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index dd3de6d88fc0..e30fca1f1b12 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -41,8 +41,6 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs);
 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs);
-long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
-               dma_addr_t dma_addr);
 
 #ifdef CONFIG_MMU
 /*
-- cgit
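
The "trivially decode" observation is the whole trick: for dma-direct the
handle returned by the allocator is just phys_to_dma() of the buffer, so the
generic mmap/get_sgtable helpers can recover the page frame with dma_to_phys()
instead of calling an architecture hook. A condensed sketch of the shape of
such an override (this is not the exact kernel implementation; the
coherent-pool path, pgprot handling and bounds checks are omitted):

    /* Sketch only: recovering the PFN from a dma-direct handle. */
    static int example_dma_direct_mmap(struct device *dev,
                    struct vm_area_struct *vma, void *cpu_addr,
                    dma_addr_t dma_addr, size_t size, unsigned long attrs)
    {
            /* The DMA address encodes the physical address ... */
            unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));

            /* ... so no arch_dma_coherent_to_pfn() hook is needed. */
            return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                                   vma_pages(vma) << PAGE_SHIFT,
                                   vma->vm_page_prot);
    }
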
From b12d66278dd627cbe1ea7c000aa4715aaf8830c8 Mon Sep 17 00:00:00 2001
From: Nicolas Saenz Julienne
Date: Thu, 7 Nov 2019 16:06:44 +0100
Subject: dma-direct: check for overflows on 32 bit DMA addresses

As seen on the new Raspberry Pi 4 and sta2x11's DMA implementation, it
is possible for a device configured with 32 bit DMA addresses and a
partial DMA mapping located at the end of the address space to overflow.
It happens when a higher physical address, not DMAable, is translated to
its DMA counterpart.

For example the Raspberry Pi 4, configurable up to 4 GB of memory, has
an interconnect capable of addressing the lower 1 GB of physical memory
with a DMA offset of 0xc0000000. It transpires that any attempt to
translate physical addresses higher than the first GB will result in an
overflow which dma_capable() can't detect as it only checks for
addresses bigger than the maximum allowed DMA address.

Fix this by verifying in dma_capable() if the DMA address range provided
is at any point lower than the minimum possible DMA address on the bus.

Signed-off-by: Nicolas Saenz Julienne
Signed-off-by: Christoph Hellwig
---
 include/linux/dma-direct.h | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

(limited to 'include')

diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index bcd953fb1f5a..6db863c3eb93 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -3,8 +3,11 @@
 #define _LINUX_DMA_DIRECT_H 1
 
 #include <linux/dma-mapping.h>
+#include <linux/memblock.h> /* for min_low_pfn */
 #include <linux/mem_encrypt.h>
 
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
+
 #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
 #include <asm/dma-direct.h>
 #else
@@ -24,11 +27,16 @@ static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
 
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
+       dma_addr_t end = addr + size - 1;
+
        if (!dev->dma_mask)
                return false;
 
-       return addr + size - 1 <=
-               min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
+       if (!IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
+           min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
+               return false;
+
+       return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
 }
 #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
-- cgit
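
To make the overflow concrete with the Raspberry Pi 4 numbers above: with a
DMA offset of 0xc0000000, physical 0x00000000 maps to bus address 0xc0000000
and physical 0x3fffffff to 0xffffffff, so any physical address from the second
gigabyte onwards wraps around a 32 bit dma_addr_t and ends up below the bus
address of the first page of RAM -- which is exactly what the new min_low_pfn
comparison catches. A small standalone illustration of the arithmetic
(userspace C, not kernel code; the constants mirror the commit message):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t dma_addr_t;            /* 32 bit DMA addresses */
    #define DMA_OFFSET 0xc0000000u          /* bus address of physical 0x0 */

    static dma_addr_t phys_to_dma(uint64_t paddr)
    {
            return (dma_addr_t)(paddr + DMA_OFFSET);        /* may wrap */
    }

    int main(void)
    {
            dma_addr_t min_dma = phys_to_dma(0);    /* lowest RAM bus address */

            /* First GB translates fine: 0x3fff0000 -> 0xffff0000. */
            printf("%#x\n", phys_to_dma(0x3fff0000));

            /* Second GB wraps: 0x40000000 -> 0, which is < 0xc0000000. */
            printf("%#x (below %#x, so dma_capable() now rejects it)\n",
                   phys_to_dma(0x40000000), min_dma);
            return 0;
    }
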
From 56e35f9c5b87ec1ae93e483284e189c84388de16 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 7 Nov 2019 18:03:11 +0100
Subject: dma-mapping: drop the dev argument to arch_sync_dma_for_*

These are pure cache maintenance routines, so drop the unused struct
device argument.

Signed-off-by: Christoph Hellwig
Suggested-by: Daniel Vetter
---
 include/linux/dma-noncoherent.h | 20 ++++++++++----------
 include/xen/swiotlb-xen.h       |  8 ++++----
 2 files changed, 14 insertions(+), 14 deletions(-)

(limited to 'include')

diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index e30fca1f1b12..ca9b5770caee 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -73,29 +73,29 @@ static inline void arch_dma_cache_sync(struct device *dev, void *vaddr,
 #endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */
 
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir);
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir);
 #else
-static inline void arch_sync_dma_for_device(struct device *dev,
-               phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
 }
 #endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */
 
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir);
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir);
 #else
-static inline void arch_sync_dma_for_cpu(struct device *dev,
-               phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
 }
 #endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
 
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
-void arch_sync_dma_for_cpu_all(struct device *dev);
+void arch_sync_dma_for_cpu_all(void);
 #else
-static inline void arch_sync_dma_for_cpu_all(struct device *dev)
+static inline void arch_sync_dma_for_cpu_all(void)
 {
 }
 #endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index d71380f6ed0b..ffc0d3902b71 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -4,10 +4,10 @@
 
 #include <linux/swiotlb.h>
 
-void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
-               phys_addr_t paddr, size_t size, enum dma_data_direction dir);
-void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
-               phys_addr_t paddr, size_t size, enum dma_data_direction dir);
+void xen_dma_sync_for_cpu(dma_addr_t handle, phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir);
+void xen_dma_sync_for_device(dma_addr_t handle, phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir);
 
 extern int xen_swiotlb_init(int verbose, bool early);
 extern const struct dma_map_ops xen_swiotlb_dma_ops;
-- cgit

From 130c1ccbf55330b55e82612a6e54eebb82c9d746 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 12 Nov 2019 17:06:04 +0100
Subject: dma-direct: unify the dma_capable definitions

Currently each architecture that wants to override dma_to_phys and
phys_to_dma also has to provide dma_capable. But there isn't really any
good reason for that. powerpc and mips just have copies of the generic
one minus the latest fix, and the arm one was the inspiration for said
fix, but misses the bus_dma_mask handling. Make all architectures use
the generic version instead.

Signed-off-by: Christoph Hellwig
Acked-by: Michael Ellerman (powerpc)
Reviewed-by: Nicolas Saenz Julienne
---
 include/linux/dma-direct.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include')

diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 6db863c3eb93..991f8aa2676e 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -24,6 +24,7 @@ static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
        return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
 }
+#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
 
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
@@ -38,7 +39,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 
        return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
 }
-#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
 
 #ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
 bool force_dma_unencrypted(struct device *dev);
-- cgit

From c7345159f7db6fb69ec1c3b3f8f28cd05c731be2 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 12 Nov 2019 17:07:43 +0100
Subject: dma-direct: avoid a forward declaration for phys_to_dma

Move dma_capable down a bit so that we don't need a forward declaration
for phys_to_dma.

Signed-off-by: Christoph Hellwig
Reviewed-by: Nicolas Saenz Julienne
---
 include/linux/dma-direct.h | 30 ++++++++++++++----------------
 1 file changed, 14 insertions(+), 16 deletions(-)

(limited to 'include')

diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 991f8aa2676e..f8959f75e496 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -6,8 +6,6 @@
 #include <linux/memblock.h> /* for min_low_pfn */
 #include <linux/mem_encrypt.h>
 
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
-
 #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
 #include <asm/dma-direct.h>
 #else
@@ -26,20 +24,6 @@ static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
 }
 #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
 
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
-       dma_addr_t end = addr + size - 1;
-
-       if (!dev->dma_mask)
-               return false;
-
-       if (!IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
-           min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
-               return false;
-
-       return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
-}
-
 #ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
 bool force_dma_unencrypted(struct device *dev);
 #else
@@ -65,6 +49,20 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
        return __sme_clr(__dma_to_phys(dev, daddr));
 }
 
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+       dma_addr_t end = addr + size - 1;
+
+       if (!dev->dma_mask)
+               return false;
+
+       if (!IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
+           min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
+               return false;
+
+       return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
+}
+
 u64 dma_direct_get_required_mask(struct device *dev);
 void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs);
-- cgit
From 68a33b1794665ba8a1d1ef1d3bfcc7c587d380a6 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 19 Nov 2019 17:38:58 +0100
Subject: dma-direct: exclude dma_direct_map_resource from the min_low_pfn check

The valid memory address check in dma_capable only makes sense when
mapping normal memory, not when using dma_map_resource to map a device
resource. Add a new boolean argument to dma_capable to exclude that
check for the dma_map_resource case.

Fixes: b12d66278dd6 ("dma-direct: check for overflows on 32 bit DMA addresses")
Reported-by: Marek Szyprowski
Signed-off-by: Christoph Hellwig
Acked-by: Marek Szyprowski
Tested-by: Marek Szyprowski
---
 include/linux/dma-direct.h | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'include')

diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index f8959f75e496..99b77dd5f79b 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -49,14 +49,15 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
        return __sme_clr(__dma_to_phys(dev, daddr));
 }
 
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
+               bool is_ram)
 {
        dma_addr_t end = addr + size - 1;
 
        if (!dev->dma_mask)
                return false;
 
-       if (!IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
+       if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
            min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
                return false;
-- cgit
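
The effect of the new flag is easiest to see from the two call sites: mapping
a page of RAM keeps the min_low_pfn sanity check, while mapping an MMIO
resource -- which may legitimately sit below the bus address of the first RAM
page -- skips it. A condensed sketch of how the flag is used (simplified from
the callers in kernel/dma/direct.c; swiotlb bouncing and error reporting are
left out):

    /* Sketch only, not the full dma-direct implementations. */
    static dma_addr_t sketch_map_page(struct device *dev, struct page *page,
                    unsigned long offset, size_t size)
    {
            dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page) + offset);

            if (!dma_capable(dev, dma_addr, size, true))    /* RAM: keep the check */
                    return DMA_MAPPING_ERROR;
            return dma_addr;
    }

    static dma_addr_t sketch_map_resource(struct device *dev, phys_addr_t paddr,
                    size_t size)
    {
            dma_addr_t dma_addr = paddr;    /* resources are not translated */

            if (!dma_capable(dev, dma_addr, size, false))   /* MMIO: skip it */
                    return DMA_MAPPING_ERROR;
            return dma_addr;
    }
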
From a7ba70f1787f977f970cd116076c6fce4b9e01cc Mon Sep 17 00:00:00 2001
From: Nicolas Saenz Julienne
Date: Thu, 21 Nov 2019 10:26:44 +0100
Subject: dma-mapping: treat dev->bus_dma_mask as a DMA limit

Using a mask to represent bus DMA constraints has a set of limitations.
The biggest one is that it can only hold a power of two (minus one). The
DMA mapping code is already aware of this and treats dev->bus_dma_mask
as a limit. This quirk is already used by some architectures although
still rare.

With the introduction of the Raspberry Pi 4 we've found a new contender
for the use of bus DMA limits, as its PCIe bus can only address the
lower 3 GB of memory (of a total of 4 GB). This is impossible to
represent with a mask. To make things worse the device-tree code rounds
non-power-of-two bus DMA limits to the next power of two, which is
unacceptable in this case.

In the light of this, rename dev->bus_dma_mask to dev->bus_dma_limit all
over the tree and treat it as such. Note that dev->bus_dma_limit should
contain the highest accessible DMA address.

Signed-off-by: Nicolas Saenz Julienne
Reviewed-by: Robin Murphy
Signed-off-by: Christoph Hellwig
---
 include/linux/device.h      | 6 +++---
 include/linux/dma-direct.h  | 2 +-
 include/linux/dma-mapping.h | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

(limited to 'include')

diff --git a/include/linux/device.h b/include/linux/device.h
index 297239a08bb7..e396de656f20 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1186,8 +1186,8 @@ struct dev_links_info {
  * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
  *             hardware supports 64-bit addresses for consistent allocations
  *             such descriptors.
- * @bus_dma_mask: Mask of an upstream bridge or bus which imposes a smaller DMA
- *             limit than the device itself supports.
+ * @bus_dma_limit: Limit of an upstream bridge or bus which imposes a smaller
+ *             DMA limit than the device itself supports.
  * @dma_pfn_offset: offset of DMA memory range relatively of RAM
  * @dma_parms: A low level driver may set these to teach IOMMU code about
  *             segment limitations.
@@ -1270,7 +1270,7 @@ struct device {
                                             not all hardware supports
                                             64 bit addresses for consistent
                                             allocations such descriptors. */
-       u64             bus_dma_mask;   /* upstream dma_mask constraint */
+       u64             bus_dma_limit;  /* upstream dma constraint */
        unsigned long   dma_pfn_offset;
 
        struct device_dma_parameters *dma_parms;
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 452f5280cde3..24b8684aa21d 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -63,7 +63,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
            min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
                return false;
 
-       return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
+       return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
 }
 
 u64 dma_direct_get_required_mask(struct device *dev);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 4d450672b7d6..c4d8741264bd 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -697,7 +697,7 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
  */
 static inline bool dma_addressing_limited(struct device *dev)
 {
-       return min_not_zero(dma_get_mask(dev), dev->bus_dma_mask) <
+       return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
                            dma_get_required_mask(dev);
 }
-- cgit
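
Why a mask cannot express this: DMA masks are always of the form 2^n - 1, and
the nearest candidates to the Raspberry Pi 4's 3 GB PCIe window are
DMA_BIT_MASK(31) (too small) and DMA_BIT_MASK(32) (too large); 0xbfffffff
simply is not representable. With the rename, bus code can store the highest
usable address directly. A hedged sketch of how a host-bridge driver might
express such a constraint through the new field (the helper is hypothetical;
in practice the limit is set up by the bus or firmware parsing code):

    /* Hypothetical helper, for illustration only. */
    static void example_set_pcie_dma_limit(struct device *dev)
    {
            /* Highest bus address the bridge's outbound window reaches. */
            dev->bus_dma_limit = 0xbfffffff;        /* 3 GB - 1 */

            /*
             * dma_capable() now clamps against
             * min_not_zero(*dev->dma_mask, dev->bus_dma_limit), so a device
             * mask of DMA_BIT_MASK(32) is still honoured while streaming
             * mappings above 3 GB fall back to bounce buffering.
             */
    }
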