From 1fbf57d0530217b9f264eedc4867bf91479cdf3c Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Wed, 10 Jun 2020 10:29:49 +0200
Subject: dma-direct: re-enable mmap for !CONFIG_MMU

nommu configs can trivially map the coherent allocations to user
space, as no actual page table setup is required and the kernel and
the user space programs share the same address space.

Fixes: 62fcee9a3bd7 ("dma-mapping: remove CONFIG_ARCH_NO_COHERENT_DMA_MMAP")
Signed-off-by: Christoph Hellwig
Reported-by: dillon min
Reviewed-by: Vladimir Murzin
Tested-by: dillon min
---
 kernel/dma/direct.c | 14 --------------
 1 file changed, 14 deletions(-)

(limited to 'kernel/dma/direct.c')

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 0a4881e59aa7..9ec6a5c3fc57 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -459,7 +459,6 @@ int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
         return ret;
 }
 
-#ifdef CONFIG_MMU
 bool dma_direct_can_mmap(struct device *dev)
 {
         return dev_is_dma_coherent(dev) ||
@@ -485,19 +484,6 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
         return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                         user_count << PAGE_SHIFT, vma->vm_page_prot);
 }
-#else /* CONFIG_MMU */
-bool dma_direct_can_mmap(struct device *dev)
-{
-        return false;
-}
-
-int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
-                void *cpu_addr, dma_addr_t dma_addr, size_t size,
-                unsigned long attrs)
-{
-        return -ENXIO;
-}
-#endif /* CONFIG_MMU */
 
 int dma_direct_supported(struct device *dev, u64 mask)
 {
-- cgit

From 26749b3201ab05e288fbf78fbc8585dfa2da3218 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 15 Jun 2020 08:52:31 +0200
Subject: dma-direct: mark __dma_direct_alloc_pages static

Signed-off-by: Christoph Hellwig
---
 kernel/dma/direct.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel/dma/direct.c')

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 9ec6a5c3fc57..30c41b57acd9 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -109,7 +109,7 @@ static inline bool dma_should_free_from_pool(struct device *dev,
         return false;
 }
 
-struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
+static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                 gfp_t gfp, unsigned long attrs)
 {
         size_t alloc_size = PAGE_ALIGN(size);
-- cgit
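For readers wondering what the mmap re-enablement above buys in practice:
with the !CONFIG_MMU stubs gone, the generic mmap path works on nommu
kernels too, since (per the commit message) the kernel and user programs
share one address space and no page tables need to be set up. A minimal
driver-side sketch; the foo_dev structure, its fields, and foo_mmap() are
hypothetical names for illustration, not kernel APIs:

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical driver state; only the fields this sketch uses. */
struct foo_dev {
        struct device *dev;        /* device the buffer was allocated for */
        void *cpu_addr;            /* from dma_alloc_coherent() */
        dma_addr_t dma_handle;
        size_t size;
};

/*
 * With dma_direct_can_mmap()/dma_direct_mmap() no longer stubbed out
 * for !CONFIG_MMU, a handler like this also works on nommu kernels.
 */
static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct foo_dev *foo = file->private_data;

        return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
                                 foo->dma_handle, foo->size);
}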
From 633d5fce78a61e8727674467944939f55b0bcfab Mon Sep 17 00:00:00 2001
From: David Rientjes
Date: Thu, 11 Jun 2020 12:20:28 -0700
Subject: dma-direct: always align allocation size in dma_direct_alloc_pages()

dma_alloc_contiguous() does size >> PAGE_SHIFT and set_memory_decrypted()
works at page granularity. It's necessary to page align the allocation
size in dma_direct_alloc_pages() for consistent behavior.

This also fixes an issue when arch_dma_prep_coherent() is called on an
unaligned allocation size for dma_alloc_need_uncached() when
CONFIG_DMA_DIRECT_REMAP is disabled but CONFIG_ARCH_HAS_DMA_SET_UNCACHED
is enabled.

Signed-off-by: David Rientjes
Signed-off-by: Christoph Hellwig
---
 kernel/dma/direct.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

(limited to 'kernel/dma/direct.c')

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 30c41b57acd9..c93e3c8e3d01 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -112,11 +112,12 @@ static inline bool dma_should_free_from_pool(struct device *dev,
 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                 gfp_t gfp, unsigned long attrs)
 {
-        size_t alloc_size = PAGE_ALIGN(size);
         int node = dev_to_node(dev);
         struct page *page = NULL;
         u64 phys_limit;
 
+        WARN_ON_ONCE(!PAGE_ALIGNED(size));
+
         if (attrs & DMA_ATTR_NO_WARN)
                 gfp |= __GFP_NOWARN;
 
@@ -124,14 +125,14 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
         gfp &= ~__GFP_ZERO;
         gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                                            &phys_limit);
-        page = dma_alloc_contiguous(dev, alloc_size, gfp);
+        page = dma_alloc_contiguous(dev, size, gfp);
         if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-                dma_free_contiguous(dev, page, alloc_size);
+                dma_free_contiguous(dev, page, size);
                 page = NULL;
         }
 again:
         if (!page)
-                page = alloc_pages_node(node, gfp, get_order(alloc_size));
+                page = alloc_pages_node(node, gfp, get_order(size));
         if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                 dma_free_contiguous(dev, page, size);
                 page = NULL;
@@ -158,8 +159,10 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
         struct page *page;
         void *ret;
 
+        size = PAGE_ALIGN(size);
+
         if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
-                ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, gfp);
+                ret = dma_alloc_from_pool(dev, size, &page, gfp);
                 if (!ret)
                         return NULL;
                 goto done;
@@ -183,10 +186,10 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
             dma_alloc_need_uncached(dev, attrs)) ||
             (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
                 /* remove any dirty cache lines on the kernel alias */
-                arch_dma_prep_coherent(page, PAGE_ALIGN(size));
+                arch_dma_prep_coherent(page, size);
 
                 /* create a coherent mapping */
-                ret = dma_common_contiguous_remap(page, PAGE_ALIGN(size),
+                ret = dma_common_contiguous_remap(page, size,
                                 dma_pgprot(dev, PAGE_KERNEL, attrs),
                                 __builtin_return_address(0));
                 if (!ret)
-- cgit
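The alignment fix is easiest to see with concrete numbers. A sketch of
the truncation it guards against, assuming 4 KiB pages (PAGE_SHIFT ==
12); foo_tail_lost() is a hypothetical helper for illustration, not a
kernel API:

#include <linux/mm.h>        /* PAGE_SHIFT, PAGE_ALIGN */
#include <linux/types.h>

/*
 * dma_alloc_contiguous() converts bytes to pages with a plain shift,
 * so an unaligned size silently loses its tail unless the caller
 * aligns it up first. For example, 6144 bytes (1.5 pages):
 * 6144 >> 12 == 1 page (2048 bytes dropped), while
 * PAGE_ALIGN(6144) >> 12 == 2 pages.
 */
static bool foo_tail_lost(size_t size)
{
        return (size >> PAGE_SHIFT) != (PAGE_ALIGN(size) >> PAGE_SHIFT);
}

With PAGE_ALIGN() done once at the top of dma_direct_alloc_pages(),
every internal consumer (dma_alloc_contiguous(), set_memory_decrypted(),
arch_dma_prep_coherent()) sees the same whole-page size, which is what
the new WARN_ON_ONCE(!PAGE_ALIGNED(size)) in the helper asserts.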
From 96a539fa3bb71f443ae08e57b9f63d6e5bb2207c Mon Sep 17 00:00:00 2001
From: David Rientjes
Date: Thu, 11 Jun 2020 12:20:29 -0700
Subject: dma-direct: re-encrypt memory if dma_direct_alloc_pages() fails

If arch_dma_set_uncached() fails after memory has been decrypted, it
needs to be re-encrypted before freeing.

Fixes: fa7e2247c572 ("dma-direct: make uncached_kernel_address more general")
Signed-off-by: David Rientjes
Signed-off-by: Christoph Hellwig
---
 kernel/dma/direct.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

(limited to 'kernel/dma/direct.c')

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index c93e3c8e3d01..80d33f215a2e 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -220,7 +220,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
                 arch_dma_prep_coherent(page, size);
                 ret = arch_dma_set_uncached(ret, size);
                 if (IS_ERR(ret))
-                        goto out_free_pages;
+                        goto out_encrypt_pages;
         }
 done:
         if (force_dma_unencrypted(dev))
@@ -228,6 +228,11 @@ done:
         else
                 *dma_handle = phys_to_dma(dev, page_to_phys(page));
         return ret;
+
+out_encrypt_pages:
+        if (force_dma_unencrypted(dev))
+                set_memory_encrypted((unsigned long)page_address(page),
+                                     1 << get_order(size));
 out_free_pages:
         dma_free_contiguous(dev, page, size);
         return NULL;
-- cgit

From 56fccf21d1961a06e2a0c96ce446ebf036651062 Mon Sep 17 00:00:00 2001
From: David Rientjes
Date: Thu, 11 Jun 2020 12:20:30 -0700
Subject: dma-direct: check return value when encrypting or decrypting memory

__change_page_attr() can fail, which will cause set_memory_encrypted()
and set_memory_decrypted() to return non-zero.

If the device requires unencrypted DMA memory and decryption fails,
simply free the memory and fail.

If attempting to re-encrypt in the failure path and that encryption
fails, there is no alternative other than to leak the memory.

Fixes: c10f07aa27da ("dma/direct: Handle force decryption for DMA coherent buffers in common code")
Signed-off-by: David Rientjes
Signed-off-by: Christoph Hellwig
---
 kernel/dma/direct.c | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)

(limited to 'kernel/dma/direct.c')

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 80d33f215a2e..2f69bfdbe315 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -158,6 +158,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 {
         struct page *page;
         void *ret;
+        int err;
 
         size = PAGE_ALIGN(size);
 
@@ -210,8 +211,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
         }
 
         ret = page_address(page);
-        if (force_dma_unencrypted(dev))
-                set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
+        if (force_dma_unencrypted(dev)) {
+                err = set_memory_decrypted((unsigned long)ret,
+                                           1 << get_order(size));
+                if (err)
+                        goto out_free_pages;
+        }
 
         memset(ret, 0, size);
 
@@ -230,9 +235,13 @@ done:
         return ret;
 
 out_encrypt_pages:
-        if (force_dma_unencrypted(dev))
-                set_memory_encrypted((unsigned long)page_address(page),
-                                     1 << get_order(size));
+        if (force_dma_unencrypted(dev)) {
+                err = set_memory_encrypted((unsigned long)page_address(page),
+                                           1 << get_order(size));
+                /* If memory cannot be re-encrypted, it must be leaked */
+                if (err)
+                        return NULL;
+        }
 out_free_pages:
         dma_free_contiguous(dev, page, size);
         return NULL;
-- cgit
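Because these two fixes touch several hunks of the same function, the
resulting error handling is easier to read assembled in one place. A
condensed paraphrase of how dma_direct_alloc_pages() now backs out, not
the verbatim source:

        /* Decryption failed: pages are still encrypted, safe to free. */
        if (set_memory_decrypted((unsigned long)ret, 1 << get_order(size)))
                goto out_free_pages;
        ...
out_encrypt_pages:
        /* arch_dma_set_uncached() failed after a successful decrypt. */
        if (force_dma_unencrypted(dev) &&
            set_memory_encrypted((unsigned long)page_address(page),
                                 1 << get_order(size)))
                return NULL;    /* cannot re-encrypt: leak, do not free */
out_free_pages:
        dma_free_contiguous(dev, page, size);
        return NULL;

The asymmetry is deliberate: returning pages whose encryption state
could not be restored to the allocator would hand them out later with
the wrong attributes, so leaking them is the lesser evil.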
From 1a2b3357e860d890f8045367b179c7e7e802cd71 Mon Sep 17 00:00:00 2001
From: David Rientjes
Date: Thu, 11 Jun 2020 12:20:32 -0700
Subject: dma-direct: add missing set_memory_decrypted() for coherent mapping

When a coherent mapping is created in dma_direct_alloc_pages(), it
needs to be decrypted before returning if the device requires
unencrypted DMA.

Fixes: 3acac065508f ("dma-mapping: merge the generic remapping helpers into dma-direct")
Signed-off-by: David Rientjes
Signed-off-by: Christoph Hellwig
---
 kernel/dma/direct.c | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'kernel/dma/direct.c')

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 2f69bfdbe315..93f578a8e613 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -195,6 +195,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
                                 __builtin_return_address(0));
                 if (!ret)
                         goto out_free_pages;
+                if (force_dma_unencrypted(dev)) {
+                        err = set_memory_decrypted((unsigned long)ret,
+                                                   1 << get_order(size));
+                        if (err)
+                                goto out_free_pages;
+                }
                 memset(ret, 0, size);
                 goto done;
         }
-- cgit
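Putting the series together, the remapped branch of
dma_direct_alloc_pages() now mirrors the direct-mapped branch step for
step: flush the kernel alias, remap, decrypt, zero. Again a condensed
paraphrase rather than the verbatim source:

        /* remove any dirty cache lines on the kernel alias */
        arch_dma_prep_coherent(page, size);     /* size is page aligned */
        ret = dma_common_contiguous_remap(page, size,
                        dma_pgprot(dev, PAGE_KERNEL, attrs),
                        __builtin_return_address(0));
        if (!ret)
                goto out_free_pages;
        /* the previously missing decrypt of the new kernel mapping */
        if (force_dma_unencrypted(dev) &&
            set_memory_decrypted((unsigned long)ret, 1 << get_order(size)))
                goto out_free_pages;
        memset(ret, 0, size);
        goto done;

Before this last fix, a device that forces unencrypted DMA and took the
remap path was handed a mapping still marked encrypted, so on
memory-encryption systems the CPU would read ciphertext rather than
what the device wrote.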