Diffstat (limited to 'sound/core/memalloc.c')
-rw-r--r--  sound/core/memalloc.c  220
1 file changed, 211 insertions(+), 9 deletions(-)
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index c7c943c661e6..9fc971a704a9 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
+#include <linux/highmem.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
@@ -39,9 +40,11 @@ static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
}
/**
- * snd_dma_alloc_pages - allocate the buffer area according to the given type
+ * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
+ * type and direction
* @type: the DMA buffer type
* @device: the device pointer
+ * @dir: DMA direction
* @size: the buffer size to allocate
* @dmab: buffer allocation record to store the allocated data
*
@@ -51,8 +54,9 @@ static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
* Return: Zero if the buffer with the given size is allocated successfully,
* otherwise a negative value on error.
*/
-int snd_dma_alloc_pages(int type, struct device *device, size_t size,
- struct snd_dma_buffer *dmab)
+int snd_dma_alloc_dir_pages(int type, struct device *device,
+ enum dma_data_direction dir, size_t size,
+ struct snd_dma_buffer *dmab)
{
if (WARN_ON(!size))
return -ENXIO;
@@ -62,6 +66,7 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
size = PAGE_ALIGN(size);
dmab->dev.type = type;
dmab->dev.dev = device;
+ dmab->dev.dir = dir;
dmab->bytes = 0;
dmab->addr = 0;
dmab->private_data = NULL;
@@ -71,7 +76,7 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
dmab->bytes = size;
return 0;
}
-EXPORT_SYMBOL(snd_dma_alloc_pages);
+EXPORT_SYMBOL(snd_dma_alloc_dir_pages);
/**
* snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
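
As a usage sketch for the renamed allocator above (not part of the patch; "card_dev" and the 32 KiB size are hypothetical placeholders, and the pre-existing snd_dma_free_pages() is assumed for release):

	struct snd_dma_buffer dmab;
	int err;

	/* allocate a device-visible capture buffer with an explicit direction */
	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, card_dev,
				      DMA_FROM_DEVICE, 32 * 1024, &dmab);
	if (err < 0)
		return err;
	/* program the hardware with dmab.addr, access it via dmab.area ... */
	snd_dma_free_pages(&dmab);	/* release when done */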
@@ -129,9 +134,10 @@ static void __snd_release_pages(struct device *dev, void *res)
}
/**
- * snd_devm_alloc_pages - allocate the buffer and manage with devres
+ * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
* @dev: the device pointer
* @type: the DMA buffer type
+ * @dir: DMA direction
* @size: the buffer size to allocate
*
* Allocate buffer pages depending on the given type and manage using devres.
@@ -144,7 +150,8 @@ static void __snd_release_pages(struct device *dev, void *res)
* The function returns the snd_dma_buffer object on success, or NULL if failed.
*/
struct snd_dma_buffer *
-snd_devm_alloc_pages(struct device *dev, int type, size_t size)
+snd_devm_alloc_dir_pages(struct device *dev, int type,
+ enum dma_data_direction dir, size_t size)
{
struct snd_dma_buffer *dmab;
int err;
@@ -157,7 +164,7 @@ snd_devm_alloc_pages(struct device *dev, int type, size_t size)
if (!dmab)
return NULL;
- err = snd_dma_alloc_pages(type, dev, size, dmab);
+ err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
if (err < 0) {
devres_free(dmab);
return NULL;
@@ -166,7 +173,7 @@ snd_devm_alloc_pages(struct device *dev, int type, size_t size)
devres_add(dev, dmab);
return dmab;
}
-EXPORT_SYMBOL_GPL(snd_devm_alloc_pages);
+EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);
/**
* snd_dma_buffer_mmap - perform mmap of the given DMA buffer
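
A corresponding sketch for the devres-managed variant (hypothetical "dev" and "bufsize"; the buffer is freed automatically when the device is unbound, so no explicit release call is needed):

	struct snd_dma_buffer *dmab;

	dmab = snd_devm_alloc_dir_pages(dev, SNDRV_DMA_TYPE_DEV,
					DMA_TO_DEVICE, bufsize);
	if (!dmab)
		return -ENOMEM;
	/* devres releases the pages at driver unbind; no manual free */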
@@ -176,8 +183,11 @@ EXPORT_SYMBOL_GPL(snd_devm_alloc_pages);
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
struct vm_area_struct *area)
{
- const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+ const struct snd_malloc_ops *ops;
+ if (!dmab)
+ return -ENOENT;
+ ops = snd_dma_get_ops(dmab);
if (ops && ops->mmap)
return ops->mmap(dmab, area);
else
@@ -185,6 +195,26 @@ int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);
+#ifdef CONFIG_HAS_DMA
+/**
+ * snd_dma_buffer_sync - sync DMA buffer between CPU and device
+ * @dmab: buffer allocation information
+ * @mode: sync mode
+ */
+void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
+ enum snd_dma_sync_mode mode)
+{
+ const struct snd_malloc_ops *ops;
+
+ if (!dmab || !dmab->dev.need_sync)
+ return;
+ ops = snd_dma_get_ops(dmab);
+ if (ops && ops->sync)
+ ops->sync(dmab, mode);
+}
+EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
+#endif /* CONFIG_HAS_DMA */
+
/**
* snd_sgbuf_get_addr - return the physical address at the corresponding offset
* @dmab: buffer allocation information
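
The new sync entry point is meant to bracket CPU accesses on non-coherent setups. A sketch ("dmab", "dst", "offset" and "bytes" are placeholders; SNDRV_DMA_SYNC_DEVICE is the counterpart mode implied by the else branch, though only SNDRV_DMA_SYNC_CPU appears in this hunk):

	/* make the device's writes visible before the CPU reads captured data */
	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);
	memcpy(dst, dmab->area + offset, bytes);
	/* hand the buffer back to the device for further DMA */
	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_DEVICE);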
@@ -468,6 +498,174 @@ static const struct snd_malloc_ops snd_dma_wc_ops = {
.mmap = snd_dma_wc_mmap,
};
#endif /* CONFIG_X86 */
+
+/*
+ * Non-contiguous pages allocator
+ */
+static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ struct sg_table *sgt;
+ void *p;
+
+ sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
+ DEFAULT_GFP, 0);
+ if (!sgt)
+ return NULL;
+ dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->dev.dir);
+ p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
+ if (p)
+ dmab->private_data = sgt;
+ else
+ dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
+ return p;
+}
+
+static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
+{
+ dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
+ dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
+ dmab->dev.dir);
+}
+
+static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+ return dma_mmap_noncontiguous(dmab->dev.dev, area,
+ dmab->bytes, dmab->private_data);
+}
+
+static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
+ enum snd_dma_sync_mode mode)
+{
+ if (mode == SNDRV_DMA_SYNC_CPU) {
+ if (dmab->dev.dir == DMA_TO_DEVICE)
+ return;
+ dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
+ dmab->dev.dir);
+ invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
+ } else {
+ if (dmab->dev.dir == DMA_FROM_DEVICE)
+ return;
+ flush_kernel_vmap_range(dmab->area, dmab->bytes);
+ dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
+ dmab->dev.dir);
+ }
+}
+
+static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab,
+ struct sg_page_iter *piter,
+ size_t offset)
+{
+ struct sg_table *sgt = dmab->private_data;
+
+ __sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents,
+ offset >> PAGE_SHIFT);
+}
+
+static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab,
+ size_t offset)
+{
+ struct sg_dma_page_iter iter;
+
+ snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
+ __sg_page_iter_dma_next(&iter);
+ return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE;
+}
+
+static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab,
+ size_t offset)
+{
+ struct sg_page_iter iter;
+
+ snd_dma_noncontig_iter_set(dmab, &iter, offset);
+ __sg_page_iter_next(&iter);
+ return sg_page_iter_page(&iter);
+}
+
+static unsigned int
+snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab,
+ unsigned int ofs, unsigned int size)
+{
+ struct sg_dma_page_iter iter;
+ unsigned int start, end;
+ unsigned long addr;
+
+ start = ALIGN_DOWN(ofs, PAGE_SIZE);
+ end = ofs + size - 1; /* the last byte address */
+ snd_dma_noncontig_iter_set(dmab, &iter.base, start);
+ if (!__sg_page_iter_dma_next(&iter))
+ return 0;
+ /* check page continuity */
+ addr = sg_page_iter_dma_address(&iter);
+ for (;;) {
+ start += PAGE_SIZE;
+ if (start > end)
+ break;
+ addr += PAGE_SIZE;
+ if (!__sg_page_iter_dma_next(&iter) ||
+ sg_page_iter_dma_address(&iter) != addr)
+ return start - ofs;
+ }
+	/* ok, all on contiguous pages */
+ return size;
+}
+
+static const struct snd_malloc_ops snd_dma_noncontig_ops = {
+ .alloc = snd_dma_noncontig_alloc,
+ .free = snd_dma_noncontig_free,
+ .mmap = snd_dma_noncontig_mmap,
+ .sync = snd_dma_noncontig_sync,
+ .get_addr = snd_dma_noncontig_get_addr,
+ .get_page = snd_dma_noncontig_get_page,
+ .get_chunk_size = snd_dma_noncontig_get_chunk_size,
+};
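
A sketch of how the new type plugs into the existing helpers ("dev" and "bufsize" are placeholders; snd_sgbuf_get_chunk_size() is the pre-existing wrapper that dispatches to the get_chunk_size op registered above):

	struct snd_dma_buffer dmab;
	unsigned int chunk;

	if (snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_NONCONTIG, dev,
				    DMA_BIDIRECTIONAL, bufsize, &dmab) < 0)
		return -ENOMEM;
	/* how many bytes starting at offset 0 are DMA-contiguous? */
	chunk = snd_sgbuf_get_chunk_size(&dmab, 0, bufsize);
	/* chunk == bufsize when the allocation happens to be fully contiguous */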
+
+/*
+ * Non-coherent pages allocator
+ */
+static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->dev.dir);
+ return dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
+ dmab->dev.dir, DEFAULT_GFP);
+}
+
+static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
+{
+ dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
+ dmab->addr, dmab->dev.dir);
+}
+
+static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+ area->vm_page_prot = vm_get_page_prot(area->vm_flags);
+ return dma_mmap_pages(dmab->dev.dev, area,
+ area->vm_end - area->vm_start,
+ virt_to_page(dmab->area));
+}
+
+static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
+ enum snd_dma_sync_mode mode)
+{
+ if (mode == SNDRV_DMA_SYNC_CPU) {
+ if (dmab->dev.dir != DMA_TO_DEVICE)
+ dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
+ dmab->bytes, dmab->dev.dir);
+ } else {
+ if (dmab->dev.dir != DMA_FROM_DEVICE)
+ dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
+ dmab->bytes, dmab->dev.dir);
+ }
+}
+
+static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
+ .alloc = snd_dma_noncoherent_alloc,
+ .free = snd_dma_noncoherent_free,
+ .mmap = snd_dma_noncoherent_mmap,
+ .sync = snd_dma_noncoherent_sync,
+};
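
And a sketch for the non-coherent type, where dev.need_sync typically ends up set ("dev" and "bufsize" again placeholders; snd_dma_buffer_sync() checks need_sync internally, so calling it unconditionally is safe):

	struct snd_dma_buffer dmab;

	if (snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_NONCOHERENT, dev,
				    DMA_FROM_DEVICE, bufsize, &dmab) < 0)
		return -ENOMEM;
	/* explicit sync is required before the CPU touches the data */
	snd_dma_buffer_sync(&dmab, SNDRV_DMA_SYNC_CPU);
	/* ... read dmab.area ... */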
+
#endif /* CONFIG_HAS_DMA */
/*
@@ -479,6 +677,8 @@ static const struct snd_malloc_ops *dma_ops[] = {
#ifdef CONFIG_HAS_DMA
[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
+ [SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
+ [SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
#ifdef CONFIG_GENERIC_ALLOCATOR
[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
@@ -491,6 +691,8 @@ static const struct snd_malloc_ops *dma_ops[] = {
static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
+ if (WARN_ON_ONCE(!dmab))
+ return NULL;
if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
dmab->dev.type >= ARRAY_SIZE(dma_ops)))
return NULL;