author     Takashi Iwai <tiwai@suse.de>  2021-06-09 18:25:49 +0200
committer  Takashi Iwai <tiwai@suse.de>  2021-06-10 10:15:21 +0200
commit     37af81c5998f4b0f23fb452cffa4b8a1c00ce95b (patch)
tree       1def6906c07dfff4d6ddde09cbaaaa3000af693d /sound/core/memalloc.c
parent     84a0374051c1582ed9ace6cd63cdbfb15ed4b797 (diff)
ALSA: core: Abstract memory alloc helpers
This patch introduces an ops table for each memory allocation type (SNDRV_DMA_TYPE_XXX) and abstracts the handling for better code management. The page allocation, release, and other tasks are now separated per type, especially for the SG buffer.

Each buffer type now has callbacks in struct snd_malloc_ops, and the common helper functions call those ops accordingly. The former inline code specific to the SG buffer is moved into the local sgbuf.c, and the PCM code can be simplified without the details of memory handling.

Link: https://lore.kernel.org/r/20210609162551.7842-4-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
Diffstat (limited to 'sound/core/memalloc.c')
-rw-r--r--  sound/core/memalloc.c  391
1 file changed, 244 insertions(+), 147 deletions(-)
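
Before the diff itself, here is a minimal stand-alone sketch of the dispatch pattern the commit describes: an ops table indexed by buffer type, plus common entry points that only look up the ops and delegate. The names below (struct malloc_ops, ops_table, buffer_alloc) are simplified stand-ins chosen for illustration, not the kernel's snd_malloc_ops, dma_ops[] or snd_dma_alloc_pages; the real structures appear in the diff that follows.

/*
 * Stand-alone illustration of the ops-table dispatch introduced by
 * this patch.  A plain malloc() stands in for alloc_pages_exact();
 * everything here is a simplified analogue, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buffer {
	int type;		/* index into the ops table */
	void *area;		/* allocated memory */
	size_t bytes;		/* allocated size */
};

struct malloc_ops {
	int (*alloc)(struct buffer *buf, size_t size);
	void (*free)(struct buffer *buf);
};

/* "continuous" allocator: malloc() as a stand-in for alloc_pages_exact() */
static int continuous_alloc(struct buffer *buf, size_t size)
{
	buf->area = malloc(size);
	return 0;	/* as in the patch, a NULL area is caught by the caller */
}

static void continuous_free(struct buffer *buf)
{
	free(buf->area);
}

static const struct malloc_ops continuous_ops = {
	.alloc = continuous_alloc,
	.free = continuous_free,
};

enum { TYPE_UNKNOWN = 0, TYPE_CONTINUOUS = 1, TYPE_MAX };

/* ops table indexed by buffer type, analogous to dma_ops[] in the patch */
static const struct malloc_ops *ops_table[TYPE_MAX] = {
	[TYPE_CONTINUOUS] = &continuous_ops,
};

static const struct malloc_ops *get_ops(const struct buffer *buf)
{
	if (buf->type <= TYPE_UNKNOWN || buf->type >= TYPE_MAX)
		return NULL;
	return ops_table[buf->type];
}

/* common entry points: no per-type switch, only ops lookup and delegation */
static int buffer_alloc(int type, size_t size, struct buffer *buf)
{
	const struct malloc_ops *ops;

	memset(buf, 0, sizeof(*buf));
	buf->type = type;
	ops = get_ops(buf);
	if (!ops || !ops->alloc || ops->alloc(buf, size) < 0 || !buf->area)
		return -1;
	buf->bytes = size;
	return 0;
}

static void buffer_free(struct buffer *buf)
{
	const struct malloc_ops *ops = get_ops(buf);

	if (ops && ops->free)
		ops->free(buf);
}

int main(void)
{
	struct buffer buf;

	if (buffer_alloc(TYPE_CONTINUOUS, 4096, &buf) == 0) {
		printf("allocated %zu bytes via the ops table\n", buf.bytes);
		buffer_free(&buf);
	}
	return 0;
}

Adding a new buffer type in this scheme means writing an alloc/free pair and registering it in the table; the common entry points stay untouched, which is the code-management benefit the commit message refers to.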
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 966bef5acc75..ad68bcdf82cf 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -15,99 +15,27 @@
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
+#include "memalloc_local.h"
-/*
- *
- * Bus-specific memory allocators
- *
- */
+static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);
-#ifdef CONFIG_HAS_DMA
-/* allocate the coherent DMA pages */
-static void snd_malloc_dev_pages(struct snd_dma_buffer *dmab, size_t size)
-{
- gfp_t gfp_flags;
-
- gfp_flags = GFP_KERNEL
- | __GFP_COMP /* compound page lets parts be mapped */
- | __GFP_NORETRY /* don't trigger OOM-killer */
- | __GFP_NOWARN; /* no stack trace print - this call is non-critical */
- dmab->area = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr,
- gfp_flags);
-#ifdef CONFIG_X86
- if (dmab->area && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
- set_memory_wc((unsigned long)dmab->area,
- PAGE_ALIGN(size) >> PAGE_SHIFT);
-#endif
-}
-
-/* free the coherent DMA pages */
-static void snd_free_dev_pages(struct snd_dma_buffer *dmab)
+/* a cast to gfp flag from the dev pointer; for CONTINUOUS and VMALLOC types */
+static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
+ gfp_t default_gfp)
{
-#ifdef CONFIG_X86
- if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
- set_memory_wb((unsigned long)dmab->area,
- PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
-#endif
- dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
+ if (!dmab->dev.dev)
+ return default_gfp;
+ else
+ return (__force gfp_t)(unsigned long)dmab->dev.dev;
}
-#ifdef CONFIG_GENERIC_ALLOCATOR
-/**
- * snd_malloc_dev_iram - allocate memory from on-chip internal ram
- * @dmab: buffer allocation record to store the allocated data
- * @size: number of bytes to allocate from the iram
- *
- * This function requires iram phandle provided via of_node
- */
-static void snd_malloc_dev_iram(struct snd_dma_buffer *dmab, size_t size)
+static int __snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
- struct device *dev = dmab->dev.dev;
- struct gen_pool *pool = NULL;
-
- dmab->area = NULL;
- dmab->addr = 0;
-
- if (dev->of_node)
- pool = of_gen_pool_get(dev->of_node, "iram", 0);
-
- if (!pool)
- return;
-
- /* Assign the pool into private_data field */
- dmab->private_data = pool;
+ const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
- dmab->area = gen_pool_dma_alloc_align(pool, size, &dmab->addr,
- PAGE_SIZE);
-}
-
-/**
- * snd_free_dev_iram - free allocated specific memory from on-chip internal ram
- * @dmab: buffer allocation record to store the allocated data
- */
-static void snd_free_dev_iram(struct snd_dma_buffer *dmab)
-{
- struct gen_pool *pool = dmab->private_data;
-
- if (pool && dmab->area)
- gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
-}
-#endif /* CONFIG_GENERIC_ALLOCATOR */
-#endif /* CONFIG_HAS_DMA */
-
-/*
- *
- * ALSA generic memory management
- *
- */
-
-static inline gfp_t snd_mem_get_gfp_flags(const struct device *dev,
- gfp_t default_gfp)
-{
- if (!dev)
- return default_gfp;
- else
- return (__force gfp_t)(unsigned long)dev;
+ if (WARN_ON_ONCE(!ops || !ops->alloc))
+ return -EINVAL;
+ return ops->alloc(dmab, size);
}
/**
@@ -126,7 +54,7 @@ static inline gfp_t snd_mem_get_gfp_flags(const struct device *dev,
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
struct snd_dma_buffer *dmab)
{
- gfp_t gfp;
+ int err;
if (WARN_ON(!size))
return -ENXIO;
@@ -140,43 +68,10 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
dmab->area = NULL;
dmab->addr = 0;
dmab->private_data = NULL;
- switch (type) {
- case SNDRV_DMA_TYPE_CONTINUOUS:
- gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL);
- dmab->area = alloc_pages_exact(size, gfp);
- break;
- case SNDRV_DMA_TYPE_VMALLOC:
- gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL | __GFP_HIGHMEM);
- dmab->area = __vmalloc(size, gfp);
- break;
-#ifdef CONFIG_HAS_DMA
-#ifdef CONFIG_GENERIC_ALLOCATOR
- case SNDRV_DMA_TYPE_DEV_IRAM:
- snd_malloc_dev_iram(dmab, size);
- if (dmab->area)
- break;
- /* Internal memory might have limited size and no enough space,
- * so if we fail to malloc, try to fetch memory traditionally.
- */
- dmab->dev.type = SNDRV_DMA_TYPE_DEV;
- fallthrough;
-#endif /* CONFIG_GENERIC_ALLOCATOR */
- case SNDRV_DMA_TYPE_DEV:
- case SNDRV_DMA_TYPE_DEV_UC:
- snd_malloc_dev_pages(dmab, size);
- break;
-#endif
-#ifdef CONFIG_SND_DMA_SGBUF
- case SNDRV_DMA_TYPE_DEV_SG:
- case SNDRV_DMA_TYPE_DEV_UC_SG:
- snd_malloc_sgbuf_pages(device, size, dmab, NULL);
- break;
-#endif
- default:
- pr_err("snd-malloc: invalid device type %d\n", type);
- return -ENXIO;
- }
- if (! dmab->area)
+ err = __snd_dma_alloc_pages(dmab, size);
+ if (err < 0)
+ return err;
+ if (!dmab->area)
return -ENOMEM;
dmab->bytes = size;
return 0;
@@ -217,7 +112,6 @@ int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
-
/**
* snd_dma_free_pages - release the allocated buffer
* @dmab: the buffer allocation record to release
@@ -226,32 +120,235 @@ EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
*/
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
- switch (dmab->dev.type) {
- case SNDRV_DMA_TYPE_CONTINUOUS:
- free_pages_exact(dmab->area, dmab->bytes);
- break;
- case SNDRV_DMA_TYPE_VMALLOC:
- vfree(dmab->area);
- break;
+ const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+ if (ops && ops->free)
+ ops->free(dmab);
+}
+EXPORT_SYMBOL(snd_dma_free_pages);
+
+/**
+ * snd_sgbuf_get_addr - return the physical address at the corresponding offset
+ * @dmab: buffer allocation information
+ * @offset: offset in the ring buffer
+ */
+dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
+{
+ const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+ if (ops && ops->get_addr)
+ return ops->get_addr(dmab, offset);
+ else
+ return dmab->addr + offset;
+}
+EXPORT_SYMBOL(snd_sgbuf_get_addr);
+
+/**
+ * snd_sgbuf_get_page - return the physical page at the corresponding offset
+ * @dmab: buffer allocation information
+ * @offset: offset in the ring buffer
+ */
+struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
+{
+ const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+ if (ops && ops->get_page)
+ return ops->get_page(dmab, offset);
+ else
+ return virt_to_page(dmab->area + offset);
+}
+EXPORT_SYMBOL(snd_sgbuf_get_page);
+
+/**
+ * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
+ * on sg-buffer
+ * @dmab: buffer allocation information
+ * @ofs: offset in the ring buffer
+ * @size: the requested size
+ */
+unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
+ unsigned int ofs, unsigned int size)
+{
+ const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+ if (ops && ops->get_chunk_size)
+ return ops->get_chunk_size(dmab, ofs, size);
+ else
+ return size;
+}
+EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
+
+/*
+ * Continuous pages allocator
+ */
+static int snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL);
+
+ dmab->area = alloc_pages_exact(size, gfp);
+ return 0;
+}
+
+static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
+{
+ free_pages_exact(dmab->area, dmab->bytes);
+}
+
+static const struct snd_malloc_ops snd_dma_continuous_ops = {
+ .alloc = snd_dma_continuous_alloc,
+ .free = snd_dma_continuous_free,
+};
+
+/*
+ * VMALLOC allocator
+ */
+static int snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL | __GFP_HIGHMEM);
+
+ dmab->area = __vmalloc(size, gfp);
+ return 0;
+}
+
+static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
+{
+ vfree(dmab->area);
+}
+
+static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
+ size_t offset)
+{
+ return page_to_phys(vmalloc_to_page(dmab->area + offset)) +
+ offset % PAGE_SIZE;
+}
+
+static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
+ size_t offset)
+{
+ return vmalloc_to_page(dmab->area + offset);
+}
+
+static unsigned int
+snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
+ unsigned int ofs, unsigned int size)
+{
+ ofs %= PAGE_SIZE;
+ size += ofs;
+ if (size > PAGE_SIZE)
+ size = PAGE_SIZE;
+ return size - ofs;
+}
+
+static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
+ .alloc = snd_dma_vmalloc_alloc,
+ .free = snd_dma_vmalloc_free,
+ .get_addr = snd_dma_vmalloc_get_addr,
+ .get_page = snd_dma_vmalloc_get_page,
+ .get_chunk_size = snd_dma_vmalloc_get_chunk_size,
+};
+
#ifdef CONFIG_HAS_DMA
+/*
+ * IRAM allocator
+ */
#ifdef CONFIG_GENERIC_ALLOCATOR
- case SNDRV_DMA_TYPE_DEV_IRAM:
- snd_free_dev_iram(dmab);
- break;
+static int snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ struct device *dev = dmab->dev.dev;
+ struct gen_pool *pool;
+
+ if (dev->of_node) {
+ pool = of_gen_pool_get(dev->of_node, "iram", 0);
+ /* Assign the pool into private_data field */
+ dmab->private_data = pool;
+
+ dmab->area = gen_pool_dma_alloc_align(pool, size, &dmab->addr,
+ PAGE_SIZE);
+ if (dmab->area)
+ return 0;
+ }
+
+ /* Internal memory might have limited size and no enough space,
+ * so if we fail to malloc, try to fetch memory traditionally.
+ */
+ dmab->dev.type = SNDRV_DMA_TYPE_DEV;
+ return __snd_dma_alloc_pages(dmab, size);
+}
+
+static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
+{
+ struct gen_pool *pool = dmab->private_data;
+
+ if (pool && dmab->area)
+ gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
+}
+
+static const struct snd_malloc_ops snd_dma_iram_ops = {
+ .alloc = snd_dma_iram_alloc,
+ .free = snd_dma_iram_free,
+};
#endif /* CONFIG_GENERIC_ALLOCATOR */
- case SNDRV_DMA_TYPE_DEV:
- case SNDRV_DMA_TYPE_DEV_UC:
- snd_free_dev_pages(dmab);
- break;
+
+/*
+ * Coherent device pages allocator
+ */
+static int snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ gfp_t gfp_flags;
+
+ gfp_flags = GFP_KERNEL
+ | __GFP_COMP /* compound page lets parts be mapped */
+ | __GFP_NORETRY /* don't trigger OOM-killer */
+ | __GFP_NOWARN; /* no stack trace print - this call is non-critical */
+ dmab->area = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr,
+ gfp_flags);
+#ifdef CONFIG_X86
+ if (dmab->area && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
+ set_memory_wc((unsigned long)dmab->area,
+ PAGE_ALIGN(size) >> PAGE_SHIFT);
#endif
+ return 0;
+}
+
+static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
+{
+#ifdef CONFIG_X86
+ if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
+ set_memory_wb((unsigned long)dmab->area,
+ PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
+#endif
+ dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
+}
+
+static const struct snd_malloc_ops snd_dma_dev_ops = {
+ .alloc = snd_dma_dev_alloc,
+ .free = snd_dma_dev_free,
+};
+#endif /* CONFIG_HAS_DMA */
+
+/*
+ * Entry points
+ */
+static const struct snd_malloc_ops *dma_ops[] = {
+ [SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
+ [SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
+#ifdef CONFIG_HAS_DMA
+ [SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
+ [SNDRV_DMA_TYPE_DEV_UC] = &snd_dma_dev_ops,
+#ifdef CONFIG_GENERIC_ALLOCATOR
+ [SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
+#endif /* CONFIG_GENERIC_ALLOCATOR */
+#endif /* CONFIG_HAS_DMA */
#ifdef CONFIG_SND_DMA_SGBUF
- case SNDRV_DMA_TYPE_DEV_SG:
- case SNDRV_DMA_TYPE_DEV_UC_SG:
- snd_free_sgbuf_pages(dmab);
- break;
+ [SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
+ [SNDRV_DMA_TYPE_DEV_UC_SG] = &snd_dma_sg_ops,
#endif
- default:
- pr_err("snd-malloc: invalid device type %d\n", dmab->dev.type);
- }
+};
+
+static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
+{
+ if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
+ dmab->dev.type >= ARRAY_SIZE(dma_ops)))
+ return NULL;
+ return dma_ops[dmab->dev.type];
}
-EXPORT_SYMBOL(snd_dma_free_pages);
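
For completeness, a hedged driver-side usage sketch of the exported entry points as they read after this patch. The function name example_alloc_ring_buffer, the device pointer and the 64 KiB size are placeholders for illustration only; the snd_dma_alloc_pages() signature and the error values are taken from the diff above. Callers are unchanged by the refactoring, since the per-type dispatch now happens behind these helpers.

/* kernel-module context assumed; not compilable stand-alone */
#include <sound/memalloc.h>

static int example_alloc_ring_buffer(struct device *dev,
				     struct snd_dma_buffer *dmab)
{
	int err;

	/* pick any SNDRV_DMA_TYPE_XXX supported by the kernel config */
	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, 64 * 1024, dmab);
	if (err < 0)
		return err;	/* -ENXIO/-EINVAL for bad arguments, -ENOMEM on failure */

	/* ... program the hardware with dmab->addr, access dmab->area ... */

	snd_dma_free_pages(dmab);	/* normally done at teardown */
	return 0;
}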