Diffstat (limited to 'sound/core/memalloc.c')
 sound/core/memalloc.c | 1179 ++++++++++++++++++++++++++++----------------
 1 file changed, 772 insertions(+), 407 deletions(-)
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index bdf826f4fe0c..b3853583d2ae 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -1,175 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
* Takashi Iwai <tiwai@suse.de>
*
* Generic memory allocators
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
-#include <linux/module.h>
-#include <linux/proc_fs.h>
-#include <linux/init.h>
-#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mm.h>
-#include <linux/seq_file.h>
-#include <asm/uaccess.h>
#include <linux/dma-mapping.h>
-#include <linux/moduleparam.h>
-#include <linux/mutex.h>
+#include <linux/dma-map-ops.h>
+#include <linux/genalloc.h>
+#include <linux/highmem.h>
+#include <linux/vmalloc.h>
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
#include <sound/memalloc.h>
-
-MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>, Jaroslav Kysela <perex@perex.cz>");
-MODULE_DESCRIPTION("Memory allocator for ALSA system.");
-MODULE_LICENSE("GPL");
-
-
-/*
- */
-
-static DEFINE_MUTEX(list_mutex);
-static LIST_HEAD(mem_list_head);
-
-/* buffer preservation list */
-struct snd_mem_list {
- struct snd_dma_buffer buffer;
- unsigned int id;
- struct list_head list;
+struct snd_malloc_ops {
+ void *(*alloc)(struct snd_dma_buffer *dmab, size_t size);
+ void (*free)(struct snd_dma_buffer *dmab);
+ dma_addr_t (*get_addr)(struct snd_dma_buffer *dmab, size_t offset);
+ struct page *(*get_page)(struct snd_dma_buffer *dmab, size_t offset);
+ unsigned int (*get_chunk_size)(struct snd_dma_buffer *dmab,
+ unsigned int ofs, unsigned int size);
+ int (*mmap)(struct snd_dma_buffer *dmab, struct vm_area_struct *area);
+ void (*sync)(struct snd_dma_buffer *dmab, enum snd_dma_sync_mode mode);
};
-/* id for pre-allocated buffers */
-#define SNDRV_DMA_DEVICE_UNUSED (unsigned int)-1
-
-/*
- *
- * Generic memory allocators
- *
- */
-
-static long snd_allocated_pages; /* holding the number of allocated pages */
+#define DEFAULT_GFP \
+ (GFP_KERNEL | \
+ __GFP_RETRY_MAYFAIL | /* don't trigger OOM-killer */ \
+ __GFP_NOWARN) /* no stack trace print - this call is non-critical */
-static inline void inc_snd_pages(int order)
-{
- snd_allocated_pages += 1 << order;
-}
-
-static inline void dec_snd_pages(int order)
-{
- snd_allocated_pages -= 1 << order;
-}
+static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);
-/**
- * snd_malloc_pages - allocate pages with the given size
- * @size: the size to allocate in bytes
- * @gfp_flags: the allocation conditions, GFP_XXX
- *
- * Allocates the physically contiguous pages with the given size.
- *
- * Return: The pointer of the buffer, or %NULL if no enough memory.
- */
-void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
+static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
- int pg;
- void *res;
+ const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
- if (WARN_ON(!size))
+ if (WARN_ON_ONCE(!ops || !ops->alloc))
return NULL;
- if (WARN_ON(!gfp_flags))
- return NULL;
- gfp_flags |= __GFP_COMP; /* compound page lets parts be mapped */
- pg = get_order(size);
- if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL)
- inc_snd_pages(pg);
- return res;
+ return ops->alloc(dmab, size);
}
/**
- * snd_free_pages - release the pages
- * @ptr: the buffer pointer to release
- * @size: the allocated buffer size
- *
- * Releases the buffer allocated via snd_malloc_pages().
- */
-void snd_free_pages(void *ptr, size_t size)
-{
- int pg;
-
- if (ptr == NULL)
- return;
- pg = get_order(size);
- dec_snd_pages(pg);
- free_pages((unsigned long) ptr, pg);
-}
-
-/*
- *
- * Bus-specific memory allocators
- *
- */
-
-#ifdef CONFIG_HAS_DMA
-/* allocate the coherent DMA pages */
-static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *dma)
-{
- int pg;
- void *res;
- gfp_t gfp_flags;
-
- if (WARN_ON(!dma))
- return NULL;
- pg = get_order(size);
- gfp_flags = GFP_KERNEL
- | __GFP_COMP /* compound page lets parts be mapped */
- | __GFP_NORETRY /* don't trigger OOM-killer */
- | __GFP_NOWARN; /* no stack trace print - this call is non-critical */
- res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
- if (res != NULL)
- inc_snd_pages(pg);
-
- return res;
-}
-
-/* free the coherent DMA pages */
-static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
- dma_addr_t dma)
-{
- int pg;
-
- if (ptr == NULL)
- return;
- pg = get_order(size);
- dec_snd_pages(pg);
- dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
-}
-#endif /* CONFIG_HAS_DMA */
-
-/*
- *
- * ALSA generic memory management
- *
- */
-
-
-/**
- * snd_dma_alloc_pages - allocate the buffer area according to the given type
+ * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
+ * type and direction
* @type: the DMA buffer type
* @device: the device pointer
+ * @dir: DMA direction
* @size: the buffer size to allocate
* @dmab: buffer allocation record to store the allocated data
*
@@ -179,44 +60,29 @@ static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
* Return: Zero if the buffer with the given size is allocated successfully,
* otherwise a negative value on error.
*/
-int snd_dma_alloc_pages(int type, struct device *device, size_t size,
- struct snd_dma_buffer *dmab)
+int snd_dma_alloc_dir_pages(int type, struct device *device,
+ enum dma_data_direction dir, size_t size,
+ struct snd_dma_buffer *dmab)
{
if (WARN_ON(!size))
return -ENXIO;
if (WARN_ON(!dmab))
return -ENXIO;
+ size = PAGE_ALIGN(size);
dmab->dev.type = type;
dmab->dev.dev = device;
+ dmab->dev.dir = dir;
dmab->bytes = 0;
- switch (type) {
- case SNDRV_DMA_TYPE_CONTINUOUS:
- dmab->area = snd_malloc_pages(size,
- (__force gfp_t)(unsigned long)device);
- dmab->addr = 0;
- break;
-#ifdef CONFIG_HAS_DMA
- case SNDRV_DMA_TYPE_DEV:
- dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr);
- break;
-#endif
-#ifdef CONFIG_SND_DMA_SGBUF
- case SNDRV_DMA_TYPE_DEV_SG:
- snd_malloc_sgbuf_pages(device, size, dmab, NULL);
- break;
-#endif
- default:
- printk(KERN_ERR "snd-malloc: invalid device type %d\n", type);
- dmab->area = NULL;
- dmab->addr = 0;
- return -ENXIO;
- }
- if (! dmab->area)
+ dmab->addr = 0;
+ dmab->private_data = NULL;
+ dmab->area = __snd_dma_alloc_pages(dmab, size);
+ if (!dmab->area)
return -ENOMEM;
dmab->bytes = size;
return 0;
}
+EXPORT_SYMBOL(snd_dma_alloc_dir_pages);
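For orientation, a minimal usage sketch of this new entry point (not part of the diff; it assumes a PCI driver with `pci` in scope requesting a 64 KiB buffer):

    struct snd_dma_buffer buf;
    int err;

    /* allocate a device-coherent buffer for bidirectional streaming */
    err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
                                  DMA_BIDIRECTIONAL, 64 * 1024, &buf);
    if (err < 0)
            return err;
    /* buf.area = CPU address, buf.addr = DMA address, buf.bytes = size */
    snd_dma_free_pages(&buf);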
/**
* snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
@@ -239,22 +105,18 @@ int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
int err;
while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
- size_t aligned_size;
if (err != -ENOMEM)
return err;
if (size <= PAGE_SIZE)
return -ENOMEM;
- aligned_size = PAGE_SIZE << get_order(size);
- if (size != aligned_size)
- size = aligned_size;
- else
- size >>= 1;
+ size >>= 1;
+ size = PAGE_SIZE << get_order(size);
}
if (! dmab->area)
return -ENOMEM;
return 0;
}
-
+EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
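A worked illustration of the simplified fallback loop above (assuming 4 KiB pages): a 900 KiB request that keeps failing with -ENOMEM is retried at 512 KiB (900/2 = 450 KiB, rounded up by get_order() to a power-of-two page count), then 256 KiB, 128 KiB, and so on; once the size reaches PAGE_SIZE the function gives up. Callers therefore check dmab->bytes for the size actually obtained:

    /* sketch: accept whatever the fallback loop could get */
    if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, &pci->dev,
                                     900 * 1024, &buf) == 0)
            pr_debug("got %zu bytes\n", buf.bytes);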
/**
* snd_dma_free_pages - release the allocated buffer
@@ -264,284 +126,787 @@ int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
*/
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
- switch (dmab->dev.type) {
- case SNDRV_DMA_TYPE_CONTINUOUS:
- snd_free_pages(dmab->area, dmab->bytes);
- break;
-#ifdef CONFIG_HAS_DMA
- case SNDRV_DMA_TYPE_DEV:
- snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
- break;
-#endif
-#ifdef CONFIG_SND_DMA_SGBUF
- case SNDRV_DMA_TYPE_DEV_SG:
- snd_free_sgbuf_pages(dmab);
- break;
-#endif
- default:
- printk(KERN_ERR "snd-malloc: invalid device type %d\n", dmab->dev.type);
- }
+ const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+ if (ops && ops->free)
+ ops->free(dmab);
}
+EXPORT_SYMBOL(snd_dma_free_pages);
+/* called by devres */
+static void __snd_release_pages(struct device *dev, void *res)
+{
+ snd_dma_free_pages(res);
+}
/**
- * snd_dma_get_reserved - get the reserved buffer for the given device
- * @dmab: the buffer allocation record to store
- * @id: the buffer id
+ * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
+ * @dev: the device pointer
+ * @type: the DMA buffer type
+ * @dir: DMA direction
+ * @size: the buffer size to allocate
+ *
+ * Allocate buffer pages depending on the given type and manage using devres.
+ * The pages will be released automatically at the device removal.
*
- * Looks for the reserved-buffer list and re-uses if the same buffer
- * is found in the list. When the buffer is found, it's removed from the free list.
+ * Unlike snd_dma_alloc_pages(), this function requires the real device pointer,
+ * hence it can't work with SNDRV_DMA_TYPE_CONTINUOUS or
+ * SNDRV_DMA_TYPE_VMALLOC types.
*
- * Return: The size of buffer if the buffer is found, or zero if not found.
+ * Return: the snd_dma_buffer object on success, or NULL on failure
*/
-size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id)
+struct snd_dma_buffer *
+snd_devm_alloc_dir_pages(struct device *dev, int type,
+ enum dma_data_direction dir, size_t size)
{
- struct snd_mem_list *mem;
+ struct snd_dma_buffer *dmab;
+ int err;
- if (WARN_ON(!dmab))
- return 0;
+ if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
+ type == SNDRV_DMA_TYPE_VMALLOC))
+ return NULL;
- mutex_lock(&list_mutex);
- list_for_each_entry(mem, &mem_list_head, list) {
- if (mem->id == id &&
- (mem->buffer.dev.dev == NULL || dmab->dev.dev == NULL ||
- ! memcmp(&mem->buffer.dev, &dmab->dev, sizeof(dmab->dev)))) {
- struct device *dev = dmab->dev.dev;
- list_del(&mem->list);
- *dmab = mem->buffer;
- if (dmab->dev.dev == NULL)
- dmab->dev.dev = dev;
- kfree(mem);
- mutex_unlock(&list_mutex);
- return dmab->bytes;
- }
+ dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
+ if (!dmab)
+ return NULL;
+
+ err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
+ if (err < 0) {
+ devres_free(dmab);
+ return NULL;
}
- mutex_unlock(&list_mutex);
- return 0;
+
+ devres_add(dev, dmab);
+ return dmab;
}
+EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);
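A sketch of the devres-managed variant in a hypothetical probe() callback (`pdev` is illustrative):

    struct snd_dma_buffer *dmab;

    dmab = snd_devm_alloc_dir_pages(&pdev->dev, SNDRV_DMA_TYPE_DEV,
                                    DMA_FROM_DEVICE, 32 * 1024);
    if (!dmab)
            return -ENOMEM;
    /* no explicit free: devres releases the buffer automatically
     * when the device is unbound */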
/**
- * snd_dma_reserve_buf - reserve the buffer
- * @dmab: the buffer to reserve
- * @id: the buffer id
+ * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
+ * @dmab: buffer allocation information
+ * @area: VM area information
*
- * Reserves the given buffer as a reserved buffer.
+ * Return: zero if successful, or a negative error code
+ */
+int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+ const struct snd_malloc_ops *ops;
+
+ if (!dmab)
+ return -ENOENT;
+ ops = snd_dma_get_ops(dmab);
+ if (ops && ops->mmap)
+ return ops->mmap(dmab, area);
+ else
+ return -ENOENT;
+}
+EXPORT_SYMBOL(snd_dma_buffer_mmap);
+
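As a sketch, a driver wires this into its PCM mmap callback roughly as follows (my_pcm_mmap is hypothetical; snd_pcm_get_dma_buf() is the usual accessor for the substream's buffer):

    static int my_pcm_mmap(struct snd_pcm_substream *substream,
                           struct vm_area_struct *area)
    {
            /* delegate to the type-specific mmap op of the buffer */
            return snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream), area);
    }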
+#ifdef CONFIG_HAS_DMA
+/**
+ * snd_dma_buffer_sync - sync DMA buffer between CPU and device
+ * @dmab: buffer allocation information
+ * @mode: sync mode
+ */
+void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
+ enum snd_dma_sync_mode mode)
+{
+ const struct snd_malloc_ops *ops;
+
+ if (!dmab || !dmab->dev.need_sync)
+ return;
+ ops = snd_dma_get_ops(dmab);
+ if (ops && ops->sync)
+ ops->sync(dmab, mode);
+}
+EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
+#endif /* CONFIG_HAS_DMA */
+
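The intended call pattern brackets CPU access, as in this sketch (capture direction assumed; the copy step is illustrative):

    /* make device-written data visible to the CPU */
    snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);
    /* ... read or copy samples from dmab->area ... */
    /* hand ownership back to the device for the next transfer */
    snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_DEVICE);

Note the early return above: the sync is a no-op unless the allocator set dmab->dev.need_sync at allocation time.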
+/**
+ * snd_sgbuf_get_addr - return the physical address at the corresponding offset
+ * @dmab: buffer allocation information
+ * @offset: offset in the ring buffer
*
- * Return: Zero if successful, or a negative code on error.
+ * Return: the physical address
*/
-int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id)
+dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
- struct snd_mem_list *mem;
+ const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
- if (WARN_ON(!dmab))
- return -EINVAL;
- mem = kmalloc(sizeof(*mem), GFP_KERNEL);
- if (! mem)
- return -ENOMEM;
- mutex_lock(&list_mutex);
- mem->buffer = *dmab;
- mem->id = id;
- list_add_tail(&mem->list, &mem_list_head);
- mutex_unlock(&list_mutex);
- return 0;
+ if (ops && ops->get_addr)
+ return ops->get_addr(dmab, offset);
+ else
+ return dmab->addr + offset;
+}
+EXPORT_SYMBOL(snd_sgbuf_get_addr);
+
+/**
+ * snd_sgbuf_get_page - return the physical page at the corresponding offset
+ * @dmab: buffer allocation information
+ * @offset: offset in the ring buffer
+ *
+ * Return: the page pointer
+ */
+struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
+{
+ const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+ if (ops && ops->get_page)
+ return ops->get_page(dmab, offset);
+ else
+ return virt_to_page(dmab->area + offset);
+}
+EXPORT_SYMBOL(snd_sgbuf_get_page);
+
+/**
+ * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
+ * on sg-buffer
+ * @dmab: buffer allocation information
+ * @ofs: offset in the ring buffer
+ * @size: the requested size
+ *
+ * Return: the chunk size
+ */
+unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
+ unsigned int ofs, unsigned int size)
+{
+ const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+ if (ops && ops->get_chunk_size)
+ return ops->get_chunk_size(dmab, ofs, size);
+ else
+ return size;
+}
+EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
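Together these helpers let a driver walk a possibly non-contiguous buffer in DMA-contiguous runs, e.g. to program a descriptor list. A sketch, where program_entry() stands in for a hardware-specific helper:

    unsigned int ofs = 0, rest = dmab->bytes, chunk;

    while (rest > 0) {
            /* largest run of pages contiguous in DMA address space */
            chunk = snd_sgbuf_get_chunk_size(dmab, ofs, rest);
            program_entry(snd_sgbuf_get_addr(dmab, ofs), chunk);
            ofs += chunk;
            rest -= chunk;
    }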
+
+/*
+ * Continuous pages allocator
+ */
+static void *do_alloc_pages(struct device *dev, size_t size, dma_addr_t *addr,
+ bool wc)
+{
+ void *p;
+ gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
+
+ again:
+ p = alloc_pages_exact(size, gfp);
+ if (!p)
+ return NULL;
+ *addr = page_to_phys(virt_to_page(p));
+ if (!dev)
+ return p;
+ if ((*addr + size - 1) & ~dev->coherent_dma_mask) {
+ if (IS_ENABLED(CONFIG_ZONE_DMA32) && !(gfp & GFP_DMA32)) {
+ gfp |= GFP_DMA32;
+ goto again;
+ }
+ if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
+ gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
+ goto again;
+ }
+ }
+#ifdef CONFIG_X86
+ if (wc)
+ set_memory_wc((unsigned long)(p), size >> PAGE_SHIFT);
+#endif
+ return p;
+}
+
+static void do_free_pages(void *p, size_t size, bool wc)
+{
+#ifdef CONFIG_X86
+ if (wc)
+ set_memory_wb((unsigned long)(p), size >> PAGE_SHIFT);
+#endif
+ free_pages_exact(p, size);
+}
+
+
+static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, false);
+}
+
+static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
+{
+ do_free_pages(dmab->area, dmab->bytes, false);
+}
+
+static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+ return remap_pfn_range(area, area->vm_start,
+ dmab->addr >> PAGE_SHIFT,
+ area->vm_end - area->vm_start,
+ area->vm_page_prot);
+}
+
+static const struct snd_malloc_ops snd_dma_continuous_ops = {
+ .alloc = snd_dma_continuous_alloc,
+ .free = snd_dma_continuous_free,
+ .mmap = snd_dma_continuous_mmap,
+};
+
+/*
+ * VMALLOC allocator
+ */
+static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ return vmalloc(size);
+}
+
+static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
+{
+ vfree(dmab->area);
+}
+
+static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+ return remap_vmalloc_range(area, dmab->area, 0);
+}
+
+#define get_vmalloc_page_addr(dmab, offset) \
+ page_to_phys(vmalloc_to_page((dmab)->area + (offset)))
+
+static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
+ size_t offset)
+{
+ return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
+}
+
+static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
+ size_t offset)
+{
+ return vmalloc_to_page(dmab->area + offset);
+}
+
+static unsigned int
+snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
+ unsigned int ofs, unsigned int size)
+{
+ unsigned int start, end;
+ unsigned long addr;
+
+ start = ALIGN_DOWN(ofs, PAGE_SIZE);
+ end = ofs + size - 1; /* the last byte address */
+ /* check page continuity */
+ addr = get_vmalloc_page_addr(dmab, start);
+ for (;;) {
+ start += PAGE_SIZE;
+ if (start > end)
+ break;
+ addr += PAGE_SIZE;
+ if (get_vmalloc_page_addr(dmab, start) != addr)
+ return start - ofs;
+ }
+ /* ok, all on continuous pages */
+ return size;
}
+static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
+ .alloc = snd_dma_vmalloc_alloc,
+ .free = snd_dma_vmalloc_free,
+ .mmap = snd_dma_vmalloc_mmap,
+ .get_addr = snd_dma_vmalloc_get_addr,
+ .get_page = snd_dma_vmalloc_get_page,
+ .get_chunk_size = snd_dma_vmalloc_get_chunk_size,
+};
+
+#ifdef CONFIG_HAS_DMA
/*
- * purge all reserved buffers
+ * IRAM allocator
*/
-static void free_all_reserved_pages(void)
-{
- struct list_head *p;
- struct snd_mem_list *mem;
-
- mutex_lock(&list_mutex);
- while (! list_empty(&mem_list_head)) {
- p = mem_list_head.next;
- mem = list_entry(p, struct snd_mem_list, list);
- list_del(p);
- snd_dma_free_pages(&mem->buffer);
- kfree(mem);
+#ifdef CONFIG_GENERIC_ALLOCATOR
+static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ struct device *dev = dmab->dev.dev;
+ struct gen_pool *pool;
+ void *p;
+
+ if (dev->of_node) {
+ pool = of_gen_pool_get(dev->of_node, "iram", 0);
+ /* Assign the pool into private_data field */
+ dmab->private_data = pool;
+
+ p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
+ if (p)
+ return p;
}
- mutex_unlock(&list_mutex);
+
+	/* Internal memory might have limited size and not enough space,
+	 * so if the allocation fails, fall back to the usual page allocation.
+	 */
+ dmab->dev.type = SNDRV_DMA_TYPE_DEV;
+ return __snd_dma_alloc_pages(dmab, size);
+}
+
+static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
+{
+ struct gen_pool *pool = dmab->private_data;
+
+ if (pool && dmab->area)
+ gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}
+static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+ area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+ return remap_pfn_range(area, area->vm_start,
+ dmab->addr >> PAGE_SHIFT,
+ area->vm_end - area->vm_start,
+ area->vm_page_prot);
+}
+
+static const struct snd_malloc_ops snd_dma_iram_ops = {
+ .alloc = snd_dma_iram_alloc,
+ .free = snd_dma_iram_free,
+ .mmap = snd_dma_iram_mmap,
+};
+#endif /* CONFIG_GENERIC_ALLOCATOR */
-#ifdef CONFIG_PROC_FS
/*
- * proc file interface
+ * Coherent device pages allocator
*/
-#define SND_MEM_PROC_FILE "driver/snd-page-alloc"
-static struct proc_dir_entry *snd_mem_proc;
-
-static int snd_mem_proc_read(struct seq_file *seq, void *offset)
-{
- long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
- struct snd_mem_list *mem;
- int devno;
- static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG" };
-
- mutex_lock(&list_mutex);
- seq_printf(seq, "pages : %li bytes (%li pages per %likB)\n",
- pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
- devno = 0;
- list_for_each_entry(mem, &mem_list_head, list) {
- devno++;
- seq_printf(seq, "buffer %d : ID %08x : type %s\n",
- devno, mem->id, types[mem->buffer.dev.type]);
- seq_printf(seq, " addr = 0x%lx, size = %d bytes\n",
- (unsigned long)mem->buffer.addr,
- (int)mem->buffer.bytes);
+static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
+}
+
+static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
+{
+ dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
+}
+
+static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+ return dma_mmap_coherent(dmab->dev.dev, area,
+ dmab->area, dmab->addr, dmab->bytes);
+}
+
+static const struct snd_malloc_ops snd_dma_dev_ops = {
+ .alloc = snd_dma_dev_alloc,
+ .free = snd_dma_dev_free,
+ .mmap = snd_dma_dev_mmap,
+};
+
+/*
+ * Write-combined pages
+ */
+#ifdef CONFIG_SND_DMA_SGBUF
+/* x86-specific allocations */
+static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ void *p = do_alloc_pages(dmab->dev.dev, size, &dmab->addr, true);
+
+ if (!p)
+ return NULL;
+ dmab->addr = dma_map_single(dmab->dev.dev, p, size, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dmab->dev.dev, dmab->addr)) {
+		do_free_pages(p, size, true);	/* dmab->area is not yet set here */
+ return NULL;
}
- mutex_unlock(&list_mutex);
- return 0;
+ return p;
}
-static int snd_mem_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, snd_mem_proc_read, NULL);
-}
-
-/* FIXME: for pci only - other bus? */
-#ifdef CONFIG_PCI
-#define gettoken(bufp) strsep(bufp, " \t\n")
-
-static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
- size_t count, loff_t * ppos)
-{
- char buf[128];
- char *token, *p;
-
- if (count > sizeof(buf) - 1)
- return -EINVAL;
- if (copy_from_user(buf, buffer, count))
- return -EFAULT;
- buf[count] = '\0';
-
- p = buf;
- token = gettoken(&p);
- if (! token || *token == '#')
- return count;
- if (strcmp(token, "add") == 0) {
- char *endp;
- int vendor, device, size, buffers;
- long mask;
- int i, alloced;
- struct pci_dev *pci;
-
- if ((token = gettoken(&p)) == NULL ||
- (vendor = simple_strtol(token, NULL, 0)) <= 0 ||
- (token = gettoken(&p)) == NULL ||
- (device = simple_strtol(token, NULL, 0)) <= 0 ||
- (token = gettoken(&p)) == NULL ||
- (mask = simple_strtol(token, NULL, 0)) < 0 ||
- (token = gettoken(&p)) == NULL ||
- (size = memparse(token, &endp)) < 64*1024 ||
- size > 16*1024*1024 /* too big */ ||
- (token = gettoken(&p)) == NULL ||
- (buffers = simple_strtol(token, NULL, 0)) <= 0 ||
- buffers > 4) {
- printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
- return count;
- }
- vendor &= 0xffff;
- device &= 0xffff;
-
- alloced = 0;
- pci = NULL;
- while ((pci = pci_get_device(vendor, device, pci)) != NULL) {
- if (mask > 0 && mask < 0xffffffff) {
- if (pci_set_dma_mask(pci, mask) < 0 ||
- pci_set_consistent_dma_mask(pci, mask) < 0) {
- printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
- pci_dev_put(pci);
- return count;
- }
- }
- for (i = 0; i < buffers; i++) {
- struct snd_dma_buffer dmab;
- memset(&dmab, 0, sizeof(dmab));
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
- size, &dmab) < 0) {
- printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
- pci_dev_put(pci);
- return count;
- }
- snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
- }
- alloced++;
+static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
+{
+ dma_unmap_single(dmab->dev.dev, dmab->addr, dmab->bytes,
+ DMA_BIDIRECTIONAL);
+ do_free_pages(dmab->area, dmab->bytes, true);
+}
+
+static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+ area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+ return dma_mmap_coherent(dmab->dev.dev, area,
+ dmab->area, dmab->addr, dmab->bytes);
+}
+#else
+static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
+}
+
+static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
+{
+ dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
+}
+
+static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+ return dma_mmap_wc(dmab->dev.dev, area,
+ dmab->area, dmab->addr, dmab->bytes);
+}
+#endif
+
+static const struct snd_malloc_ops snd_dma_wc_ops = {
+ .alloc = snd_dma_wc_alloc,
+ .free = snd_dma_wc_free,
+ .mmap = snd_dma_wc_mmap,
+};
+
+/*
+ * Non-contiguous pages allocator
+ */
+static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ struct sg_table *sgt;
+ void *p;
+
+ sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
+ DEFAULT_GFP, 0);
+ if (!sgt)
+ return NULL;
+
+ dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
+ sg_dma_address(sgt->sgl));
+ p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
+ if (p) {
+ dmab->private_data = sgt;
+ /* store the first page address for convenience */
+ dmab->addr = snd_sgbuf_get_addr(dmab, 0);
+ } else {
+ dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
+ }
+ return p;
+}
+
+static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
+{
+ dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
+ dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
+ dmab->dev.dir);
+}
+
+static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+ return dma_mmap_noncontiguous(dmab->dev.dev, area,
+ dmab->bytes, dmab->private_data);
+}
+
+static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
+ enum snd_dma_sync_mode mode)
+{
+ if (mode == SNDRV_DMA_SYNC_CPU) {
+ if (dmab->dev.dir == DMA_TO_DEVICE)
+ return;
+ invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
+ dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
+ dmab->dev.dir);
+ } else {
+ if (dmab->dev.dir == DMA_FROM_DEVICE)
+ return;
+ flush_kernel_vmap_range(dmab->area, dmab->bytes);
+ dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
+ dmab->dev.dir);
+ }
+}
+
+static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab,
+ struct sg_page_iter *piter,
+ size_t offset)
+{
+ struct sg_table *sgt = dmab->private_data;
+
+ __sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents,
+ offset >> PAGE_SHIFT);
+}
+
+static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab,
+ size_t offset)
+{
+ struct sg_dma_page_iter iter;
+
+ snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
+ __sg_page_iter_dma_next(&iter);
+ return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE;
+}
+
+static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab,
+ size_t offset)
+{
+ struct sg_page_iter iter;
+
+ snd_dma_noncontig_iter_set(dmab, &iter, offset);
+ __sg_page_iter_next(&iter);
+ return sg_page_iter_page(&iter);
+}
+
+static unsigned int
+snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab,
+ unsigned int ofs, unsigned int size)
+{
+ struct sg_dma_page_iter iter;
+ unsigned int start, end;
+ unsigned long addr;
+
+ start = ALIGN_DOWN(ofs, PAGE_SIZE);
+ end = ofs + size - 1; /* the last byte address */
+ snd_dma_noncontig_iter_set(dmab, &iter.base, start);
+ if (!__sg_page_iter_dma_next(&iter))
+ return 0;
+ /* check page continuity */
+ addr = sg_page_iter_dma_address(&iter);
+ for (;;) {
+ start += PAGE_SIZE;
+ if (start > end)
+ break;
+ addr += PAGE_SIZE;
+ if (!__sg_page_iter_dma_next(&iter) ||
+ sg_page_iter_dma_address(&iter) != addr)
+ return start - ofs;
+ }
+ /* ok, all on continuous pages */
+ return size;
+}
+
+static const struct snd_malloc_ops snd_dma_noncontig_ops = {
+ .alloc = snd_dma_noncontig_alloc,
+ .free = snd_dma_noncontig_free,
+ .mmap = snd_dma_noncontig_mmap,
+ .sync = snd_dma_noncontig_sync,
+ .get_addr = snd_dma_noncontig_get_addr,
+ .get_page = snd_dma_noncontig_get_page,
+ .get_chunk_size = snd_dma_noncontig_get_chunk_size,
+};
+
+#ifdef CONFIG_SND_DMA_SGBUF
+/* Fallback SG-buffer allocations for x86 */
+struct snd_dma_sg_fallback {
+ struct sg_table sgt; /* used by get_addr - must be the first item */
+ size_t count;
+ struct page **pages;
+ unsigned int *npages;
+};
+
+static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
+ struct snd_dma_sg_fallback *sgbuf)
+{
+ bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG;
+ size_t i, size;
+
+ if (sgbuf->pages && sgbuf->npages) {
+ i = 0;
+ while (i < sgbuf->count) {
+ size = sgbuf->npages[i];
+ if (!size)
+ break;
+ do_free_pages(page_address(sgbuf->pages[i]),
+ size << PAGE_SHIFT, wc);
+ i += size;
}
- if (! alloced) {
- for (i = 0; i < buffers; i++) {
- struct snd_dma_buffer dmab;
- memset(&dmab, 0, sizeof(dmab));
- /* FIXME: We can allocate only in ZONE_DMA
- * without a device pointer!
- */
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, NULL,
- size, &dmab) < 0) {
- printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
- break;
- }
- snd_dma_reserve_buf(&dmab, (unsigned int)((vendor << 16) | device));
- }
+ }
+ kvfree(sgbuf->pages);
+ kvfree(sgbuf->npages);
+ kfree(sgbuf);
+}
+
+/* fallback manual S/G buffer allocations */
+static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG;
+ struct snd_dma_sg_fallback *sgbuf;
+ struct page **pagep, *curp;
+ size_t chunk;
+ dma_addr_t addr;
+ unsigned int idx, npages;
+ void *p;
+
+ sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
+ if (!sgbuf)
+ return NULL;
+ size = PAGE_ALIGN(size);
+ sgbuf->count = size >> PAGE_SHIFT;
+ sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
+ sgbuf->npages = kvcalloc(sgbuf->count, sizeof(*sgbuf->npages), GFP_KERNEL);
+ if (!sgbuf->pages || !sgbuf->npages)
+ goto error;
+
+ pagep = sgbuf->pages;
+ chunk = size;
+ idx = 0;
+ while (size > 0) {
+ chunk = min(size, chunk);
+ p = do_alloc_pages(dmab->dev.dev, chunk, &addr, wc);
+ if (!p) {
+ if (chunk <= PAGE_SIZE)
+ goto error;
+ chunk >>= 1;
+ chunk = PAGE_SIZE << get_order(chunk);
+ continue;
}
- } else if (strcmp(token, "erase") == 0)
- /* FIXME: need for releasing each buffer chunk? */
- free_all_reserved_pages();
+
+ size -= chunk;
+ /* fill pages */
+ npages = chunk >> PAGE_SHIFT;
+ sgbuf->npages[idx] = npages;
+ idx += npages;
+ curp = virt_to_page(p);
+ while (npages--)
+ *pagep++ = curp++;
+ }
+
+ if (sg_alloc_table_from_pages(&sgbuf->sgt, sgbuf->pages, sgbuf->count,
+ 0, sgbuf->count << PAGE_SHIFT, GFP_KERNEL))
+ goto error;
+
+ if (dma_map_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0))
+ goto error_dma_map;
+
+ p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
+ if (!p)
+ goto error_vmap;
+
+ dmab->private_data = sgbuf;
+ /* store the first page address for convenience */
+ dmab->addr = snd_sgbuf_get_addr(dmab, 0);
+ return p;
+
+ error_vmap:
+ dma_unmap_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0);
+ error_dma_map:
+ sg_free_table(&sgbuf->sgt);
+ error:
+ __snd_dma_sg_fallback_free(dmab, sgbuf);
+ return NULL;
+}
+
+static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
+{
+ struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
+
+ vunmap(dmab->area);
+ dma_unmap_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0);
+ sg_free_table(&sgbuf->sgt);
+ __snd_dma_sg_fallback_free(dmab, dmab->private_data);
+}
+
+static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+ struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
+
+ if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
+ area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+ return vm_map_pages(area, sgbuf->pages, sgbuf->count);
+}
+
+static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ int type = dmab->dev.type;
+ void *p;
+
+ /* try the standard DMA API allocation at first */
+ if (type == SNDRV_DMA_TYPE_DEV_WC_SG)
+ dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC;
else
- printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
- return count;
+ dmab->dev.type = SNDRV_DMA_TYPE_DEV;
+ p = __snd_dma_alloc_pages(dmab, size);
+ if (p)
+ return p;
+
+ dmab->dev.type = type; /* restore the type */
+ return snd_dma_sg_fallback_alloc(dmab, size);
}
-#endif /* CONFIG_PCI */
-static const struct file_operations snd_mem_proc_fops = {
- .owner = THIS_MODULE,
- .open = snd_mem_proc_open,
- .read = seq_read,
-#ifdef CONFIG_PCI
- .write = snd_mem_proc_write,
-#endif
- .llseek = seq_lseek,
- .release = single_release,
+static const struct snd_malloc_ops snd_dma_sg_ops = {
+ .alloc = snd_dma_sg_alloc,
+ .free = snd_dma_sg_fallback_free,
+ .mmap = snd_dma_sg_fallback_mmap,
+ /* reuse noncontig helper */
+ .get_addr = snd_dma_noncontig_get_addr,
+ /* reuse vmalloc helpers */
+ .get_page = snd_dma_vmalloc_get_page,
+ .get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};
-
-#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_SND_DMA_SGBUF */
/*
- * module entry
+ * Non-coherent pages allocator
*/
+static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ void *p;
+
+ p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
+ dmab->dev.dir, DEFAULT_GFP);
+ if (p)
+ dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
+ return p;
+}
-static int __init snd_mem_init(void)
+static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
{
-#ifdef CONFIG_PROC_FS
- snd_mem_proc = proc_create(SND_MEM_PROC_FILE, 0644, NULL,
- &snd_mem_proc_fops);
-#endif
- return 0;
+ dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
+ dmab->addr, dmab->dev.dir);
}
-static void __exit snd_mem_exit(void)
+static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
{
- remove_proc_entry(SND_MEM_PROC_FILE, NULL);
- free_all_reserved_pages();
- if (snd_allocated_pages > 0)
- printk(KERN_ERR "snd-malloc: Memory leak? pages not freed = %li\n", snd_allocated_pages);
+ area->vm_page_prot = vm_get_page_prot(area->vm_flags);
+ return dma_mmap_pages(dmab->dev.dev, area,
+ area->vm_end - area->vm_start,
+ virt_to_page(dmab->area));
}
+static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
+ enum snd_dma_sync_mode mode)
+{
+ if (mode == SNDRV_DMA_SYNC_CPU) {
+ if (dmab->dev.dir != DMA_TO_DEVICE)
+ dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
+ dmab->bytes, dmab->dev.dir);
+ } else {
+ if (dmab->dev.dir != DMA_FROM_DEVICE)
+ dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
+ dmab->bytes, dmab->dev.dir);
+ }
+}
-module_init(snd_mem_init)
-module_exit(snd_mem_exit)
+static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
+ .alloc = snd_dma_noncoherent_alloc,
+ .free = snd_dma_noncoherent_free,
+ .mmap = snd_dma_noncoherent_mmap,
+ .sync = snd_dma_noncoherent_sync,
+};
+#endif /* CONFIG_HAS_DMA */
/*
- * exports
+ * Entry points
*/
-EXPORT_SYMBOL(snd_dma_alloc_pages);
-EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
-EXPORT_SYMBOL(snd_dma_free_pages);
-
-EXPORT_SYMBOL(snd_dma_get_reserved_buf);
-EXPORT_SYMBOL(snd_dma_reserve_buf);
+static const struct snd_malloc_ops *snd_dma_ops[] = {
+ [SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
+ [SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
+#ifdef CONFIG_HAS_DMA
+ [SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
+ [SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
+ [SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
+ [SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
+#ifdef CONFIG_SND_DMA_SGBUF
+ [SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
+ [SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_ops,
+#endif
+#ifdef CONFIG_GENERIC_ALLOCATOR
+ [SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
+#endif /* CONFIG_GENERIC_ALLOCATOR */
+#endif /* CONFIG_HAS_DMA */
+};
-EXPORT_SYMBOL(snd_malloc_pages);
-EXPORT_SYMBOL(snd_free_pages);
+static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
+{
+ if (WARN_ON_ONCE(!dmab))
+ return NULL;
+ if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
+ dmab->dev.type >= ARRAY_SIZE(snd_dma_ops)))
+ return NULL;
+ return snd_dma_ops[dmab->dev.type];
+}
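The table above is the single dispatch point: snd_dma_get_ops() validates dmab->dev.type and indexes snd_dma_ops[], so every public entry point funnels through the per-type ops. Drivers never touch the table directly; they only pass a type constant when preallocating, e.g. (sketch, using the managed-buffer helper defined outside this file):

    snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
                                   &pci->dev, 64 * 1024, 1024 * 1024);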