summaryrefslogtreecommitdiff
path: root/bmm_lib.c
diff options
context:
space:
mode:
Diffstat (limited to 'bmm_lib.c')
-rw-r--r--bmm_lib.c726
1 files changed, 146 insertions, 580 deletions
diff --git a/bmm_lib.c b/bmm_lib.c
index d6a3271..3a202f3 100644
--- a/bmm_lib.c
+++ b/bmm_lib.c
@@ -16,13 +16,14 @@
#include <sys/stat.h>
#include <sys/mman.h>
#include <assert.h>
+#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
-#include "bmm_lib_priv.h"
+#include "bmm_drv.h"
#include "bmm_lib.h"
#include "rb.h"
@@ -34,35 +35,15 @@
#define pr_debug(fmt, arg...) do { if (0) fprintf(stderr, fmt, ##arg); } while (0)
#endif
-#define API_FEAT_MIN 0x0000
-#define API_COMP_MAX 0x0001
-#define API_FEAT(x) ((x) >> 16)
-#define API_COMP(x) ((x) & 0xffff)
-#define API_FEAT_GET_DMABUF_FD 0x0001
-#define API_FEAT_FREE_PHYS 0x0002
-#define API_FEAT_KEEP_FORK 0x0003
-
-static unsigned bmm_api;
static int bmm_fd = -1;
+static int bmm_use;
static pthread_mutex_t bmm_mutex = PTHREAD_MUTEX_INITIALIZER;
-static Rb_node virt_rb;
-static Rb_node phys_rb;
-
-struct bmm_virt_buffer;
-
-struct bmm_phys_buffer {
- unsigned long paddr;
- size_t size;
- int attr;
- struct bmm_virt_buffer *virt;
- unsigned attach;
-};
+static Rb_node bmm_virt_rb;
struct bmm_virt_buffer {
void *vaddr;
size_t size;
- unsigned phys_offset;
- struct bmm_phys_buffer *phys;
+ int fd;
};
static int cmp_virt(const void *key, const void *val)
@@ -70,17 +51,7 @@ static int cmp_virt(const void *key, const void *val)
const struct bmm_virt_buffer *vbuf = val;
void *k = (void *)key;
- return (k < vbuf->vaddr) ? -1 :
- (k - vbuf->vaddr < vbuf->size) ? 0 : 1;
-}
-
-static int cmp_phys(const void *key, const void *val)
-{
- const struct bmm_phys_buffer *pbuf = val;
- unsigned long k = (unsigned long)key;
-
- return (k < pbuf->paddr) ? -1 :
- (k - pbuf->paddr < pbuf->size) ? 0 : 1;
+ return (k < vbuf->vaddr) ? -1 : (k - vbuf->vaddr < vbuf->size) ? 0 : 1;
}
static struct bmm_virt_buffer *bmm_buf_find_virt(void *virt)
@@ -89,12 +60,12 @@ static struct bmm_virt_buffer *bmm_buf_find_virt(void *virt)
Rb_node node;
int found = 0;
- node = rb_find_key_n(virt_rb, virt, cmp_virt, &found);
+ node = rb_find_key_n(bmm_virt_rb, virt, cmp_virt, &found);
if (found) {
vbuf = rb_val(node);
- pr_debug("rb: %s(%p) phys=0x%08lx virt=%p size=0x%08zx\n",
- "find_virt", virt, vbuf->phys->paddr, vbuf->vaddr,
+ pr_debug("rb: %s(%p) virt=%p size=0x%08zx\n",
+ "find_virt", virt, vbuf->vaddr,
vbuf->size);
} else {
pr_debug("rb: %s(%p): not found\n",
@@ -104,66 +75,15 @@ static struct bmm_virt_buffer *bmm_buf_find_virt(void *virt)
return vbuf;
}
-static struct bmm_phys_buffer *bmm_buf_find_phys(unsigned long phys)
-{
- struct bmm_phys_buffer *pbuf = NULL;
- Rb_node node;
- int found = 0;
-
- node = rb_find_key_n(phys_rb, (void *)phys, cmp_phys, &found);
- if (found) {
- pbuf = rb_val(node);
-
- pr_debug("rb: %s(0x%08lx) phys=0x%08lx size=0x%08zx\n",
- "find_phys", (unsigned long)phys, pbuf->paddr, pbuf->size);
- } else {
- pr_debug("rb: %s(0x%08lx): not found\n",
- "find_phys", (unsigned long)phys);
- }
-
- return pbuf;
-}
-
-static void bmm_rb_phys_remove(struct bmm_phys_buffer *pbuf)
-{
- Rb_node node;
- int found = 0;
-
- pr_debug("%s: phys=0x%08lx size=0x%08zx\n",
- __FUNCTION__, pbuf->paddr, pbuf->size);
-
- node = rb_find_key_n(phys_rb, (void *)pbuf->paddr, cmp_phys, &found);
- assert(found);
- rb_delete_node(node);
-}
-
-static void bmm_rb_phys_insert(struct bmm_phys_buffer *pbuf)
-{
- Rb_node node;
- int found = 0;
-
- pr_debug("%s: phys=0x%08lx size=0x%08zx\n",
- __FUNCTION__, pbuf->paddr, pbuf->size);
-
- node = rb_find_key_n(phys_rb, (void *)pbuf->paddr, cmp_phys, &found);
- if (found) {
- struct bmm_phys_buffer *f = rb_val(node);
- pr_debug("rb: found: %p\n", f);
- pr_debug(" p0x%08lx s0x%08zx\n", f->paddr, f->size);
- }
- assert(found == 0);
- rb_insert_b(node, pbuf);
-}
-
static void bmm_rb_virt_remove(struct bmm_virt_buffer *vbuf)
{
Rb_node node;
int found = 0;
- pr_debug("%s: phys=0x%08lx virt=%p size=0x%08zx\n",
- __FUNCTION__, vbuf->phys->paddr, vbuf->vaddr, vbuf->size);
+ pr_debug("%s: virt=%p size=0x%08zx\n",
+ __FUNCTION__, vbuf->vaddr, vbuf->size);
- node = rb_find_key_n(virt_rb, vbuf->vaddr, cmp_virt, &found);
+ node = rb_find_key_n(bmm_virt_rb, vbuf->vaddr, cmp_virt, &found);
assert(found);
rb_delete_node(node);
}
@@ -173,583 +93,229 @@ static void bmm_rb_virt_insert(struct bmm_virt_buffer *vbuf)
Rb_node node;
int found = 0;
- pr_debug("%s: phys=0x%08lx virt=%p size=0x%08zx\n",
- __FUNCTION__, vbuf->phys->paddr, vbuf->vaddr, vbuf->size);
+ pr_debug("%s: virt=%p size=0x%08zx\n",
+ __FUNCTION__, vbuf->vaddr, vbuf->size);
- node = rb_find_key_n(virt_rb, vbuf->vaddr, cmp_virt, &found);
+ node = rb_find_key_n(bmm_virt_rb, vbuf->vaddr, cmp_virt, &found);
if (found) {
struct bmm_virt_buffer *f = rb_val(node);
pr_debug("rb: found: %p\n", f);
- pr_debug(" p0x%08lx v%p s0x%08zx\n", f->phys->paddr, f->vaddr, f->size);
+ pr_debug(" v%p s0x%08zx\n", f->vaddr, f->size);
}
assert(found == 0);
rb_insert_b(node, vbuf);
}
-static int bmm_get_api_version(void)
+/**
+ * bmm_dmabuf_alloc - allocate a DMA buffer
+ * @size: size of DMA buffer
+ * @attr: attributes of DMA buffer
+ * @align: requested alignment
+ *
+ * Returns: dma_buf file descriptor referring to buffer
+ */
+int bmm_dmabuf_alloc(unsigned long size, int attr, unsigned align)
{
- ioctl_arg_t io;
-
- /* Get the BMM API version */
- io.input = 0;
- io.arg = 0;
- io.output = 0;
- if (ioctl(bmm_fd, BMM_API_VERSION, &io) == 0 &&
- io.input == 0xdeaddead && io.arg == 0xfacebeef)
- bmm_api = io.output;
- else
- bmm_api = 0;
-
- pr_debug("BMM API version %08x\n", bmm_api);
-
- if (API_FEAT(bmm_api) < API_FEAT_MIN ||
- API_COMP(bmm_api) > API_COMP_MAX)
- return -1;
-
- return 0;
-}
+ struct bmm_dmabuf_alloc arg;
+ int ret;
-int bmm_init(void)
-{
- if (bmm_fd < 0) {
- virt_rb = make_rb();
- phys_rb = make_rb();
- if (!virt_rb || !phys_rb)
- goto err_rb;
+ /* obsolete, kept only for backward compatibility */
+ if (attr & BMM_ATTR_NONBUFFERABLE && attr & BMM_ATTR_NONCACHEABLE)
+ attr = BMM_ATTR_NONCACHED;
- /* attempt to open the BMM driver */
- bmm_fd = open(BMM_DEVICE_FILE, O_RDWR | O_CLOEXEC);
- pr_debug("BMM device fd: %d\n", bmm_fd);
- if (bmm_fd < 0)
- goto err_open;
+ if (!(attr & BMM_ATTR_NONBUFFERABLE) && attr & BMM_ATTR_NONCACHEABLE)
+ attr = BMM_ATTR_WRITECOMBINE;
- if (bmm_get_api_version() < 0)
- goto err_api;
- }
+ arg.size = size;
+ arg.align = align;
+ arg.attr = attr;
- return bmm_fd;
+ ret = ioctl(bmm_fd, BMM_DMABUF_ALLOC, &arg);
- err_api:
- close(bmm_fd);
- bmm_fd = -1;
- err_open:
- err_rb:
- if (phys_rb)
- rb_free_tree(phys_rb);
- if (virt_rb)
- rb_free_tree(virt_rb);
- phys_rb = virt_rb = NULL;
- return bmm_fd;
-}
-
-void bmm_exit(void)
-{
- if (bmm_fd >= 0) {
- close(bmm_fd);
- rb_free_tree(phys_rb);
- rb_free_tree(virt_rb);
- phys_rb = virt_rb = NULL;
- }
- bmm_fd = -1;
+ return ret < 0 ? ret : arg.fd;
}
-void *bmm_malloc_aligned_phys(unsigned long size, int attr, unsigned align,
- unsigned long *paddr)
+/**
+ * bmm_dmabuf_map - map an allocated DMA buffer
+ * @fd: dma_buf file descriptor
+ * @offset: page aligned offset into DMA buffer
+ * @size: size of buffer to map
+ *
+ * Map the requested buffer into userspace with the requested offset and size.
+ *
+ * Returns: address of mapped buffer, or NULL on error
+ */
+void *bmm_dmabuf_map(int fd, unsigned offset, unsigned size)
{
- struct bmm_phys_buffer *pbuf;
struct bmm_virt_buffer *vbuf;
- int ret;
void *vaddr;
- ioctl_arg_t io;
-
- if(size == 0)
- return NULL;
- if(bmm_init() < 0)
+ vaddr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, offset);
+ if (vaddr == (void *)-1)
return NULL;
- pbuf = malloc(sizeof(*pbuf));
vbuf = malloc(sizeof(*vbuf));
- if (!pbuf || !vbuf) {
- if (pbuf)
- free(pbuf);
- if (vbuf)
- free(vbuf);
+ if (!vbuf) {
+ munmap(vaddr, size);
return NULL;
}
- pr_debug("%s(size=%lu,attr=%x,align=%u,paddr=%p)\n",
- __FUNCTION__, size, attr, align, paddr);
-
- /* obsolete, only for back-compatible */
- if ((attr & BMM_ATTR_NONBUFFERABLE)&&(attr & BMM_ATTR_NONCACHEABLE))
- attr = BMM_ATTR_NONCACHED;
-
- if ((!(attr & BMM_ATTR_NONBUFFERABLE))&&(attr & BMM_ATTR_NONCACHEABLE))
- attr = BMM_ATTR_WRITECOMBINE;
-
- io.input = align;
- io.length = size;
- io.output = 0;
- io.arg = attr;
- ret = ioctl(bmm_fd, BMM_MALLOC_ALIGNED, &io);
- if (ret < 0 || io.output == 0)
- goto err_free_buf;
-
- pr_debug("%s return paddr = 0x%08lx\n", __FUNCTION__, io.output);
-
- vaddr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, bmm_fd, io.output);
- if ((int)vaddr == -1)
- goto err_free_bmm;
-
- if (paddr)
- *paddr = io.output;
-
- pbuf->paddr = io.output;
- pbuf->size = size;
- pbuf->attr = attr;
- pbuf->virt = vbuf;
- pbuf->attach = 1;
-
vbuf->vaddr = vaddr;
vbuf->size = size;
- vbuf->phys_offset = 0;
- vbuf->phys = pbuf;
+ vbuf->fd = fd;
pthread_mutex_lock(&bmm_mutex);
- bmm_rb_phys_insert(pbuf);
bmm_rb_virt_insert(vbuf);
pthread_mutex_unlock(&bmm_mutex);
- /*
- * If we keep bmm buffers across fork, they're reference
- * counted, so we can drop the allocation reference here.
- * This also "disowns" this buffer from this thread group.
- */
- if (API_FEAT(bmm_api) >= API_FEAT_KEEP_FORK) {
- io.input = io.output;
- io.output = 0;
- io.arg = 0;
- ioctl(bmm_fd, BMM_FREE_PHYS, &io);
- }
-
return vaddr;
-
- err_free_bmm:
- if (API_FEAT(bmm_api) >= API_FEAT_FREE_PHYS) {
- /* Modern APIs allow us to free this failed allocation */
- io.input = io.output;
- io.output = 0;
- io.arg = 0;
- ioctl(bmm_fd, BMM_FREE_PHYS, &io);
- }
-
- err_free_buf:
- free(pbuf);
- free(vbuf);
- return NULL;
}
-void *bmm_malloc_aligned(unsigned long size, int attr, unsigned align)
-{
- return bmm_malloc_aligned_phys(size, attr, align, NULL);
-}
-
-void *bmm_malloc(unsigned long size, int attr)
-{
- return bmm_malloc_aligned_phys(size, attr, sizeof(int), NULL);
-}
-
-void bmm_free(void *vaddr)
+/*
+ * bmm_dmabuf_unmap - unmap a dma_buf mapped previously with bmm_dmabuf_map
+ * @vaddr: virtual address returned from bmm_dmabuf_map()
+ */
+void bmm_dmabuf_unmap(void *vaddr)
{
- struct bmm_phys_buffer *pbuf = NULL;
struct bmm_virt_buffer *vbuf;
- ioctl_arg_t io;
-
- if (bmm_init() < 0)
- return;
pthread_mutex_lock(&bmm_mutex);
vbuf = bmm_buf_find_virt(vaddr);
- if (vbuf) {
- pbuf = vbuf->phys;
- if (pbuf->virt == vbuf)
- pbuf->virt = NULL;
+ if (vbuf)
bmm_rb_virt_remove(vbuf);
- if (--pbuf->attach == 0)
- bmm_rb_phys_remove(pbuf);
- }
pthread_mutex_unlock(&bmm_mutex);
- assert(vbuf);
- assert(pbuf);
-
- munmap(vbuf->vaddr, vbuf->size);
-
- /*
- * If we keep bmm buffers across fork, they're reference
- * counted, so we don't need to free them on the last munmap.
- */
- if (API_FEAT(bmm_api) < API_FEAT_KEEP_FORK) {
- if (API_FEAT(bmm_api) >= API_FEAT_FREE_PHYS) {
- io.input = pbuf->paddr;
- io.output = 0;
- io.arg = 0;
- ioctl(bmm_fd, BMM_FREE_PHYS, &io);
- } else {
- io.input = (unsigned long)vbuf->vaddr;
- io.output = 0;
- io.arg = 0;
- ioctl(bmm_fd, BMM_FREE, &io);
- }
+ if (vbuf) {
+ munmap(vbuf->vaddr, vbuf->size);
+ free(vbuf);
}
- if (pbuf->attach == 0)
- free(pbuf);
- free(vbuf);
}
-void *bmm_attach(unsigned long paddr, unsigned long len)
+int bmm_dmabuf_flush(int fd, void *addr, unsigned offset, unsigned size, unsigned direction)
{
- struct bmm_phys_buffer *pbuf, *new_pbuf;
struct bmm_virt_buffer *vbuf;
- void *vaddr;
-
- if(len == 0)
- return NULL;
-
- if(bmm_init() < 0)
- return NULL;
-
- /* Try to map it */
- vaddr = mmap(0, len, PROT_READ | PROT_WRITE, MAP_SHARED, bmm_fd, paddr);
- if (vaddr == (void *)-1)
- return NULL;
-
- vbuf = malloc(sizeof(*vbuf));
- new_pbuf = malloc(sizeof(*new_pbuf));
- if (!vbuf || !new_pbuf) {
- if (vbuf)
- free(vbuf);
- if (new_pbuf)
- free(new_pbuf);
- munmap(vaddr, len);
- return NULL;
- }
-
- vbuf->vaddr = vaddr;
- vbuf->size = len;
+ int ret = 0;
pthread_mutex_lock(&bmm_mutex);
- pbuf = bmm_buf_find_phys(paddr);
- if (pbuf) {
- /*
- * If we find the buffer, it means we already know about
- * this buffer. Increment the number of attachments we
- * know about for it, and insert the new virtual buffer.
- */
- pbuf->attach++;
- } else {
- /*
- * Otherwise, we're importing a new buffer which we know
- * nothing about. Create a new pbuf entry.
- */
- new_pbuf->paddr = paddr;
- new_pbuf->size = len;
- new_pbuf->attr = 0;
- new_pbuf->virt = NULL;
- new_pbuf->attach = 1;
- pbuf = new_pbuf;
-
- bmm_rb_phys_insert(new_pbuf);
- }
-
- if (!pbuf->virt)
- pbuf->virt = vbuf;
- vbuf->phys_offset = paddr - pbuf->paddr;
- vbuf->phys = pbuf;
- bmm_rb_virt_insert(vbuf);
-
+ vbuf = bmm_buf_find_virt(addr);
pthread_mutex_unlock(&bmm_mutex);
- /* If the new pbuf wasn't used, free it. */
- if (pbuf != new_pbuf)
- free(new_pbuf);
-
- return vaddr;
-}
-
-void bmm_detach(void *vaddr, unsigned long len)
-{
- struct bmm_virt_buffer *vbuf;
- struct bmm_phys_buffer *pbuf;
-
- if(bmm_init() < 0)
- return;
-
- pthread_mutex_lock(&bmm_mutex);
- vbuf = bmm_buf_find_virt(vaddr);
if (vbuf) {
- pbuf = vbuf->phys;
- assert(pbuf->attach > 0);
+ struct bmm_dmabuf_flush arg;
- if (pbuf->virt == vbuf)
- pbuf->virt = NULL;
+ if (fd != vbuf->fd) {
+ errno = EINVAL;
+ return -1;
+ }
- bmm_rb_virt_remove(vbuf);
+ arg.size = size;
+ arg.offset = offset;
+ arg.ptr = (uint64_t)(uintptr_t)addr;
+ arg.fd = vbuf->fd;
+ arg.direction = direction;
- if (--pbuf->attach == 0)
- bmm_rb_phys_remove(pbuf);
- else
- pbuf = NULL;
- } else {
- pbuf = NULL;
+ ret = ioctl(bmm_fd, BMM_DMABUF_FLUSH, &arg);
}
- pthread_mutex_unlock(&bmm_mutex);
-
- munmap(vbuf->vaddr, vbuf->size);
-
- free(vbuf);
- if (pbuf)
- free(pbuf);
-}
-
-void *bmm_get_vaddr(unsigned long paddr)
-{
- struct bmm_phys_buffer *pbuf;
- void *va = NULL;
-
- if (bmm_init() < 0)
- return 0;
-
- pthread_mutex_lock(&bmm_mutex);
- pbuf = bmm_buf_find_phys(paddr);
- if (pbuf && pbuf->virt)
- va = pbuf->virt->vaddr + (paddr - pbuf->paddr);
- pthread_mutex_unlock(&bmm_mutex);
-
- return va;
+ return ret;
}
-unsigned long bmm_get_paddr(void *vaddr)
+/**
+ * bmm_dmabuf_fd - return the dma_buf file descriptor for a mapped dma_buf
+ * @vaddr: virtual address returned from bmm_dmabuf_map()
+ *
+ * Returns the dma_buf file descriptor for the dma_buf previously mapped
+ * with bmm_dmabuf_map(), or -1 on error.
+ */
+int bmm_dmabuf_fd(void *vaddr)
{
struct bmm_virt_buffer *vbuf;
- unsigned long pa = 0;
-
- if (bmm_init() < 0)
- return 0;
+ int fd = -1;
pthread_mutex_lock(&bmm_mutex);
vbuf = bmm_buf_find_virt(vaddr);
if (vbuf)
- pa = vbuf->phys->paddr + vbuf->phys_offset +
- (vaddr - vbuf->vaddr);
+ fd = vbuf->fd;
pthread_mutex_unlock(&bmm_mutex);
- return pa;
+ return fd;
}
-int bmm_get_dmabuf_fd(void *vaddr)
-{
- int ret;
- ioctl_arg_t io;
-
- if (bmm_init() < 0 || API_FEAT(bmm_api) < API_FEAT_GET_DMABUF_FD)
- return -1;
-
- io.input = (unsigned long)vaddr;
- io.output = 0;
- io.arg = 0;
-
- ret = ioctl(bmm_fd, BMM_GET_DMABUF_FD, &io);
-
- return ret < 0 ? -1 : io.output;
-}
-
-int bmm_get_phys_dmabuf_fd(unsigned long paddr)
+/**
+ * bmm_dmabuf_free - free the dma_buf file descriptor
+ * @fd: dma_buf file descriptor returned by bmm_dmabuf_alloc()
+ *
+ * Closes the file descriptor associated with the DMA buffer. The actual
+ * DMA buffer will only be freed when all references to it are gone - in
+ * other words, after the buffer has been completely unmapped and any
+ * imported references are also released.
+ *
+ * After this call has completed, the file descriptor returned by a call
+ * to bmm_dmabuf_fd() is invalid.
+ */
+void bmm_dmabuf_free(int fd)
{
- int ret;
- ioctl_arg_t io;
-
- if (bmm_init() < 0 || API_FEAT(bmm_api) < API_FEAT_GET_DMABUF_FD)
- return -1;
-
- io.input = paddr;
- io.output = 0;
- io.arg = 0;
-
- ret = ioctl(bmm_fd, BMM_GET_PHYS_DMABUF_FD, &io);
-
- return ret < 0 ? -1 : io.output;
+ close(fd);
}
-unsigned long bmm_get_mem_size(void *vaddr)
+int bmm_init(void)
{
- struct bmm_virt_buffer *vbuf;
- unsigned long size = 0;
-
- if (bmm_init() < 0)
- return 0;
-
pthread_mutex_lock(&bmm_mutex);
- vbuf = bmm_buf_find_virt(vaddr);
- if (vbuf)
- size = vbuf->size;
- pthread_mutex_unlock(&bmm_mutex);
-
- return size;
-}
-
-int bmm_get_mem_attr(void *vaddr)
-{
- struct bmm_virt_buffer *vbuf;
- int attr = 0;
-
- if (bmm_init() < 0)
- return 0;
+ if (bmm_use++ == 0) {
+ bmm_virt_rb = make_rb();
+ if (!bmm_virt_rb)
+ goto err_rb;
- pthread_mutex_lock(&bmm_mutex);
- vbuf = bmm_buf_find_virt(vaddr);
- if (vbuf)
- attr = vbuf->phys->attr;
+ /* attempt to open the BMM driver */
+ bmm_fd = open(BMM_DEVICE_FILE, O_RDWR | O_CLOEXEC);
+ pr_debug("BMM device fd: %d\n", bmm_fd);
+ if (bmm_fd < 0)
+ goto err_open;
+ }
pthread_mutex_unlock(&bmm_mutex);
- return attr;
-}
-
-int bmm_set_mem_attr(void *vaddr, int attr)
-{
- struct bmm_virt_buffer *vbuf;
- int ret;
- ioctl_arg_t io;
-
- if(bmm_init() < 0)
- return 0;
-
- io.input = (unsigned long)vaddr;
- io.output = 0;
- io.arg = attr;
- ret = ioctl(bmm_fd, BMM_SET_MEM_ATTR, &io);
- if(ret < 0)
- return 0;
-
- attr = io.output;
+ return bmm_fd;
- pthread_mutex_lock(&bmm_mutex);
- vbuf = bmm_buf_find_virt(vaddr);
- if (vbuf)
- vbuf->phys->attr = attr;
+ err_open:
+ rb_free_tree(bmm_virt_rb);
+ bmm_virt_rb = NULL;
+ err_rb:
+ bmm_use--;
pthread_mutex_unlock(&bmm_mutex);
-
- return attr;
-}
-
-unsigned long bmm_get_total_space(void)
-{
- int ret;
- ioctl_arg_t io;
-
- if(bmm_init() < 0)
- return 0;
-
- io.input = 0;
- io.output = 0;
- io.arg = 0;
- ret = ioctl(bmm_fd, BMM_GET_TOTAL_SPACE, &io);
- if(ret < 0)
- return 0;
-
- return io.output;
-}
-
-unsigned long bmm_get_free_space(void)
-{
- int ret;
- ioctl_arg_t io;
-
- if(bmm_init() < 0)
- return 0;
-
- io.input = 0;
- io.output = 0;
- io.arg = 0;
- ret = ioctl(bmm_fd, BMM_GET_FREE_SPACE, &io);
- if(ret < 0)
- return 0;
-
- return io.output;
-}
-
-unsigned long bmm_get_allocated_space(void)
-{
- int ret;
- ioctl_arg_t io;
-
- if(bmm_init() < 0)
- return 0;
-
- io.input = 0;
- io.output = 0;
- io.arg = 0;
- ret = ioctl(bmm_fd, BMM_GET_ALLOCATED_SPACE, &io);
- if(ret < 0)
- return 0;
-
- return io.output;
+ return bmm_fd;
}
-void bmm_flush_cache(void *vaddr, int dir)
+void bmm_exit(void)
{
- ioctl_arg_t io;
-
- if(bmm_init() < 0)
- return;
-
- io.input = (unsigned long)vaddr;
- io.output = 0;
- io.arg = dir;
- ioctl(bmm_fd, BMM_FLUSH_CACHE, &io);
-}
+ pthread_mutex_lock(&bmm_mutex);
+ if (bmm_use && --bmm_use == 0) {
+ Rb_node node;
-void bmm_flush_cache_range(void *start, size_t size, int direction)
-{
- ioctl_arg_t io;
+ /* Clean up dangling buffers which the app hasn't released. */
+ rb_traverse(node, bmm_virt_rb) {
+ struct bmm_virt_buffer *vbuf = rb_val(node);
- if(bmm_init() < 0)
- return;
+ munmap(vbuf->vaddr, vbuf->size);
+ close(vbuf->fd);
+ free(vbuf);
+ }
- io.input = (unsigned long)start;
- io.length = size;
- io.output = 0;
- io.arg = direction;
- ioctl(bmm_fd, BMM_CONSISTENT_SYNC, &io);
-}
+ rb_free_tree(bmm_virt_rb);
+ bmm_virt_rb = NULL;
-void bmm_flush_user(void *start, long size, int direction)
-{
- ioctl_arg_t io;
-
- if (bmm_init() < 0)
- return;
- if (size <= 0)
- return;
-
- io.input = (unsigned long)start;
- io.length = size;
- io.output = 0;
- io.arg = direction;
- ioctl(bmm_fd, BMM_SYNC_USER, &io);
+ close(bmm_fd);
+ bmm_fd = -1;
+ }
+ pthread_mutex_unlock(&bmm_mutex);
}
-void bmm_dump(void)
+/*
+ * Destructor for this library: ensure it gets cleaned up when the
+ * library is unloaded.
+ */
+static void __attribute__((destructor)) bmm_destruct(void)
{
- ioctl_arg_t io;
-
- if(bmm_init() < 0)
- return;
-
- io.input = 0;
- io.output = 0;
- io.arg = 0;
- ioctl(bmm_fd, BMM_DUMP, &io);
+ if (bmm_use) {
+ bmm_use = 1;
+ bmm_exit();
+ }
}
-