author     Linus Torvalds <torvalds@linux-foundation.org>  2017-05-05 18:16:23 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-05-05 18:16:23 -0700
commit     c6a677c6f37bb7abc85ba7e3465e82b9f7eb1d91 (patch)
tree       9d0d4bb2e150837297cddc5be7f1b4950e9ab228 /drivers/staging/android/ion
parent     e87d51ac61f88ae44fe14b34abe08566032d726b (diff)
parent     11270059e8d0b6f80801fac910c4ef751ca05c4c (diff)
Merge tag 'staging-4.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging
Pull staging/IIO updates from Greg KH:
 "Here is the big staging tree update for 4.12-rc1. It's a big one, adding
  about 350k new lines of crap^Wcode, mostly all in a big dump of media
  drivers from Intel. But there's other new drivers in here as well,
  yet-another-wifi driver, new IIO drivers, and a new crypto accelerator.

  We also deleted a bunch of stuff, mostly in patch cleanups, but also the
  Android ION code has shrunk a lot, and the Android low memory killer
  driver was finally deleted, much to the celebration of the -mm developers.

  All of these have been in linux-next with a few build issues that will
  show up when you merge to your tree"

Merge conflicts in the new rtl8723bs driver (due to the wifi changes this
merge window) handled as per linux-next, courtesy of Stephen Rothwell.

* tag 'staging-4.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging: (1182 commits)
  staging: fsl-mc/dpio: add cpu <--> LE conversion for dpaa2_fd
  staging: ks7010: remove line continuations in quoted strings
  staging: vt6656: use tabs instead of spaces
  staging: android: ion: Fix unnecessary initialization of static variable
  staging: media: atomisp: fix range checking on clk_num
  staging: media: atomisp: fix misspelled word in comment
  staging: media: atomisp: kmap() can't fail
  staging: atomisp: remove #ifdef for runtime PM functions
  staging: atomisp: satm include directory is gone
  atomisp: remove some more unused files
  atomisp: remove hmm_load/store/clear indirections
  atomisp: kill off mmgr_free
  atomisp: clean up the hmm init/cleanup indirections
  atomisp: handle allocation calls before init in the hmm layer
  staging: fsl-dpaa2/eth: Add maintainer for Ethernet driver
  staging: fsl-dpaa2/eth: Add TODO file
  staging: fsl-dpaa2/eth: Add trace points
  staging: fsl-dpaa2/eth: Add driver specific stats
  staging: fsl-dpaa2/eth: Add ethtool support
  staging: fsl-dpaa2/eth: Add Freescale DPAA2 Ethernet driver
  ...
Diffstat (limited to 'drivers/staging/android/ion')
-rw-r--r--  drivers/staging/android/ion/Kconfig                 |   56
-rw-r--r--  drivers/staging/android/ion/Makefile                |   18
-rw-r--r--  drivers/staging/android/ion/compat_ion.c            |  195
-rw-r--r--  drivers/staging/android/ion/compat_ion.h            |   29
-rw-r--r--  drivers/staging/android/ion/hisilicon/Kconfig       |    5
-rw-r--r--  drivers/staging/android/ion/hisilicon/Makefile      |    1
-rw-r--r--  drivers/staging/android/ion/hisilicon/hi6220_ion.c  |  113
-rw-r--r--  drivers/staging/android/ion/ion-ioctl.c             |   85
-rw-r--r--  drivers/staging/android/ion/ion.c                   | 1168
-rw-r--r--  drivers/staging/android/ion/ion.h                   |  387
-rw-r--r--  drivers/staging/android/ion/ion_carveout_heap.c     |   37
-rw-r--r--  drivers/staging/android/ion/ion_chunk_heap.c        |   27
-rw-r--r--  drivers/staging/android/ion/ion_cma_heap.c          |  125
-rw-r--r--  drivers/staging/android/ion/ion_dummy_driver.c      |  156
-rw-r--r--  drivers/staging/android/ion/ion_heap.c              |   68
-rw-r--r--  drivers/staging/android/ion/ion_of.c                |  184
-rw-r--r--  drivers/staging/android/ion/ion_of.h                |   37
-rw-r--r--  drivers/staging/android/ion/ion_page_pool.c         |    6
-rw-r--r--  drivers/staging/android/ion/ion_priv.h              |  473
-rw-r--r--  drivers/staging/android/ion/ion_system_heap.c       |   53
-rw-r--r--  drivers/staging/android/ion/ion_test.c              |  305
-rw-r--r--  drivers/staging/android/ion/tegra/Makefile          |    1
-rw-r--r--  drivers/staging/android/ion/tegra/tegra_ion.c       |   80
23 files changed, 569 insertions(+), 3040 deletions(-)
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index c8fb4134c55d..a517b2d29f1b 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -10,45 +10,35 @@ menuconfig ION
If you're not using Android it's probably safe to
say N here.
-config ION_TEST
- tristate "Ion Test Device"
+config ION_SYSTEM_HEAP
+ bool "Ion system heap"
depends on ION
help
- Choose this option to create a device that can be used to test the
- kernel and device side ION functions.
+ Choose this option to enable the Ion system heap. The system heap
+ is backed by pages from the buddy allocator. If in doubt, say Y.
-config ION_DUMMY
- bool "Dummy Ion driver"
+config ION_CARVEOUT_HEAP
+ bool "Ion carveout heap support"
depends on ION
help
- Provides a dummy ION driver that registers the
- /dev/ion device and some basic heaps. This can
- be used for testing the ION infrastructure if
- one doesn't have access to hardware drivers that
- use ION.
+ Choose this option to enable carveout heaps with Ion. Carveout heaps
+ are backed by memory reserved from the system. Allocation times are
+ typically faster at the cost of memory not being used. Unless you
+ know your system has these regions, you should say N here.
-config ION_TEGRA
- tristate "Ion for Tegra"
- depends on ARCH_TEGRA && ION
+config ION_CHUNK_HEAP
+ bool "Ion chunk heap support"
+ depends on ION
help
- Choose this option if you wish to use ion on an nVidia Tegra.
+ Choose this option to enable chunk heaps with Ion. This heap is
+ similar in function to the carveout heap but memory is broken down
+ into smaller chunk sizes, typically corresponding to a TLB size.
+ Unless you know your system has these regions, you should say N here.
-config ION_HISI
- tristate "Ion for Hisilicon"
- depends on ARCH_HISI && ION
- select ION_OF
+config ION_CMA_HEAP
+ bool "Ion CMA heap support"
+ depends on ION && CMA
help
- Choose this option if you wish to use ion on Hisilicon Platform.
-
-source "drivers/staging/android/ion/hisilicon/Kconfig"
-
-config ION_OF
- bool "Devicetree support for Ion"
- depends on ION && OF_ADDRESS
- help
- Provides base support for defining Ion heaps in devicetree
- and setting them up. Also includes functions for platforms
- to parse the devicetree and expand for their own custom
- extensions
-
- If using Ion and devicetree, you should say Y here
+ Choose this option to enable CMA heaps with Ion. This heap is backed
+ by the Contiguous Memory Allocator (CMA). If your system has these
+ regions, you should say Y here.
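
For illustration only (not from this patch): with the Kconfig split above, a
kernel configuration now selects individual Ion heaps rather than getting all
of them from CONFIG_ION alone. A sketch of a defconfig fragment, assuming a
platform that has CMA enabled and no reserved carveout or chunk regions:

CONFIG_ION=y
CONFIG_ION_SYSTEM_HEAP=y
CONFIG_ION_CMA_HEAP=y
# CONFIG_ION_CARVEOUT_HEAP is not set
# CONFIG_ION_CHUNK_HEAP is not set
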
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
index 5d630a088381..eb7eeed6ae40 100644
--- a/drivers/staging/android/ion/Makefile
+++ b/drivers/staging/android/ion/Makefile
@@ -1,13 +1,5 @@
-obj-$(CONFIG_ION) += ion.o ion-ioctl.o ion_heap.o \
- ion_page_pool.o ion_system_heap.o \
- ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o
-obj-$(CONFIG_ION_TEST) += ion_test.o
-ifdef CONFIG_COMPAT
-obj-$(CONFIG_ION) += compat_ion.o
-endif
-
-obj-$(CONFIG_ION_DUMMY) += ion_dummy_driver.o
-obj-$(CONFIG_ION_TEGRA) += tegra/
-obj-$(CONFIG_ION_HISI) += hisilicon/
-obj-$(CONFIG_ION_OF) += ion_of.o
-
+obj-$(CONFIG_ION) += ion.o ion-ioctl.o ion_heap.o
+obj-$(CONFIG_ION_SYSTEM_HEAP) += ion_system_heap.o ion_page_pool.o
+obj-$(CONFIG_ION_CARVEOUT_HEAP) += ion_carveout_heap.o
+obj-$(CONFIG_ION_CHUNK_HEAP) += ion_chunk_heap.o
+obj-$(CONFIG_ION_CMA_HEAP) += ion_cma_heap.o
diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c
deleted file mode 100644
index 9a978d21785e..000000000000
--- a/drivers/staging/android/ion/compat_ion.c
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * drivers/staging/android/ion/compat_ion.c
- *
- * Copyright (C) 2013 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/compat.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-
-#include "ion.h"
-#include "compat_ion.h"
-
-/* See drivers/staging/android/uapi/ion.h for the definition of these structs */
-struct compat_ion_allocation_data {
- compat_size_t len;
- compat_size_t align;
- compat_uint_t heap_id_mask;
- compat_uint_t flags;
- compat_int_t handle;
-};
-
-struct compat_ion_custom_data {
- compat_uint_t cmd;
- compat_ulong_t arg;
-};
-
-struct compat_ion_handle_data {
- compat_int_t handle;
-};
-
-#define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
- struct compat_ion_allocation_data)
-#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, \
- struct compat_ion_handle_data)
-#define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \
- struct compat_ion_custom_data)
-
-static int compat_get_ion_allocation_data(
- struct compat_ion_allocation_data __user *data32,
- struct ion_allocation_data __user *data)
-{
- compat_size_t s;
- compat_uint_t u;
- compat_int_t i;
- int err;
-
- err = get_user(s, &data32->len);
- err |= put_user(s, &data->len);
- err |= get_user(s, &data32->align);
- err |= put_user(s, &data->align);
- err |= get_user(u, &data32->heap_id_mask);
- err |= put_user(u, &data->heap_id_mask);
- err |= get_user(u, &data32->flags);
- err |= put_user(u, &data->flags);
- err |= get_user(i, &data32->handle);
- err |= put_user(i, &data->handle);
-
- return err;
-}
-
-static int compat_get_ion_handle_data(
- struct compat_ion_handle_data __user *data32,
- struct ion_handle_data __user *data)
-{
- compat_int_t i;
- int err;
-
- err = get_user(i, &data32->handle);
- err |= put_user(i, &data->handle);
-
- return err;
-}
-
-static int compat_put_ion_allocation_data(
- struct compat_ion_allocation_data __user *data32,
- struct ion_allocation_data __user *data)
-{
- compat_size_t s;
- compat_uint_t u;
- compat_int_t i;
- int err;
-
- err = get_user(s, &data->len);
- err |= put_user(s, &data32->len);
- err |= get_user(s, &data->align);
- err |= put_user(s, &data32->align);
- err |= get_user(u, &data->heap_id_mask);
- err |= put_user(u, &data32->heap_id_mask);
- err |= get_user(u, &data->flags);
- err |= put_user(u, &data32->flags);
- err |= get_user(i, &data->handle);
- err |= put_user(i, &data32->handle);
-
- return err;
-}
-
-static int compat_get_ion_custom_data(
- struct compat_ion_custom_data __user *data32,
- struct ion_custom_data __user *data)
-{
- compat_uint_t cmd;
- compat_ulong_t arg;
- int err;
-
- err = get_user(cmd, &data32->cmd);
- err |= put_user(cmd, &data->cmd);
- err |= get_user(arg, &data32->arg);
- err |= put_user(arg, &data->arg);
-
- return err;
-};
-
-long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- long ret;
-
- if (!filp->f_op->unlocked_ioctl)
- return -ENOTTY;
-
- switch (cmd) {
- case COMPAT_ION_IOC_ALLOC:
- {
- struct compat_ion_allocation_data __user *data32;
- struct ion_allocation_data __user *data;
- int err;
-
- data32 = compat_ptr(arg);
- data = compat_alloc_user_space(sizeof(*data));
- if (!data)
- return -EFAULT;
-
- err = compat_get_ion_allocation_data(data32, data);
- if (err)
- return err;
- ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_ALLOC,
- (unsigned long)data);
- err = compat_put_ion_allocation_data(data32, data);
- return ret ? ret : err;
- }
- case COMPAT_ION_IOC_FREE:
- {
- struct compat_ion_handle_data __user *data32;
- struct ion_handle_data __user *data;
- int err;
-
- data32 = compat_ptr(arg);
- data = compat_alloc_user_space(sizeof(*data));
- if (!data)
- return -EFAULT;
-
- err = compat_get_ion_handle_data(data32, data);
- if (err)
- return err;
-
- return filp->f_op->unlocked_ioctl(filp, ION_IOC_FREE,
- (unsigned long)data);
- }
- case COMPAT_ION_IOC_CUSTOM: {
- struct compat_ion_custom_data __user *data32;
- struct ion_custom_data __user *data;
- int err;
-
- data32 = compat_ptr(arg);
- data = compat_alloc_user_space(sizeof(*data));
- if (!data)
- return -EFAULT;
-
- err = compat_get_ion_custom_data(data32, data);
- if (err)
- return err;
-
- return filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM,
- (unsigned long)data);
- }
- case ION_IOC_SHARE:
- case ION_IOC_MAP:
- case ION_IOC_IMPORT:
- case ION_IOC_SYNC:
- return filp->f_op->unlocked_ioctl(filp, cmd,
- (unsigned long)compat_ptr(arg));
- default:
- return -ENOIOCTLCMD;
- }
-}
diff --git a/drivers/staging/android/ion/compat_ion.h b/drivers/staging/android/ion/compat_ion.h
deleted file mode 100644
index 9da8f917670b..000000000000
--- a/drivers/staging/android/ion/compat_ion.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * drivers/staging/android/ion/compat_ion.h
- *
- * Copyright (C) 2013 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_COMPAT_ION_H
-#define _LINUX_COMPAT_ION_H
-
-#if IS_ENABLED(CONFIG_COMPAT)
-
-long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-
-#else
-
-#define compat_ion_ioctl NULL
-
-#endif /* CONFIG_COMPAT */
-#endif /* _LINUX_COMPAT_ION_H */
diff --git a/drivers/staging/android/ion/hisilicon/Kconfig b/drivers/staging/android/ion/hisilicon/Kconfig
deleted file mode 100644
index 2b4bd0798290..000000000000
--- a/drivers/staging/android/ion/hisilicon/Kconfig
+++ /dev/null
@@ -1,5 +0,0 @@
-config HI6220_ION
- bool "Hi6220 ION Driver"
- depends on ARCH_HISI && ION
- help
- Build the Hisilicon Hi6220 ion driver.
diff --git a/drivers/staging/android/ion/hisilicon/Makefile b/drivers/staging/android/ion/hisilicon/Makefile
deleted file mode 100644
index 2a89414280ac..000000000000
--- a/drivers/staging/android/ion/hisilicon/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_HI6220_ION) += hi6220_ion.o
diff --git a/drivers/staging/android/ion/hisilicon/hi6220_ion.c b/drivers/staging/android/ion/hisilicon/hi6220_ion.c
deleted file mode 100644
index 0de7897fd4bf..000000000000
--- a/drivers/staging/android/ion/hisilicon/hi6220_ion.c
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Hisilicon Hi6220 ION Driver
- *
- * Copyright (c) 2015 Hisilicon Limited.
- *
- * Author: Chen Feng <puck.chen@hisilicon.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#define pr_fmt(fmt) "Ion: " fmt
-
-#include <linux/err.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/mm.h>
-#include "../ion_priv.h"
-#include "../ion.h"
-#include "../ion_of.h"
-
-struct hisi_ion_dev {
- struct ion_heap **heaps;
- struct ion_device *idev;
- struct ion_platform_data *data;
-};
-
-static struct ion_of_heap hisi_heaps[] = {
- PLATFORM_HEAP("hisilicon,sys_user", 0,
- ION_HEAP_TYPE_SYSTEM, "sys_user"),
- PLATFORM_HEAP("hisilicon,sys_contig", 1,
- ION_HEAP_TYPE_SYSTEM_CONTIG, "sys_contig"),
- PLATFORM_HEAP("hisilicon,cma", ION_HEAP_TYPE_DMA, ION_HEAP_TYPE_DMA,
- "cma"),
- {}
-};
-
-static int hi6220_ion_probe(struct platform_device *pdev)
-{
- struct hisi_ion_dev *ipdev;
- int i;
-
- ipdev = devm_kzalloc(&pdev->dev, sizeof(*ipdev), GFP_KERNEL);
- if (!ipdev)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ipdev);
-
- ipdev->idev = ion_device_create(NULL);
- if (IS_ERR(ipdev->idev))
- return PTR_ERR(ipdev->idev);
-
- ipdev->data = ion_parse_dt(pdev, hisi_heaps);
- if (IS_ERR(ipdev->data))
- return PTR_ERR(ipdev->data);
-
- ipdev->heaps = devm_kzalloc(&pdev->dev,
- sizeof(struct ion_heap) * ipdev->data->nr,
- GFP_KERNEL);
- if (!ipdev->heaps) {
- ion_destroy_platform_data(ipdev->data);
- return -ENOMEM;
- }
-
- for (i = 0; i < ipdev->data->nr; i++) {
- ipdev->heaps[i] = ion_heap_create(&ipdev->data->heaps[i]);
- if (!ipdev->heaps) {
- ion_destroy_platform_data(ipdev->data);
- return -ENOMEM;
- }
- ion_device_add_heap(ipdev->idev, ipdev->heaps[i]);
- }
- return 0;
-}
-
-static int hi6220_ion_remove(struct platform_device *pdev)
-{
- struct hisi_ion_dev *ipdev;
- int i;
-
- ipdev = platform_get_drvdata(pdev);
-
- for (i = 0; i < ipdev->data->nr; i++)
- ion_heap_destroy(ipdev->heaps[i]);
-
- ion_destroy_platform_data(ipdev->data);
- ion_device_destroy(ipdev->idev);
-
- return 0;
-}
-
-static const struct of_device_id hi6220_ion_match_table[] = {
- {.compatible = "hisilicon,hi6220-ion"},
- {},
-};
-
-static struct platform_driver hi6220_ion_driver = {
- .probe = hi6220_ion_probe,
- .remove = hi6220_ion_remove,
- .driver = {
- .name = "ion-hi6220",
- .of_match_table = hi6220_ion_match_table,
- },
-};
-
-static int __init hi6220_ion_init(void)
-{
- return platform_driver_register(&hi6220_ion_driver);
-}
-
-subsys_initcall(hi6220_ion_init);
diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c
index 9ff815ad1cb1..76427e4773a8 100644
--- a/drivers/staging/android/ion/ion-ioctl.c
+++ b/drivers/staging/android/ion/ion-ioctl.c
@@ -19,14 +19,9 @@
#include <linux/uaccess.h>
#include "ion.h"
-#include "ion_priv.h"
-#include "compat_ion.h"
union ion_ioctl_arg {
- struct ion_fd_data fd;
struct ion_allocation_data allocation;
- struct ion_handle_data handle;
- struct ion_custom_data custom;
struct ion_heap_query query;
};
@@ -51,10 +46,6 @@ static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
switch (cmd) {
- case ION_IOC_SYNC:
- case ION_IOC_FREE:
- case ION_IOC_CUSTOM:
- return _IOC_WRITE;
default:
return _IOC_DIR(cmd);
}
@@ -62,9 +53,6 @@ static unsigned int ion_ioctl_dir(unsigned int cmd)
long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct ion_client *client = filp->private_data;
- struct ion_device *dev = client->dev;
- struct ion_handle *cleanup_handle = NULL;
int ret = 0;
unsigned int dir;
union ion_ioctl_arg data;
@@ -92,87 +80,28 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
switch (cmd) {
case ION_IOC_ALLOC:
{
- struct ion_handle *handle;
+ int fd;
- handle = ion_alloc(client, data.allocation.len,
- data.allocation.align,
+ fd = ion_alloc(data.allocation.len,
data.allocation.heap_id_mask,
data.allocation.flags);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
+ if (fd < 0)
+ return fd;
- data.allocation.handle = handle->id;
+ data.allocation.fd = fd;
- cleanup_handle = handle;
- break;
- }
- case ION_IOC_FREE:
- {
- struct ion_handle *handle;
-
- mutex_lock(&client->lock);
- handle = ion_handle_get_by_id_nolock(client,
- data.handle.handle);
- if (IS_ERR(handle)) {
- mutex_unlock(&client->lock);
- return PTR_ERR(handle);
- }
- ion_free_nolock(client, handle);
- ion_handle_put_nolock(handle);
- mutex_unlock(&client->lock);
- break;
- }
- case ION_IOC_SHARE:
- case ION_IOC_MAP:
- {
- struct ion_handle *handle;
-
- handle = ion_handle_get_by_id(client, data.handle.handle);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
- data.fd.fd = ion_share_dma_buf_fd(client, handle);
- ion_handle_put(handle);
- if (data.fd.fd < 0)
- ret = data.fd.fd;
- break;
- }
- case ION_IOC_IMPORT:
- {
- struct ion_handle *handle;
-
- handle = ion_import_dma_buf_fd(client, data.fd.fd);
- if (IS_ERR(handle))
- ret = PTR_ERR(handle);
- else
- data.handle.handle = handle->id;
- break;
- }
- case ION_IOC_SYNC:
- {
- ret = ion_sync_for_device(client, data.fd.fd);
- break;
- }
- case ION_IOC_CUSTOM:
- {
- if (!dev->custom_ioctl)
- return -ENOTTY;
- ret = dev->custom_ioctl(client, data.custom.cmd,
- data.custom.arg);
break;
}
case ION_IOC_HEAP_QUERY:
- ret = ion_query_heaps(client, &data.query);
+ ret = ion_query_heaps(&data.query);
break;
default:
return -ENOTTY;
}
if (dir & _IOC_READ) {
- if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
- if (cleanup_handle)
- ion_free(client, cleanup_handle);
+ if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd)))
return -EFAULT;
- }
}
return ret;
}
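
For illustration only (not part of the commit): after this rework,
ION_IOC_ALLOC returns a dma-buf file descriptor in ion_allocation_data.fd
instead of an opaque handle, and ION_IOC_FREE/SHARE/MAP/IMPORT/SYNC/CUSTOM are
gone, so userspace manages the buffer entirely through that fd. A minimal C
sketch, assuming a local copy of the staging uapi header
(drivers/staging/android/uapi/ion.h) and assuming heap id 0 is the system heap
on the target system:

/*
 * Hypothetical userspace sketch (not from the patch): allocate a buffer via
 * the reworked ION_IOC_ALLOC and use the returned dma-buf fd directly.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "ion.h"	/* assumed copy of drivers/staging/android/uapi/ion.h */

int main(void)
{
	struct ion_allocation_data alloc;
	size_t len = 4096;
	void *p;
	int ion_fd, buf_fd;

	ion_fd = open("/dev/ion", O_RDWR);
	if (ion_fd < 0)
		return 1;

	memset(&alloc, 0, sizeof(alloc));
	alloc.len = len;
	alloc.heap_id_mask = 1 << 0;	/* assumption: heap id 0 is the system heap */
	alloc.flags = 0;		/* uncached buffer */

	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0)
		return 1;
	buf_fd = alloc.fd;	/* a dma-buf fd; there is no ION_IOC_FREE any more */

	/* The fd can be mapped here or passed to another process or driver. */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, buf_fd, 0);
	if (p != MAP_FAILED)
		munmap(p, len);

	close(buf_fd);	/* dropping the last reference frees the buffer */
	close(ion_fd);
	return 0;
}
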
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 95a7f1648c00..03d3a4fce0e2 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -39,40 +39,15 @@
#include <linux/sched/task.h>
#include "ion.h"
-#include "ion_priv.h"
-#include "compat_ion.h"
-bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
-{
- return (buffer->flags & ION_FLAG_CACHED) &&
- !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
-}
+static struct ion_device *internal_dev;
+static int heap_id;
bool ion_buffer_cached(struct ion_buffer *buffer)
{
return !!(buffer->flags & ION_FLAG_CACHED);
}
-static inline struct page *ion_buffer_page(struct page *page)
-{
- return (struct page *)((unsigned long)page & ~(1UL));
-}
-
-static inline bool ion_buffer_page_is_dirty(struct page *page)
-{
- return !!((unsigned long)page & 1UL);
-}
-
-static inline void ion_buffer_page_dirty(struct page **page)
-{
- *page = (struct page *)((unsigned long)(*page) | 1UL);
-}
-
-static inline void ion_buffer_page_clean(struct page **page)
-{
- *page = (struct page *)((unsigned long)(*page) & ~(1UL));
-}
-
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
struct ion_buffer *buffer)
@@ -103,13 +78,11 @@ static void ion_buffer_add(struct ion_device *dev,
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
struct ion_device *dev,
unsigned long len,
- unsigned long align,
unsigned long flags)
{
struct ion_buffer *buffer;
struct sg_table *table;
- struct scatterlist *sg;
- int i, ret;
+ int ret;
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer)
@@ -117,17 +90,15 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
buffer->heap = heap;
buffer->flags = flags;
- kref_init(&buffer->ref);
- ret = heap->ops->allocate(heap, buffer, len, align, flags);
+ ret = heap->ops->allocate(heap, buffer, len, flags);
if (ret) {
if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
goto err2;
ion_heap_freelist_drain(heap, 0);
- ret = heap->ops->allocate(heap, buffer, len, align,
- flags);
+ ret = heap->ops->allocate(heap, buffer, len, flags);
if (ret)
goto err2;
}
@@ -142,43 +113,11 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
buffer->dev = dev;
buffer->size = len;
- if (ion_buffer_fault_user_mappings(buffer)) {
- int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
- struct scatterlist *sg;
- int i, j, k = 0;
-
- buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
- if (!buffer->pages) {
- ret = -ENOMEM;
- goto err1;
- }
-
- for_each_sg(table->sgl, sg, table->nents, i) {
- struct page *page = sg_page(sg);
-
- for (j = 0; j < sg->length / PAGE_SIZE; j++)
- buffer->pages[k++] = page++;
- }
- }
-
buffer->dev = dev;
buffer->size = len;
INIT_LIST_HEAD(&buffer->vmas);
+ INIT_LIST_HEAD(&buffer->attachments);
mutex_init(&buffer->lock);
- /*
- * this will set up dma addresses for the sglist -- it is not
- * technically correct as per the dma api -- a specific
- * device isn't really taking ownership here. However, in practice on
- * our systems the only dma_address space is physical addresses.
- * Additionally, we can't afford the overhead of invalidating every
- * allocation via dma_map_sg. The implicit contract here is that
- * memory coming from the heaps is ready for dma, ie if it has a
- * cached mapping that mapping has been invalidated
- */
- for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
- sg_dma_address(sg) = sg_phys(sg);
- sg_dma_len(sg) = sg->length;
- }
mutex_lock(&dev->buffer_lock);
ion_buffer_add(dev, buffer);
mutex_unlock(&dev->buffer_lock);
@@ -200,9 +139,8 @@ void ion_buffer_destroy(struct ion_buffer *buffer)
kfree(buffer);
}
-static void _ion_buffer_destroy(struct kref *kref)
+static void _ion_buffer_destroy(struct ion_buffer *buffer)
{
- struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
struct ion_heap *heap = buffer->heap;
struct ion_device *dev = buffer->dev;
@@ -216,273 +154,6 @@ static void _ion_buffer_destroy(struct kref *kref)
ion_buffer_destroy(buffer);
}
-static void ion_buffer_get(struct ion_buffer *buffer)
-{
- kref_get(&buffer->ref);
-}
-
-static int ion_buffer_put(struct ion_buffer *buffer)
-{
- return kref_put(&buffer->ref, _ion_buffer_destroy);
-}
-
-static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
-{
- mutex_lock(&buffer->lock);
- buffer->handle_count++;
- mutex_unlock(&buffer->lock);
-}
-
-static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
-{
- /*
- * when a buffer is removed from a handle, if it is not in
- * any other handles, copy the taskcomm and the pid of the
- * process it's being removed from into the buffer. At this
- * point there will be no way to track what processes this buffer is
- * being used by, it only exists as a dma_buf file descriptor.
- * The taskcomm and pid can provide a debug hint as to where this fd
- * is in the system
- */
- mutex_lock(&buffer->lock);
- buffer->handle_count--;
- BUG_ON(buffer->handle_count < 0);
- if (!buffer->handle_count) {
- struct task_struct *task;
-
- task = current->group_leader;
- get_task_comm(buffer->task_comm, task);
- buffer->pid = task_pid_nr(task);
- }
- mutex_unlock(&buffer->lock);
-}
-
-static struct ion_handle *ion_handle_create(struct ion_client *client,
- struct ion_buffer *buffer)
-{
- struct ion_handle *handle;
-
- handle = kzalloc(sizeof(*handle), GFP_KERNEL);
- if (!handle)
- return ERR_PTR(-ENOMEM);
- kref_init(&handle->ref);
- RB_CLEAR_NODE(&handle->node);
- handle->client = client;
- ion_buffer_get(buffer);
- ion_buffer_add_to_handle(buffer);
- handle->buffer = buffer;
-
- return handle;
-}
-
-static void ion_handle_kmap_put(struct ion_handle *);
-
-static void ion_handle_destroy(struct kref *kref)
-{
- struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
- struct ion_client *client = handle->client;
- struct ion_buffer *buffer = handle->buffer;
-
- mutex_lock(&buffer->lock);
- while (handle->kmap_cnt)
- ion_handle_kmap_put(handle);
- mutex_unlock(&buffer->lock);
-
- idr_remove(&client->idr, handle->id);
- if (!RB_EMPTY_NODE(&handle->node))
- rb_erase(&handle->node, &client->handles);
-
- ion_buffer_remove_from_handle(buffer);
- ion_buffer_put(buffer);
-
- kfree(handle);
-}
-
-static void ion_handle_get(struct ion_handle *handle)
-{
- kref_get(&handle->ref);
-}
-
-int ion_handle_put_nolock(struct ion_handle *handle)
-{
- return kref_put(&handle->ref, ion_handle_destroy);
-}
-
-int ion_handle_put(struct ion_handle *handle)
-{
- struct ion_client *client = handle->client;
- int ret;
-
- mutex_lock(&client->lock);
- ret = ion_handle_put_nolock(handle);
- mutex_unlock(&client->lock);
-
- return ret;
-}
-
-static struct ion_handle *ion_handle_lookup(struct ion_client *client,
- struct ion_buffer *buffer)
-{
- struct rb_node *n = client->handles.rb_node;
-
- while (n) {
- struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
-
- if (buffer < entry->buffer)
- n = n->rb_left;
- else if (buffer > entry->buffer)
- n = n->rb_right;
- else
- return entry;
- }
- return ERR_PTR(-EINVAL);
-}
-
-struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
- int id)
-{
- struct ion_handle *handle;
-
- handle = idr_find(&client->idr, id);
- if (handle)
- ion_handle_get(handle);
-
- return handle ? handle : ERR_PTR(-EINVAL);
-}
-
-struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
- int id)
-{
- struct ion_handle *handle;
-
- mutex_lock(&client->lock);
- handle = ion_handle_get_by_id_nolock(client, id);
- mutex_unlock(&client->lock);
-
- return handle;
-}
-
-static bool ion_handle_validate(struct ion_client *client,
- struct ion_handle *handle)
-{
- WARN_ON(!mutex_is_locked(&client->lock));
- return idr_find(&client->idr, handle->id) == handle;
-}
-
-static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
-{
- int id;
- struct rb_node **p = &client->handles.rb_node;
- struct rb_node *parent = NULL;
- struct ion_handle *entry;
-
- id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
- if (id < 0)
- return id;
-
- handle->id = id;
-
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_handle, node);
-
- if (handle->buffer < entry->buffer)
- p = &(*p)->rb_left;
- else if (handle->buffer > entry->buffer)
- p = &(*p)->rb_right;
- else
- WARN(1, "%s: buffer already found.", __func__);
- }
-
- rb_link_node(&handle->node, parent, p);
- rb_insert_color(&handle->node, &client->handles);
-
- return 0;
-}
-
-struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
- size_t align, unsigned int heap_id_mask,
- unsigned int flags)
-{
- struct ion_handle *handle;
- struct ion_device *dev = client->dev;
- struct ion_buffer *buffer = NULL;
- struct ion_heap *heap;
- int ret;
-
- pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
- len, align, heap_id_mask, flags);
- /*
- * traverse the list of heaps available in this system in priority
- * order. If the heap type is supported by the client, and matches the
- * request of the caller allocate from it. Repeat until allocate has
- * succeeded or all heaps have been tried
- */
- len = PAGE_ALIGN(len);
-
- if (!len)
- return ERR_PTR(-EINVAL);
-
- down_read(&dev->lock);
- plist_for_each_entry(heap, &dev->heaps, node) {
- /* if the caller didn't specify this heap id */
- if (!((1 << heap->id) & heap_id_mask))
- continue;
- buffer = ion_buffer_create(heap, dev, len, align, flags);
- if (!IS_ERR(buffer))
- break;
- }
- up_read(&dev->lock);
-
- if (buffer == NULL)
- return ERR_PTR(-ENODEV);
-
- if (IS_ERR(buffer))
- return ERR_CAST(buffer);
-
- handle = ion_handle_create(client, buffer);
-
- /*
- * ion_buffer_create will create a buffer with a ref_cnt of 1,
- * and ion_handle_create will take a second reference, drop one here
- */
- ion_buffer_put(buffer);
-
- if (IS_ERR(handle))
- return handle;
-
- mutex_lock(&client->lock);
- ret = ion_handle_add(client, handle);
- mutex_unlock(&client->lock);
- if (ret) {
- ion_handle_put(handle);
- handle = ERR_PTR(ret);
- }
-
- return handle;
-}
-EXPORT_SYMBOL(ion_alloc);
-
-void ion_free_nolock(struct ion_client *client,
- struct ion_handle *handle)
-{
- if (!ion_handle_validate(client, handle)) {
- WARN(1, "%s: invalid handle passed to free.\n", __func__);
- return;
- }
- ion_handle_put_nolock(handle);
-}
-
-void ion_free(struct ion_client *client, struct ion_handle *handle)
-{
- BUG_ON(client != handle->client);
-
- mutex_lock(&client->lock);
- ion_free_nolock(client, handle);
- mutex_unlock(&client->lock);
-}
-EXPORT_SYMBOL(ion_free);
-
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
void *vaddr;
@@ -502,22 +173,6 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
return vaddr;
}
-static void *ion_handle_kmap_get(struct ion_handle *handle)
-{
- struct ion_buffer *buffer = handle->buffer;
- void *vaddr;
-
- if (handle->kmap_cnt) {
- handle->kmap_cnt++;
- return buffer->vaddr;
- }
- vaddr = ion_buffer_kmap_get(buffer);
- if (IS_ERR(vaddr))
- return vaddr;
- handle->kmap_cnt++;
- return vaddr;
-}
-
static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
buffer->kmap_cnt--;
@@ -527,408 +182,117 @@ static void ion_buffer_kmap_put(struct ion_buffer *buffer)
}
}
-static void ion_handle_kmap_put(struct ion_handle *handle)
+static struct sg_table *dup_sg_table(struct sg_table *table)
{
- struct ion_buffer *buffer = handle->buffer;
+ struct sg_table *new_table;
+ int ret, i;
+ struct scatterlist *sg, *new_sg;
- if (!handle->kmap_cnt) {
- WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
- return;
- }
- handle->kmap_cnt--;
- if (!handle->kmap_cnt)
- ion_buffer_kmap_put(buffer);
-}
-
-void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
-{
- struct ion_buffer *buffer;
- void *vaddr;
-
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- pr_err("%s: invalid handle passed to map_kernel.\n",
- __func__);
- mutex_unlock(&client->lock);
- return ERR_PTR(-EINVAL);
- }
-
- buffer = handle->buffer;
-
- if (!handle->buffer->heap->ops->map_kernel) {
- pr_err("%s: map_kernel is not implemented by this heap.\n",
- __func__);
- mutex_unlock(&client->lock);
- return ERR_PTR(-ENODEV);
- }
-
- mutex_lock(&buffer->lock);
- vaddr = ion_handle_kmap_get(handle);
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
- return vaddr;
-}
-EXPORT_SYMBOL(ion_map_kernel);
-
-void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
-{
- struct ion_buffer *buffer;
-
- mutex_lock(&client->lock);
- buffer = handle->buffer;
- mutex_lock(&buffer->lock);
- ion_handle_kmap_put(handle);
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
-}
-EXPORT_SYMBOL(ion_unmap_kernel);
-
-static struct mutex debugfs_mutex;
-static struct rb_root *ion_root_client;
-static int is_client_alive(struct ion_client *client)
-{
- struct rb_node *node;
- struct ion_client *tmp;
- struct ion_device *dev;
-
- node = ion_root_client->rb_node;
- dev = container_of(ion_root_client, struct ion_device, clients);
-
- down_read(&dev->lock);
- while (node) {
- tmp = rb_entry(node, struct ion_client, node);
- if (client < tmp) {
- node = node->rb_left;
- } else if (client > tmp) {
- node = node->rb_right;
- } else {
- up_read(&dev->lock);
- return 1;
- }
- }
-
- up_read(&dev->lock);
- return 0;
-}
+ new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
+ if (!new_table)
+ return ERR_PTR(-ENOMEM);
-static int ion_debug_client_show(struct seq_file *s, void *unused)
-{
- struct ion_client *client = s->private;
- struct rb_node *n;
- size_t sizes[ION_NUM_HEAP_IDS] = {0};
- const char *names[ION_NUM_HEAP_IDS] = {NULL};
- int i;
-
- mutex_lock(&debugfs_mutex);
- if (!is_client_alive(client)) {
- seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
- client);
- mutex_unlock(&debugfs_mutex);
- return 0;
+ ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
+ if (ret) {
+ kfree(new_table);
+ return ERR_PTR(-ENOMEM);
}
- mutex_lock(&client->lock);
- for (n = rb_first(&client->handles); n; n = rb_next(n)) {
- struct ion_handle *handle = rb_entry(n, struct ion_handle,
- node);
- unsigned int id = handle->buffer->heap->id;
-
- if (!names[id])
- names[id] = handle->buffer->heap->name;
- sizes[id] += handle->buffer->size;
+ new_sg = new_table->sgl;
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ memcpy(new_sg, sg, sizeof(*sg));
+ sg->dma_address = 0;
+ new_sg = sg_next(new_sg);
}
- mutex_unlock(&client->lock);
- mutex_unlock(&debugfs_mutex);
- seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
- for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
- if (!names[i])
- continue;
- seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
- }
- return 0;
+ return new_table;
}
-static int ion_debug_client_open(struct inode *inode, struct file *file)
+static void free_duped_table(struct sg_table *table)
{
- return single_open(file, ion_debug_client_show, inode->i_private);
+ sg_free_table(table);
+ kfree(table);
}
-static const struct file_operations debug_client_fops = {
- .open = ion_debug_client_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+struct ion_dma_buf_attachment {
+ struct device *dev;
+ struct sg_table *table;
+ struct list_head list;
};
-static int ion_get_client_serial(const struct rb_root *root,
- const unsigned char *name)
-{
- int serial = -1;
- struct rb_node *node;
-
- for (node = rb_first(root); node; node = rb_next(node)) {
- struct ion_client *client = rb_entry(node, struct ion_client,
- node);
-
- if (strcmp(client->name, name))
- continue;
- serial = max(serial, client->display_serial);
- }
- return serial + 1;
-}
-
-struct ion_client *ion_client_create(struct ion_device *dev,
- const char *name)
+static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
+ struct dma_buf_attachment *attachment)
{
- struct ion_client *client;
- struct task_struct *task;
- struct rb_node **p;
- struct rb_node *parent = NULL;
- struct ion_client *entry;
- pid_t pid;
-
- if (!name) {
- pr_err("%s: Name cannot be null\n", __func__);
- return ERR_PTR(-EINVAL);
- }
+ struct ion_dma_buf_attachment *a;
+ struct sg_table *table;
+ struct ion_buffer *buffer = dmabuf->priv;
- get_task_struct(current->group_leader);
- task_lock(current->group_leader);
- pid = task_pid_nr(current->group_leader);
- /*
- * don't bother to store task struct for kernel threads,
- * they can't be killed anyway
- */
- if (current->group_leader->flags & PF_KTHREAD) {
- put_task_struct(current->group_leader);
- task = NULL;
- } else {
- task = current->group_leader;
- }
- task_unlock(current->group_leader);
-
- client = kzalloc(sizeof(*client), GFP_KERNEL);
- if (!client)
- goto err_put_task_struct;
-
- client->dev = dev;
- client->handles = RB_ROOT;
- idr_init(&client->idr);
- mutex_init(&client->lock);
- client->task = task;
- client->pid = pid;
- client->name = kstrdup(name, GFP_KERNEL);
- if (!client->name)
- goto err_free_client;
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
- down_write(&dev->lock);
- client->display_serial = ion_get_client_serial(&dev->clients, name);
- client->display_name = kasprintf(
- GFP_KERNEL, "%s-%d", name, client->display_serial);
- if (!client->display_name) {
- up_write(&dev->lock);
- goto err_free_client_name;
+ table = dup_sg_table(buffer->sg_table);
+ if (IS_ERR(table)) {
+ kfree(a);
+ return -ENOMEM;
}
- p = &dev->clients.rb_node;
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_client, node);
- if (client < entry)
- p = &(*p)->rb_left;
- else if (client > entry)
- p = &(*p)->rb_right;
- }
- rb_link_node(&client->node, parent, p);
- rb_insert_color(&client->node, &dev->clients);
-
- client->debug_root = debugfs_create_file(client->display_name, 0664,
- dev->clients_debug_root,
- client, &debug_client_fops);
- if (!client->debug_root) {
- char buf[256], *path;
-
- path = dentry_path(dev->clients_debug_root, buf, 256);
- pr_err("Failed to create client debugfs at %s/%s\n",
- path, client->display_name);
- }
+ a->table = table;
+ a->dev = dev;
+ INIT_LIST_HEAD(&a->list);
- up_write(&dev->lock);
+ attachment->priv = a;
- return client;
+ mutex_lock(&buffer->lock);
+ list_add(&a->list, &buffer->attachments);
+ mutex_unlock(&buffer->lock);
-err_free_client_name:
- kfree(client->name);
-err_free_client:
- kfree(client);
-err_put_task_struct:
- if (task)
- put_task_struct(current->group_leader);
- return ERR_PTR(-ENOMEM);
+ return 0;
}
-EXPORT_SYMBOL(ion_client_create);
-void ion_client_destroy(struct ion_client *client)
+static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
{
- struct ion_device *dev = client->dev;
- struct rb_node *n;
-
- pr_debug("%s: %d\n", __func__, __LINE__);
- mutex_lock(&debugfs_mutex);
- while ((n = rb_first(&client->handles))) {
- struct ion_handle *handle = rb_entry(n, struct ion_handle,
- node);
- ion_handle_destroy(&handle->ref);
- }
-
- idr_destroy(&client->idr);
+ struct ion_dma_buf_attachment *a = attachment->priv;
+ struct ion_buffer *buffer = dmabuf->priv;
- down_write(&dev->lock);
- if (client->task)
- put_task_struct(client->task);
- rb_erase(&client->node, &dev->clients);
- debugfs_remove_recursive(client->debug_root);
- up_write(&dev->lock);
+ free_duped_table(a->table);
+ mutex_lock(&buffer->lock);
+ list_del(&a->list);
+ mutex_unlock(&buffer->lock);
- kfree(client->display_name);
- kfree(client->name);
- kfree(client);
- mutex_unlock(&debugfs_mutex);
+ kfree(a);
}
-EXPORT_SYMBOL(ion_client_destroy);
-static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
- struct device *dev,
- enum dma_data_direction direction);
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction direction)
{
- struct dma_buf *dmabuf = attachment->dmabuf;
- struct ion_buffer *buffer = dmabuf->priv;
-
- ion_buffer_sync_for_device(buffer, attachment->dev, direction);
- return buffer->sg_table;
-}
-
-static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
- struct sg_table *table,
- enum dma_data_direction direction)
-{
-}
-
-void ion_pages_sync_for_device(struct device *dev, struct page *page,
- size_t size, enum dma_data_direction dir)
-{
- struct scatterlist sg;
-
- sg_init_table(&sg, 1);
- sg_set_page(&sg, page, size, 0);
- /*
- * This is not correct - sg_dma_address needs a dma_addr_t that is valid
- * for the targeted device, but this works on the currently targeted
- * hardware.
- */
- sg_dma_address(&sg) = page_to_phys(page);
- dma_sync_sg_for_device(dev, &sg, 1, dir);
-}
-
-struct ion_vma_list {
- struct list_head list;
- struct vm_area_struct *vma;
-};
-
-static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
- struct device *dev,
- enum dma_data_direction dir)
-{
- struct ion_vma_list *vma_list;
- int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
- int i;
-
- pr_debug("%s: syncing for device %s\n", __func__,
- dev ? dev_name(dev) : "null");
-
- if (!ion_buffer_fault_user_mappings(buffer))
- return;
-
- mutex_lock(&buffer->lock);
- for (i = 0; i < pages; i++) {
- struct page *page = buffer->pages[i];
-
- if (ion_buffer_page_is_dirty(page))
- ion_pages_sync_for_device(dev, ion_buffer_page(page),
- PAGE_SIZE, dir);
-
- ion_buffer_page_clean(buffer->pages + i);
- }
- list_for_each_entry(vma_list, &buffer->vmas, list) {
- struct vm_area_struct *vma = vma_list->vma;
-
- zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start);
- }
- mutex_unlock(&buffer->lock);
-}
-
-static int ion_vm_fault(struct vm_fault *vmf)
-{
- struct ion_buffer *buffer = vmf->vma->vm_private_data;
- unsigned long pfn;
+ struct ion_dma_buf_attachment *a = attachment->priv;
+ struct sg_table *table;
int ret;
- mutex_lock(&buffer->lock);
- ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
- BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
-
- pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
- ret = vm_insert_pfn(vmf->vma, vmf->address, pfn);
- mutex_unlock(&buffer->lock);
- if (ret)
- return VM_FAULT_ERROR;
+ table = a->table;
- return VM_FAULT_NOPAGE;
-}
-
-static void ion_vm_open(struct vm_area_struct *vma)
-{
- struct ion_buffer *buffer = vma->vm_private_data;
- struct ion_vma_list *vma_list;
+ if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
+ direction)){
+ ret = -ENOMEM;
+ goto err;
+ }
+ return table;
- vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
- if (!vma_list)
- return;
- vma_list->vma = vma;
- mutex_lock(&buffer->lock);
- list_add(&vma_list->list, &buffer->vmas);
- mutex_unlock(&buffer->lock);
- pr_debug("%s: adding %p\n", __func__, vma);
+err:
+ free_duped_table(table);
+ return ERR_PTR(ret);
}
-static void ion_vm_close(struct vm_area_struct *vma)
+static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
{
- struct ion_buffer *buffer = vma->vm_private_data;
- struct ion_vma_list *vma_list, *tmp;
-
- pr_debug("%s\n", __func__);
- mutex_lock(&buffer->lock);
- list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
- if (vma_list->vma != vma)
- continue;
- list_del(&vma_list->list);
- kfree(vma_list);
- pr_debug("%s: deleting %p\n", __func__, vma);
- break;
- }
- mutex_unlock(&buffer->lock);
+ dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
}
-static const struct vm_operations_struct ion_vma_ops = {
- .open = ion_vm_open,
- .close = ion_vm_close,
- .fault = ion_vm_fault,
-};
-
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
struct ion_buffer *buffer = dmabuf->priv;
@@ -940,15 +304,6 @@ static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
return -EINVAL;
}
- if (ion_buffer_fault_user_mappings(buffer)) {
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
- VM_DONTDUMP;
- vma->vm_private_data = buffer;
- vma->vm_ops = &ion_vma_ops;
- ion_vm_open(vma);
- return 0;
- }
-
if (!(buffer->flags & ION_FLAG_CACHED))
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
@@ -968,7 +323,7 @@ static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
struct ion_buffer *buffer = dmabuf->priv;
- ion_buffer_put(buffer);
+ _ion_buffer_destroy(buffer);
}
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
@@ -988,26 +343,45 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
{
struct ion_buffer *buffer = dmabuf->priv;
void *vaddr;
+ struct ion_dma_buf_attachment *a;
- if (!buffer->heap->ops->map_kernel) {
- pr_err("%s: map kernel is not implemented by this heap.\n",
- __func__);
- return -ENODEV;
+ /*
+ * TODO: Move this elsewhere because we don't always need a vaddr
+ */
+ if (buffer->heap->ops->map_kernel) {
+ mutex_lock(&buffer->lock);
+ vaddr = ion_buffer_kmap_get(buffer);
+ mutex_unlock(&buffer->lock);
}
+
mutex_lock(&buffer->lock);
- vaddr = ion_buffer_kmap_get(buffer);
+ list_for_each_entry(a, &buffer->attachments, list) {
+ dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
+ DMA_BIDIRECTIONAL);
+ }
mutex_unlock(&buffer->lock);
- return PTR_ERR_OR_ZERO(vaddr);
+
+ return 0;
}
static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
{
struct ion_buffer *buffer = dmabuf->priv;
+ struct ion_dma_buf_attachment *a;
+
+ if (buffer->heap->ops->map_kernel) {
+ mutex_lock(&buffer->lock);
+ ion_buffer_kmap_put(buffer);
+ mutex_unlock(&buffer->lock);
+ }
mutex_lock(&buffer->lock);
- ion_buffer_kmap_put(buffer);
+ list_for_each_entry(a, &buffer->attachments, list) {
+ dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
+ DMA_BIDIRECTIONAL);
+ }
mutex_unlock(&buffer->lock);
return 0;
@@ -1018,6 +392,8 @@ static const struct dma_buf_ops dma_buf_ops = {
.unmap_dma_buf = ion_unmap_dma_buf,
.mmap = ion_mmap,
.release = ion_dma_buf_release,
+ .attach = ion_dma_buf_attach,
+ .detach = ion_dma_buf_detatch,
.begin_cpu_access = ion_dma_buf_begin_cpu_access,
.end_cpu_access = ion_dma_buf_end_cpu_access,
.map_atomic = ion_dma_buf_kmap,
@@ -1026,24 +402,44 @@ static const struct dma_buf_ops dma_buf_ops = {
.unmap = ion_dma_buf_kunmap,
};
-struct dma_buf *ion_share_dma_buf(struct ion_client *client,
- struct ion_handle *handle)
+int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
{
+ struct ion_device *dev = internal_dev;
+ struct ion_buffer *buffer = NULL;
+ struct ion_heap *heap;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
- struct ion_buffer *buffer;
+ int fd;
struct dma_buf *dmabuf;
- bool valid_handle;
- mutex_lock(&client->lock);
- valid_handle = ion_handle_validate(client, handle);
- if (!valid_handle) {
- WARN(1, "%s: invalid handle passed to share.\n", __func__);
- mutex_unlock(&client->lock);
- return ERR_PTR(-EINVAL);
+ pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
+ len, heap_id_mask, flags);
+ /*
+ * traverse the list of heaps available in this system in priority
+ * order. If the heap type is supported by the client, and matches the
+ * request of the caller allocate from it. Repeat until allocate has
+ * succeeded or all heaps have been tried
+ */
+ len = PAGE_ALIGN(len);
+
+ if (!len)
+ return -EINVAL;
+
+ down_read(&dev->lock);
+ plist_for_each_entry(heap, &dev->heaps, node) {
+ /* if the caller didn't specify this heap id */
+ if (!((1 << heap->id) & heap_id_mask))
+ continue;
+ buffer = ion_buffer_create(heap, dev, len, flags);
+ if (!IS_ERR(buffer))
+ break;
}
- buffer = handle->buffer;
- ion_buffer_get(buffer);
- mutex_unlock(&client->lock);
+ up_read(&dev->lock);
+
+ if (buffer == NULL)
+ return -ENODEV;
+
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
exp_info.ops = &dma_buf_ops;
exp_info.size = buffer->size;
@@ -1052,22 +448,9 @@ struct dma_buf *ion_share_dma_buf(struct ion_client *client,
dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(dmabuf)) {
- ion_buffer_put(buffer);
- return dmabuf;
- }
-
- return dmabuf;
-}
-EXPORT_SYMBOL(ion_share_dma_buf);
-
-int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
-{
- struct dma_buf *dmabuf;
- int fd;
-
- dmabuf = ion_share_dma_buf(client, handle);
- if (IS_ERR(dmabuf))
+ _ion_buffer_destroy(buffer);
return PTR_ERR(dmabuf);
+ }
fd = dma_buf_fd(dmabuf, O_CLOEXEC);
if (fd < 0)
@@ -1075,93 +458,10 @@ int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
return fd;
}
-EXPORT_SYMBOL(ion_share_dma_buf_fd);
-
-struct ion_handle *ion_import_dma_buf(struct ion_client *client,
- struct dma_buf *dmabuf)
-{
- struct ion_buffer *buffer;
- struct ion_handle *handle;
- int ret;
-
- /* if this memory came from ion */
-
- if (dmabuf->ops != &dma_buf_ops) {
- pr_err("%s: can not import dmabuf from another exporter\n",
- __func__);
- return ERR_PTR(-EINVAL);
- }
- buffer = dmabuf->priv;
-
- mutex_lock(&client->lock);
- /* if a handle exists for this buffer just take a reference to it */
- handle = ion_handle_lookup(client, buffer);
- if (!IS_ERR(handle)) {
- ion_handle_get(handle);
- mutex_unlock(&client->lock);
- goto end;
- }
-
- handle = ion_handle_create(client, buffer);
- if (IS_ERR(handle)) {
- mutex_unlock(&client->lock);
- goto end;
- }
-
- ret = ion_handle_add(client, handle);
- mutex_unlock(&client->lock);
- if (ret) {
- ion_handle_put(handle);
- handle = ERR_PTR(ret);
- }
-
-end:
- return handle;
-}
-EXPORT_SYMBOL(ion_import_dma_buf);
-
-struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
-{
- struct dma_buf *dmabuf;
- struct ion_handle *handle;
-
- dmabuf = dma_buf_get(fd);
- if (IS_ERR(dmabuf))
- return ERR_CAST(dmabuf);
-
- handle = ion_import_dma_buf(client, dmabuf);
- dma_buf_put(dmabuf);
- return handle;
-}
-EXPORT_SYMBOL(ion_import_dma_buf_fd);
-
-int ion_sync_for_device(struct ion_client *client, int fd)
-{
- struct dma_buf *dmabuf;
- struct ion_buffer *buffer;
-
- dmabuf = dma_buf_get(fd);
- if (IS_ERR(dmabuf))
- return PTR_ERR(dmabuf);
-
- /* if this memory came from ion */
- if (dmabuf->ops != &dma_buf_ops) {
- pr_err("%s: can not sync dmabuf from another exporter\n",
- __func__);
- dma_buf_put(dmabuf);
- return -EINVAL;
- }
- buffer = dmabuf->priv;
-
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_BIDIRECTIONAL);
- dma_buf_put(dmabuf);
- return 0;
-}
-int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
+int ion_query_heaps(struct ion_heap_query *query)
{
- struct ion_device *dev = client->dev;
+ struct ion_device *dev = internal_dev;
struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
int ret = -EINVAL, cnt = 0, max_cnt;
struct ion_heap *heap;
@@ -1198,138 +498,18 @@ int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
}
query->cnt = cnt;
+ ret = 0;
out:
up_read(&dev->lock);
return ret;
}
-static int ion_release(struct inode *inode, struct file *file)
-{
- struct ion_client *client = file->private_data;
-
- pr_debug("%s: %d\n", __func__, __LINE__);
- ion_client_destroy(client);
- return 0;
-}
-
-static int ion_open(struct inode *inode, struct file *file)
-{
- struct miscdevice *miscdev = file->private_data;
- struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
- struct ion_client *client;
- char debug_name[64];
-
- pr_debug("%s: %d\n", __func__, __LINE__);
- snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
- client = ion_client_create(dev, debug_name);
- if (IS_ERR(client))
- return PTR_ERR(client);
- file->private_data = client;
-
- return 0;
-}
-
static const struct file_operations ion_fops = {
.owner = THIS_MODULE,
- .open = ion_open,
- .release = ion_release,
.unlocked_ioctl = ion_ioctl,
- .compat_ioctl = compat_ion_ioctl,
-};
-
-static size_t ion_debug_heap_total(struct ion_client *client,
- unsigned int id)
-{
- size_t size = 0;
- struct rb_node *n;
-
- mutex_lock(&client->lock);
- for (n = rb_first(&client->handles); n; n = rb_next(n)) {
- struct ion_handle *handle = rb_entry(n,
- struct ion_handle,
- node);
- if (handle->buffer->heap->id == id)
- size += handle->buffer->size;
- }
- mutex_unlock(&client->lock);
- return size;
-}
-
-static int ion_debug_heap_show(struct seq_file *s, void *unused)
-{
- struct ion_heap *heap = s->private;
- struct ion_device *dev = heap->dev;
- struct rb_node *n;
- size_t total_size = 0;
- size_t total_orphaned_size = 0;
-
- seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
- seq_puts(s, "----------------------------------------------------\n");
-
- mutex_lock(&debugfs_mutex);
- for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
- struct ion_client *client = rb_entry(n, struct ion_client,
- node);
- size_t size = ion_debug_heap_total(client, heap->id);
-
- if (!size)
- continue;
- if (client->task) {
- char task_comm[TASK_COMM_LEN];
-
- get_task_comm(task_comm, client->task);
- seq_printf(s, "%16s %16u %16zu\n", task_comm,
- client->pid, size);
- } else {
- seq_printf(s, "%16s %16u %16zu\n", client->name,
- client->pid, size);
- }
- }
- mutex_unlock(&debugfs_mutex);
-
- seq_puts(s, "----------------------------------------------------\n");
- seq_puts(s, "orphaned allocations (info is from last known client):\n");
- mutex_lock(&dev->buffer_lock);
- for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
- struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
- node);
- if (buffer->heap->id != heap->id)
- continue;
- total_size += buffer->size;
- if (!buffer->handle_count) {
- seq_printf(s, "%16s %16u %16zu %d %d\n",
- buffer->task_comm, buffer->pid,
- buffer->size, buffer->kmap_cnt,
- kref_read(&buffer->ref));
- total_orphaned_size += buffer->size;
- }
- }
- mutex_unlock(&dev->buffer_lock);
- seq_puts(s, "----------------------------------------------------\n");
- seq_printf(s, "%16s %16zu\n", "total orphaned",
- total_orphaned_size);
- seq_printf(s, "%16s %16zu\n", "total ", total_size);
- if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
- seq_printf(s, "%16s %16zu\n", "deferred free",
- heap->free_list_size);
- seq_puts(s, "----------------------------------------------------\n");
-
- if (heap->debug_show)
- heap->debug_show(heap, s, unused);
-
- return 0;
-}
-
-static int ion_debug_heap_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ion_debug_heap_show, inode->i_private);
-}
-
-static const struct file_operations debug_heap_fops = {
- .open = ion_debug_heap_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ion_ioctl,
+#endif
};
static int debug_shrink_set(void *data, u64 val)
@@ -1367,9 +547,10 @@ static int debug_shrink_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
debug_shrink_set, "%llu\n");
-void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
+void ion_device_add_heap(struct ion_heap *heap)
{
struct dentry *debug_file;
+ struct ion_device *dev = internal_dev;
if (!heap->ops->allocate || !heap->ops->free)
pr_err("%s: can not add heap with invalid ops struct.\n",
@@ -1386,35 +567,25 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
heap->dev = dev;
down_write(&dev->lock);
+ heap->id = heap_id++;
/*
* use negative heap->id to reverse the priority -- when traversing
* the list later attempt higher id numbers first
*/
plist_node_init(&heap->node, -heap->id);
plist_add(&heap->node, &dev->heaps);
- debug_file = debugfs_create_file(heap->name, 0664,
- dev->heaps_debug_root, heap,
- &debug_heap_fops);
-
- if (!debug_file) {
- char buf[256], *path;
-
- path = dentry_path(dev->heaps_debug_root, buf, 256);
- pr_err("Failed to create heap debugfs at %s/%s\n",
- path, heap->name);
- }
if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
char debug_name[64];
snprintf(debug_name, 64, "%s_shrink", heap->name);
debug_file = debugfs_create_file(
- debug_name, 0644, dev->heaps_debug_root, heap,
+ debug_name, 0644, dev->debug_root, heap,
&debug_shrink_fops);
if (!debug_file) {
char buf[256], *path;
- path = dentry_path(dev->heaps_debug_root, buf, 256);
+ path = dentry_path(dev->debug_root, buf, 256);
pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
path, debug_name);
}
@@ -1425,17 +596,14 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
}
EXPORT_SYMBOL(ion_device_add_heap);
-struct ion_device *ion_device_create(long (*custom_ioctl)
- (struct ion_client *client,
- unsigned int cmd,
- unsigned long arg))
+int ion_device_create(void)
{
struct ion_device *idev;
int ret;
idev = kzalloc(sizeof(*idev), GFP_KERNEL);
if (!idev)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
idev->dev.minor = MISC_DYNAMIC_MINOR;
idev->dev.name = "ion";
@@ -1445,7 +613,7 @@ struct ion_device *ion_device_create(long (*custom_ioctl)
if (ret) {
pr_err("ion: failed to register misc device.\n");
kfree(idev);
- return ERR_PTR(ret);
+ return ret;
}
idev->debug_root = debugfs_create_dir("ion", NULL);
@@ -1453,35 +621,13 @@ struct ion_device *ion_device_create(long (*custom_ioctl)
pr_err("ion: failed to create debugfs root directory.\n");
goto debugfs_done;
}
- idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
- if (!idev->heaps_debug_root) {
- pr_err("ion: failed to create debugfs heaps directory.\n");
- goto debugfs_done;
- }
- idev->clients_debug_root = debugfs_create_dir("clients",
- idev->debug_root);
- if (!idev->clients_debug_root)
- pr_err("ion: failed to create debugfs clients directory.\n");
debugfs_done:
-
- idev->custom_ioctl = custom_ioctl;
idev->buffers = RB_ROOT;
mutex_init(&idev->buffer_lock);
init_rwsem(&idev->lock);
plist_head_init(&idev->heaps);
- idev->clients = RB_ROOT;
- ion_root_client = &idev->clients;
- mutex_init(&debugfs_mutex);
- return idev;
-}
-EXPORT_SYMBOL(ion_device_create);
-
-void ion_device_destroy(struct ion_device *dev)
-{
- misc_deregister(&dev->dev);
- debugfs_remove_recursive(dev->debug_root);
- /* XXX need to free the heaps and clients ? */
- kfree(dev);
+ internal_dev = idev;
+ return 0;
}
-EXPORT_SYMBOL(ion_device_destroy);
+subsys_initcall(ion_device_create);
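With the custom_ioctl hook gone and the device now registered from an initcall, /dev/ion is created unconditionally and the remaining ioctl surface is whatever ion_ioctl() (also wired up as compat_ioctl above) still accepts, such as the heap query declared in ion.h below. A minimal userspace sketch of that query, assuming the staging uapi header (drivers/staging/android/uapi/ion.h) is copied locally as ion.h and that ION_IOC_HEAP_QUERY, struct ion_heap_query and struct ion_heap_data keep the layout from that header:

/*
 * Illustrative only: the ion uapi lives in staging and is not installed
 * under /usr/include, so the header is assumed to be copied next to this
 * file. Passing heaps == 0 on the first call only fills in query.cnt.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "ion.h"	/* staging uapi header, copied locally (assumption) */

int main(void)
{
	struct ion_heap_query query;
	struct ion_heap_data *heaps;
	unsigned int i;
	int fd, ret = 1;

	fd = open("/dev/ion", O_RDONLY);
	if (fd < 0)
		return 1;

	/* first pass: ask only for the number of registered heaps */
	memset(&query, 0, sizeof(query));
	if (ioctl(fd, ION_IOC_HEAP_QUERY, &query) < 0)
		goto out;

	heaps = calloc(query.cnt, sizeof(*heaps));
	if (!heaps)
		goto out;

	/* second pass: copy out one ion_heap_data per heap */
	query.heaps = (uint64_t)(uintptr_t)heaps;
	if (ioctl(fd, ION_IOC_HEAP_QUERY, &query) < 0)
		goto out_free;

	for (i = 0; i < query.cnt; i++)
		printf("heap %u: %s (type %u)\n",
		       heaps[i].heap_id, heaps[i].name, heaps[i].type);
	ret = 0;

out_free:
	free(heaps);
out:
	close(fd);
	return ret;
}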
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index 93dafb4586e4..ace8416bd509 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -14,38 +14,31 @@
*
*/
-#ifndef _LINUX_ION_H
-#define _LINUX_ION_H
+#ifndef _ION_H
+#define _ION_H
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/kref.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/shrinker.h>
#include <linux/types.h>
+#include <linux/miscdevice.h>
#include "../uapi/ion.h"
-struct ion_handle;
-struct ion_device;
-struct ion_heap;
-struct ion_mapper;
-struct ion_client;
-struct ion_buffer;
-
-/*
- * This should be removed some day when phys_addr_t's are fully
- * plumbed in the kernel, and all instances of ion_phys_addr_t should
- * be converted to phys_addr_t. For the time being many kernel interfaces
- * do not accept phys_addr_t's that would have to
- */
-#define ion_phys_addr_t unsigned long
-
/**
* struct ion_platform_heap - defines a heap in the given platform
* @type: type of the heap from ion_heap_type enum
- * @id: unique identifier for heap. When allocating higher numbers
+ * @id:		unique identifier for heap. When allocating higher numbers
* will be allocated from first. At allocation these are passed
* as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS.
* @name: used for debug purposes
* @base: base address of heap in physical memory if applicable
* @size: size of the heap in bytes if applicable
- * @align: required alignment in physical memory if applicable
* @priv: private info passed from the board file
*
* Provided by the board file.
@@ -54,123 +47,329 @@ struct ion_platform_heap {
enum ion_heap_type type;
unsigned int id;
const char *name;
- ion_phys_addr_t base;
+ phys_addr_t base;
size_t size;
- ion_phys_addr_t align;
+ phys_addr_t align;
void *priv;
};
/**
- * struct ion_platform_data - array of platform heaps passed from board file
- * @nr: number of structures in the array
- * @heaps: array of platform_heap structions
+ * struct ion_buffer - metadata for a particular buffer
+ * @ref: reference count
+ * @node: node in the ion_device buffers tree
+ * @dev: back pointer to the ion_device
+ * @heap: back pointer to the heap the buffer came from
+ * @flags: buffer specific flags
+ * @private_flags: internal buffer specific flags
+ * @size: size of the buffer
+ * @priv_virt: private data to the buffer representable as
+ * a void *
+ * @lock: protects the buffers cnt fields
+ * @kmap_cnt: number of times the buffer is mapped to the kernel
+ * @vaddr: the kernel mapping if kmap_cnt is not zero
+ * @sg_table: the sg table for the buffer if dmap_cnt is not zero
+ * @pages: flat array of pages in the buffer -- used by fault
+ * handler and only valid for buffers that are faulted in
+ * @vmas: list of vma's mapping this buffer
+ * @handle_count: count of handles referencing this buffer
+ * @task_comm: taskcomm of last client to reference this buffer in a
+ * handle, used for debugging
+ * @pid: pid of last client to reference this buffer in a
+ * handle, used for debugging
+ */
+struct ion_buffer {
+ union {
+ struct rb_node node;
+ struct list_head list;
+ };
+ struct ion_device *dev;
+ struct ion_heap *heap;
+ unsigned long flags;
+ unsigned long private_flags;
+ size_t size;
+ void *priv_virt;
+ struct mutex lock;
+ int kmap_cnt;
+ void *vaddr;
+ struct sg_table *sg_table;
+ struct page **pages;
+ struct list_head vmas;
+ struct list_head attachments;
+ /* used to track orphaned buffers */
+ int handle_count;
+ char task_comm[TASK_COMM_LEN];
+ pid_t pid;
+};
+void ion_buffer_destroy(struct ion_buffer *buffer);
+
+/**
+ * struct ion_device - the metadata of the ion device node
+ * @dev: the actual misc device
+ * @buffers: an rb tree of all the existing buffers
+ * @buffer_lock: lock protecting the tree of buffers
+ * @lock: rwsem protecting the tree of heaps and clients
+ */
+struct ion_device {
+ struct miscdevice dev;
+ struct rb_root buffers;
+ struct mutex buffer_lock;
+ struct rw_semaphore lock;
+ struct plist_head heaps;
+ struct dentry *debug_root;
+ int heap_cnt;
+};
+
+/**
+ * struct ion_heap_ops - ops to operate on a given heap
+ * @allocate: allocate memory
+ * @free: free memory
+ * @map_kernel map memory to the kernel
+ * @unmap_kernel unmap memory to the kernel
+ * @map_user map memory to userspace
*
- * Provided by the board file in the form of platform data to a platform device.
+ * allocate, phys, and map_user return 0 on success, -errno on error.
+ * map_dma and map_kernel return pointer on success, ERR_PTR on
+ * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
+ * the buffer's private_flags when called from a shrinker. In that
+ * case, the pages being free'd must be truly free'd back to the
+ * system, not put in a page pool or otherwise cached.
*/
-struct ion_platform_data {
- int nr;
- struct ion_platform_heap *heaps;
+struct ion_heap_ops {
+ int (*allocate)(struct ion_heap *heap,
+ struct ion_buffer *buffer, unsigned long len,
+ unsigned long flags);
+ void (*free)(struct ion_buffer *buffer);
+ void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
+ void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
+ int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
+ struct vm_area_struct *vma);
+ int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
};
/**
- * ion_client_create() - allocate a client and returns it
- * @dev: the global ion device
- * @name: used for debugging
+ * heap flags - flags between the heaps and core ion code
*/
-struct ion_client *ion_client_create(struct ion_device *dev,
- const char *name);
+#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
/**
- * ion_client_destroy() - free's a client and all it's handles
- * @client: the client
+ * private flags - flags internal to ion
+ */
+/*
+ * Buffer is being freed from a shrinker function. Skip any possible
+ * heap-specific caching mechanism (e.g. page pools). Guarantees that
+ * any buffer storage that came from the system allocator will be
+ * returned to the system allocator.
+ */
+#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0)
+
+/**
+ * struct ion_heap - represents a heap in the system
+ * @node: rb node to put the heap on the device's tree of heaps
+ * @dev: back pointer to the ion_device
+ * @type: type of heap
+ * @ops: ops struct as above
+ * @flags: flags
+ * @id: id of heap, also indicates priority of this heap when
+ * allocating. These are specified by platform data and
+ * MUST be unique
+ * @name: used for debugging
+ * @shrinker: a shrinker for the heap
+ * @free_list: free list head if deferred free is used
+ * @free_list_size size of the deferred free list in bytes
+ * @lock: protects the free list
+ * @waitqueue: queue to wait on from deferred free thread
+ * @task: task struct of deferred free thread
+ * @debug_show: called when heap debug file is read to add any
+ * heap specific debug info to output
*
- * Free the provided client and all it's resources including
- * any handles it is holding.
+ * Represents a pool of memory from which buffers can be made. In some
+ * systems the only heap is regular system memory allocated via vmalloc.
+ * On others, some blocks might require large physically contiguous buffers
+ * that are allocated from a specially reserved heap.
*/
-void ion_client_destroy(struct ion_client *client);
+struct ion_heap {
+ struct plist_node node;
+ struct ion_device *dev;
+ enum ion_heap_type type;
+ struct ion_heap_ops *ops;
+ unsigned long flags;
+ unsigned int id;
+ const char *name;
+ struct shrinker shrinker;
+ struct list_head free_list;
+ size_t free_list_size;
+ spinlock_t free_lock;
+ wait_queue_head_t waitqueue;
+ struct task_struct *task;
+
+ int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
+};
/**
- * ion_alloc - allocate ion memory
- * @client: the client
- * @len: size of the allocation
- * @align: requested allocation alignment, lots of hardware blocks
- * have alignment requirements of some kind
- * @heap_id_mask: mask of heaps to allocate from, if multiple bits are set
- * heaps will be tried in order from highest to lowest
- * id
- * @flags: heap flags, the low 16 bits are consumed by ion, the
- * high 16 bits are passed on to the respective heap and
- * can be heap custom
+ * ion_buffer_cached - this ion buffer is cached
+ * @buffer: buffer
*
- * Allocate memory in one of the heaps provided in heap mask and return
- * an opaque handle to it.
+ * indicates whether this ion buffer is cached
*/
-struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
- size_t align, unsigned int heap_id_mask,
- unsigned int flags);
+bool ion_buffer_cached(struct ion_buffer *buffer);
/**
- * ion_free - free a handle
- * @client: the client
- * @handle: the handle to free
+ * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
+ * @buffer: buffer
*
- * Free the provided handle.
+ * indicates whether userspace mappings of this buffer will be faulted
+ * in, this can affect how buffers are allocated from the heap.
*/
-void ion_free(struct ion_client *client, struct ion_handle *handle);
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
/**
- * ion_map_kernel - create mapping for the given handle
- * @client: the client
- * @handle: handle to map
+ * ion_device_add_heap - adds a heap to the ion device
+ * @heap: the heap to add
+ */
+void ion_device_add_heap(struct ion_heap *heap);
+
+/**
+ * some helpers for common operations on buffers using the sg_table
+ * and vaddr fields
+ */
+void *ion_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
+void ion_heap_unmap_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
+int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+ struct vm_area_struct *vma);
+int ion_heap_buffer_zero(struct ion_buffer *buffer);
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
+
+int ion_alloc(size_t len,
+ unsigned int heap_id_mask,
+ unsigned int flags);
+
+/**
+ * ion_heap_init_shrinker
+ * @heap: the heap
*
- * Map the given handle into the kernel and return a kernel address that
- * can be used to access this address.
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
+ * this function will be called to setup a shrinker to shrink the freelists
+ * and call the heap's shrink op.
*/
-void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
+void ion_heap_init_shrinker(struct ion_heap *heap);
/**
- * ion_unmap_kernel() - destroy a kernel mapping for a handle
- * @client: the client
- * @handle: handle to unmap
+ * ion_heap_init_deferred_free -- initialize deferred free functionality
+ * @heap: the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
+ * be called to setup deferred frees. Calls to free the buffer will
+ * return immediately and the actual free will occur some time later
*/
-void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
+int ion_heap_init_deferred_free(struct ion_heap *heap);
/**
- * ion_share_dma_buf() - share buffer as dma-buf
- * @client: the client
- * @handle: the handle
+ * ion_heap_freelist_add - add a buffer to the deferred free list
+ * @heap: the heap
+ * @buffer: the buffer
+ *
+ * Adds an item to the deferred freelist.
*/
-struct dma_buf *ion_share_dma_buf(struct ion_client *client,
- struct ion_handle *handle);
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
/**
- * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd
- * @client: the client
- * @handle: the handle
+ * ion_heap_freelist_drain - drain the deferred free list
+ * @heap: the heap
+ * @size: amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed. The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
*/
-int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle);
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
/**
- * ion_import_dma_buf() - get ion_handle from dma-buf
- * @client: the client
- * @dmabuf: the dma-buf
+ * ion_heap_freelist_shrink - drain the deferred free
+ * list, skipping any heap-specific
+ * pooling or caching mechanisms
+ *
+ * @heap: the heap
+ * @size: amount of memory to drain in bytes
*
- * Get the ion_buffer associated with the dma-buf and return the ion_handle.
- * If no ion_handle exists for this buffer, return newly created ion_handle.
- * If dma-buf from another exporter is passed, return ERR_PTR(-EINVAL)
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed. The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
+ *
+ * Unlike with @ion_heap_freelist_drain, don't put any pages back into
+ * page pools or otherwise cache the pages. Everything must be
+ * genuinely free'd back to the system. If you're free'ing from a
+ * shrinker you probably want to use this. Note that this relies on
+ * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
+ * flag.
*/
-struct ion_handle *ion_import_dma_buf(struct ion_client *client,
- struct dma_buf *dmabuf);
+size_t ion_heap_freelist_shrink(struct ion_heap *heap,
+ size_t size);
/**
- * ion_import_dma_buf_fd() - given a dma-buf fd from the ion exporter get handle
- * @client: the client
- * @fd: the dma-buf fd
+ * ion_heap_freelist_size - returns the size of the freelist in bytes
+ * @heap: the heap
+ */
+size_t ion_heap_freelist_size(struct ion_heap *heap);
+
+
+/**
+ * functions for creating and destroying a heap pool -- allows you
+ * to keep a pool of pre allocated memory to use from your heap. Keeping
+ * a pool of memory that is ready for dma, ie any cached mapping have been
+ * invalidated from the cache, provides a significant performance benefit on
+ * many systems
+ */
+
+/**
+ * struct ion_page_pool - pagepool struct
+ * @high_count: number of highmem items in the pool
+ * @low_count: number of lowmem items in the pool
+ * @high_items: list of highmem items
+ * @low_items: list of lowmem items
+ * @mutex: lock protecting this struct and especially the count
+ * item list
+ * @gfp_mask: gfp_mask to use from alloc
+ * @order: order of pages in the pool
+ * @list: plist node for list of pools
+ * @cached: it's cached pool or not
+ *
+ * Allows you to keep a pool of pre allocated pages to use from your heap.
+ * Keeping a pool of pages that is ready for dma, ie any cached mapping have
+ * been invalidated from the cache, provides a significant performance benefit
+ * on many systems
+ */
+struct ion_page_pool {
+ int high_count;
+ int low_count;
+ bool cached;
+ struct list_head high_items;
+ struct list_head low_items;
+ struct mutex mutex;
+ gfp_t gfp_mask;
+ unsigned int order;
+ struct plist_node list;
+};
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
+ bool cached);
+void ion_page_pool_destroy(struct ion_page_pool *pool);
+struct page *ion_page_pool_alloc(struct ion_page_pool *pool);
+void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);
+
+/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
+ * @pool: the pool
+ * @gfp_mask: the memory type to reclaim
+ * @nr_to_scan: number of items to shrink in pages
*
- * Given an dma-buf fd that was allocated through ion via ion_share_dma_buf_fd,
- * import that fd and return a handle representing it. If a dma-buf from
- * another exporter is passed in this function will return ERR_PTR(-EINVAL)
+ * returns the number of items freed in pages
*/
-struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd);
+int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
+ int nr_to_scan);
+
+long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
+int ion_query_heaps(struct ion_heap_query *query);
-#endif /* _LINUX_ION_H */
+#endif /* _ION_H */
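Taken together with the removal of ion_device_create()/ion_device_destroy() here and of the ion_heap_create() type switch later in this patch, the header above now describes the whole contract a heap has to satisfy: fill in a struct ion_heap, point it at a struct ion_heap_ops, and hand it to ion_device_add_heap() from an initcall, exactly as the system and CMA heaps do further down. A minimal sketch under those assumptions, using a hypothetical contiguous "example" heap (the example_* names are stand-ins, not part of the patch; only the ion_* calls come from the header above):

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "ion.h"

static int example_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
			    unsigned long len, unsigned long flags)
{
	unsigned int order = get_order(len);
	struct sg_table *table;
	struct page *page;

	/* simplest possible backing store: one contiguous allocation */
	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!page)
		return -ENOMEM;

	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		goto free_pages;
	if (sg_alloc_table(table, 1, GFP_KERNEL))
		goto free_table;
	sg_set_page(table->sgl, page, PAGE_ALIGN(len), 0);

	buffer->sg_table = table;	/* the core only looks at sg_table now */
	return 0;

free_table:
	kfree(table);
free_pages:
	__free_pages(page, order);
	return -ENOMEM;
}

static void example_free(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;

	__free_pages(sg_page(table->sgl), get_order(buffer->size));
	sg_free_table(table);
	kfree(table);
}

static struct ion_heap_ops example_ops = {
	.allocate	= example_allocate,
	.free		= example_free,
	.map_kernel	= ion_heap_map_kernel,
	.unmap_kernel	= ion_heap_unmap_kernel,
	.map_user	= ion_heap_map_user,
};

static int __init example_heap_create(void)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
	if (!heap)
		return -ENOMEM;

	heap->ops = &example_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;	/* reuse an existing type */
	heap->name = "example";

	/* no dev argument and no platform data: the core assigns heap->id */
	ion_device_add_heap(heap);
	return 0;
}
device_initcall(example_heap_create);

A heap that wants deferred freeing or shrinker-driven reclaim would additionally set ION_HEAP_FLAG_DEFER_FREE in heap->flags and provide a .shrink op, per the kernel-doc above.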
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
index a8ea97391c40..5fdc1f328f61 100644
--- a/drivers/staging/android/ion/ion_carveout_heap.c
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -23,19 +23,17 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
-#include "ion_priv.h"
#define ION_CARVEOUT_ALLOCATE_FAIL -1
struct ion_carveout_heap {
struct ion_heap heap;
struct gen_pool *pool;
- ion_phys_addr_t base;
+ phys_addr_t base;
};
-static ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
- unsigned long size,
- unsigned long align)
+static phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
+ unsigned long size)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
@@ -47,7 +45,7 @@ static ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
return offset;
}
-static void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+static void ion_carveout_free(struct ion_heap *heap, phys_addr_t addr,
unsigned long size)
{
struct ion_carveout_heap *carveout_heap =
@@ -60,16 +58,13 @@ static void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
static int ion_carveout_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
+ unsigned long size,
unsigned long flags)
{
struct sg_table *table;
- ion_phys_addr_t paddr;
+ phys_addr_t paddr;
int ret;
- if (align > PAGE_SIZE)
- return -EINVAL;
-
table = kmalloc(sizeof(*table), GFP_KERNEL);
if (!table)
return -ENOMEM;
@@ -77,7 +72,7 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,
if (ret)
goto err_free;
- paddr = ion_carveout_allocate(heap, size, align);
+ paddr = ion_carveout_allocate(heap, size);
if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
ret = -ENOMEM;
goto err_free_table;
@@ -100,14 +95,10 @@ static void ion_carveout_heap_free(struct ion_buffer *buffer)
struct ion_heap *heap = buffer->heap;
struct sg_table *table = buffer->sg_table;
struct page *page = sg_page(table->sgl);
- ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+ phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
ion_heap_buffer_zero(buffer);
- if (ion_buffer_cached(buffer))
- dma_sync_sg_for_device(NULL, table->sgl, table->nents,
- DMA_BIDIRECTIONAL);
-
ion_carveout_free(heap, paddr, buffer->size);
sg_free_table(table);
kfree(table);
@@ -132,8 +123,6 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
page = pfn_to_page(PFN_DOWN(heap_data->base));
size = heap_data->size;
- ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
-
ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
if (ret)
return ERR_PTR(ret);
@@ -156,13 +145,3 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
return &carveout_heap->heap;
}
-
-void ion_carveout_heap_destroy(struct ion_heap *heap)
-{
- struct ion_carveout_heap *carveout_heap =
- container_of(heap, struct ion_carveout_heap, heap);
-
- gen_pool_destroy(carveout_heap->pool);
- kfree(carveout_heap);
- carveout_heap = NULL;
-}
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 70495dc645ea..102c09398317 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -22,12 +22,11 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
-#include "ion_priv.h"
struct ion_chunk_heap {
struct ion_heap heap;
struct gen_pool *pool;
- ion_phys_addr_t base;
+ phys_addr_t base;
unsigned long chunk_size;
unsigned long size;
unsigned long allocated;
@@ -35,7 +34,7 @@ struct ion_chunk_heap {
static int ion_chunk_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
+ unsigned long size,
unsigned long flags)
{
struct ion_chunk_heap *chunk_heap =
@@ -46,9 +45,6 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
unsigned long num_chunks;
unsigned long allocated_size;
- if (align > chunk_heap->chunk_size)
- return -EINVAL;
-
allocated_size = ALIGN(size, chunk_heap->chunk_size);
num_chunks = allocated_size / chunk_heap->chunk_size;
@@ -104,10 +100,6 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
ion_heap_buffer_zero(buffer);
- if (ion_buffer_cached(buffer))
- dma_sync_sg_for_device(NULL, table->sgl, table->nents,
- DMA_BIDIRECTIONAL);
-
for_each_sg(table->sgl, sg, table->nents, i) {
gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
sg->length);
@@ -135,8 +127,6 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
page = pfn_to_page(PFN_DOWN(heap_data->base));
size = heap_data->size;
- ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
-
ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
if (ret)
return ERR_PTR(ret);
@@ -160,8 +150,8 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
chunk_heap->heap.ops = &chunk_heap_ops;
chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
- pr_debug("%s: base %lu size %zu align %ld\n", __func__,
- chunk_heap->base, heap_data->size, heap_data->align);
+ pr_debug("%s: base %pa size %zu\n", __func__,
+ &chunk_heap->base, heap_data->size);
return &chunk_heap->heap;
@@ -170,12 +160,3 @@ error_gen_pool_create:
return ERR_PTR(ret);
}
-void ion_chunk_heap_destroy(struct ion_heap *heap)
-{
- struct ion_chunk_heap *chunk_heap =
- container_of(heap, struct ion_chunk_heap, heap);
-
- gen_pool_destroy(chunk_heap->pool);
- kfree(chunk_heap);
- chunk_heap = NULL;
-}
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index 6c4068523781..a0949bc0dcf4 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -19,124 +19,75 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
-#include <linux/dma-mapping.h>
+#include <linux/cma.h>
+#include <linux/scatterlist.h>
#include "ion.h"
-#include "ion_priv.h"
struct ion_cma_heap {
struct ion_heap heap;
- struct device *dev;
+ struct cma *cma;
};
#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
-struct ion_cma_buffer_info {
- void *cpu_addr;
- dma_addr_t handle;
- struct sg_table *table;
-};
-
/* ION CMA heap operations functions */
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
- unsigned long len, unsigned long align,
+ unsigned long len,
unsigned long flags)
{
struct ion_cma_heap *cma_heap = to_cma_heap(heap);
- struct device *dev = cma_heap->dev;
- struct ion_cma_buffer_info *info;
-
- dev_dbg(dev, "Request buffer allocation len %ld\n", len);
-
- if (buffer->flags & ION_FLAG_CACHED)
- return -EINVAL;
-
- if (align > PAGE_SIZE)
- return -EINVAL;
+ struct sg_table *table;
+ struct page *pages;
+ int ret;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
+ pages = cma_alloc(cma_heap->cma, len, 0, GFP_KERNEL);
+ if (!pages)
return -ENOMEM;
- info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle),
- GFP_HIGHUSER | __GFP_ZERO);
-
- if (!info->cpu_addr) {
- dev_err(dev, "Fail to allocate buffer\n");
+ table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
goto err;
- }
- info->table = kmalloc(sizeof(*info->table), GFP_KERNEL);
- if (!info->table)
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret)
goto free_mem;
- if (dma_get_sgtable(dev, info->table, info->cpu_addr, info->handle,
- len))
- goto free_table;
- /* keep this for memory release */
- buffer->priv_virt = info;
- buffer->sg_table = info->table;
- dev_dbg(dev, "Allocate buffer %p\n", buffer);
+ sg_set_page(table->sgl, pages, len, 0);
+
+ buffer->priv_virt = pages;
+ buffer->sg_table = table;
return 0;
-free_table:
- kfree(info->table);
free_mem:
- dma_free_coherent(dev, len, info->cpu_addr, info->handle);
+ kfree(table);
err:
- kfree(info);
+ cma_release(cma_heap->cma, pages, buffer->size);
return -ENOMEM;
}
static void ion_cma_free(struct ion_buffer *buffer)
{
struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
- struct device *dev = cma_heap->dev;
- struct ion_cma_buffer_info *info = buffer->priv_virt;
+ struct page *pages = buffer->priv_virt;
- dev_dbg(dev, "Release buffer %p\n", buffer);
/* release memory */
- dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
+ cma_release(cma_heap->cma, pages, buffer->size);
/* release sg table */
- sg_free_table(info->table);
- kfree(info->table);
- kfree(info);
-}
-
-static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
- struct vm_area_struct *vma)
-{
- struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
- struct device *dev = cma_heap->dev;
- struct ion_cma_buffer_info *info = buffer->priv_virt;
-
- return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
- buffer->size);
-}
-
-static void *ion_cma_map_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- struct ion_cma_buffer_info *info = buffer->priv_virt;
- /* kernel memory mapping has been done at allocation time */
- return info->cpu_addr;
-}
-
-static void ion_cma_unmap_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
+ sg_free_table(buffer->sg_table);
+ kfree(buffer->sg_table);
}
static struct ion_heap_ops ion_cma_ops = {
.allocate = ion_cma_allocate,
.free = ion_cma_free,
- .map_user = ion_cma_mmap,
- .map_kernel = ion_cma_map_kernel,
- .unmap_kernel = ion_cma_unmap_kernel,
+ .map_user = ion_heap_map_user,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
};
-struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
+static struct ion_heap *__ion_cma_heap_create(struct cma *cma)
{
struct ion_cma_heap *cma_heap;
@@ -150,14 +101,28 @@ struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
* get device from private heaps data, later it will be
* used to make the link with reserved CMA memory
*/
- cma_heap->dev = data->priv;
+ cma_heap->cma = cma;
cma_heap->heap.type = ION_HEAP_TYPE_DMA;
return &cma_heap->heap;
}
-void ion_cma_heap_destroy(struct ion_heap *heap)
+int __ion_add_cma_heaps(struct cma *cma, void *data)
{
- struct ion_cma_heap *cma_heap = to_cma_heap(heap);
+ struct ion_heap *heap;
- kfree(cma_heap);
+ heap = __ion_cma_heap_create(cma);
+ if (IS_ERR(heap))
+ return PTR_ERR(heap);
+
+ heap->name = cma_get_name(cma);
+
+ ion_device_add_heap(heap);
+ return 0;
+}
+
+static int ion_add_cma_heaps(void)
+{
+ cma_for_each_area(__ion_add_cma_heaps, NULL);
+ return 0;
}
+device_initcall(ion_add_cma_heaps);
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
deleted file mode 100644
index cf5c010d32bc..000000000000
--- a/drivers/staging/android/ion/ion_dummy_driver.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * drivers/gpu/ion/ion_dummy_driver.c
- *
- * Copyright (C) 2013 Linaro, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/err.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/memblock.h>
-#include <linux/sizes.h>
-#include <linux/io.h>
-#include "ion.h"
-#include "ion_priv.h"
-
-static struct ion_device *idev;
-static struct ion_heap **heaps;
-
-static void *carveout_ptr;
-static void *chunk_ptr;
-
-static struct ion_platform_heap dummy_heaps[] = {
- {
- .id = ION_HEAP_TYPE_SYSTEM,
- .type = ION_HEAP_TYPE_SYSTEM,
- .name = "system",
- },
- {
- .id = ION_HEAP_TYPE_SYSTEM_CONTIG,
- .type = ION_HEAP_TYPE_SYSTEM_CONTIG,
- .name = "system contig",
- },
- {
- .id = ION_HEAP_TYPE_CARVEOUT,
- .type = ION_HEAP_TYPE_CARVEOUT,
- .name = "carveout",
- .size = SZ_4M,
- },
- {
- .id = ION_HEAP_TYPE_CHUNK,
- .type = ION_HEAP_TYPE_CHUNK,
- .name = "chunk",
- .size = SZ_4M,
- .align = SZ_16K,
- .priv = (void *)(SZ_16K),
- },
-};
-
-static const struct ion_platform_data dummy_ion_pdata = {
- .nr = ARRAY_SIZE(dummy_heaps),
- .heaps = dummy_heaps,
-};
-
-static int __init ion_dummy_init(void)
-{
- int i, err;
-
- idev = ion_device_create(NULL);
- if (IS_ERR(idev))
- return PTR_ERR(idev);
- heaps = kcalloc(dummy_ion_pdata.nr, sizeof(struct ion_heap *),
- GFP_KERNEL);
- if (!heaps)
- return -ENOMEM;
-
-
- /* Allocate a dummy carveout heap */
- carveout_ptr = alloc_pages_exact(
- dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size,
- GFP_KERNEL);
- if (carveout_ptr)
- dummy_heaps[ION_HEAP_TYPE_CARVEOUT].base =
- virt_to_phys(carveout_ptr);
- else
- pr_err("ion_dummy: Could not allocate carveout\n");
-
- /* Allocate a dummy chunk heap */
- chunk_ptr = alloc_pages_exact(
- dummy_heaps[ION_HEAP_TYPE_CHUNK].size,
- GFP_KERNEL);
- if (chunk_ptr)
- dummy_heaps[ION_HEAP_TYPE_CHUNK].base = virt_to_phys(chunk_ptr);
- else
- pr_err("ion_dummy: Could not allocate chunk\n");
-
- for (i = 0; i < dummy_ion_pdata.nr; i++) {
- struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i];
-
- if (heap_data->type == ION_HEAP_TYPE_CARVEOUT &&
- !heap_data->base)
- continue;
-
- if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base)
- continue;
-
- heaps[i] = ion_heap_create(heap_data);
- if (IS_ERR_OR_NULL(heaps[i])) {
- err = PTR_ERR(heaps[i]);
- goto err;
- }
- ion_device_add_heap(idev, heaps[i]);
- }
- return 0;
-err:
- for (i = 0; i < dummy_ion_pdata.nr; ++i)
- ion_heap_destroy(heaps[i]);
- kfree(heaps);
-
- if (carveout_ptr) {
- free_pages_exact(carveout_ptr,
- dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
- carveout_ptr = NULL;
- }
- if (chunk_ptr) {
- free_pages_exact(chunk_ptr,
- dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
- chunk_ptr = NULL;
- }
- return err;
-}
-device_initcall(ion_dummy_init);
-
-static void __exit ion_dummy_exit(void)
-{
- int i;
-
- ion_device_destroy(idev);
-
- for (i = 0; i < dummy_ion_pdata.nr; i++)
- ion_heap_destroy(heaps[i]);
- kfree(heaps);
-
- if (carveout_ptr) {
- free_pages_exact(carveout_ptr,
- dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
- carveout_ptr = NULL;
- }
- if (chunk_ptr) {
- free_pages_exact(chunk_ptr,
- dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
- chunk_ptr = NULL;
- }
-}
-__exitcall(ion_dummy_exit);
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index c69d0bd53693..91faa7f035b9 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -24,7 +24,6 @@
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"
-#include "ion_priv.h"
void *ion_heap_map_kernel(struct ion_heap *heap,
struct ion_buffer *buffer)
@@ -315,70 +314,3 @@ void ion_heap_init_shrinker(struct ion_heap *heap)
heap->shrinker.batch = 0;
register_shrinker(&heap->shrinker);
}
-
-struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
-{
- struct ion_heap *heap = NULL;
-
- switch (heap_data->type) {
- case ION_HEAP_TYPE_SYSTEM_CONTIG:
- heap = ion_system_contig_heap_create(heap_data);
- break;
- case ION_HEAP_TYPE_SYSTEM:
- heap = ion_system_heap_create(heap_data);
- break;
- case ION_HEAP_TYPE_CARVEOUT:
- heap = ion_carveout_heap_create(heap_data);
- break;
- case ION_HEAP_TYPE_CHUNK:
- heap = ion_chunk_heap_create(heap_data);
- break;
- case ION_HEAP_TYPE_DMA:
- heap = ion_cma_heap_create(heap_data);
- break;
- default:
- pr_err("%s: Invalid heap type %d\n", __func__,
- heap_data->type);
- return ERR_PTR(-EINVAL);
- }
-
- if (IS_ERR_OR_NULL(heap)) {
- pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
- __func__, heap_data->name, heap_data->type,
- heap_data->base, heap_data->size);
- return ERR_PTR(-EINVAL);
- }
-
- heap->name = heap_data->name;
- heap->id = heap_data->id;
- return heap;
-}
-EXPORT_SYMBOL(ion_heap_create);
-
-void ion_heap_destroy(struct ion_heap *heap)
-{
- if (!heap)
- return;
-
- switch (heap->type) {
- case ION_HEAP_TYPE_SYSTEM_CONTIG:
- ion_system_contig_heap_destroy(heap);
- break;
- case ION_HEAP_TYPE_SYSTEM:
- ion_system_heap_destroy(heap);
- break;
- case ION_HEAP_TYPE_CARVEOUT:
- ion_carveout_heap_destroy(heap);
- break;
- case ION_HEAP_TYPE_CHUNK:
- ion_chunk_heap_destroy(heap);
- break;
- case ION_HEAP_TYPE_DMA:
- ion_cma_heap_destroy(heap);
- break;
- default:
- pr_err("%s: Invalid heap type %d\n", __func__,
- heap->type);
- }
-}
-EXPORT_SYMBOL(ion_heap_destroy);
diff --git a/drivers/staging/android/ion/ion_of.c b/drivers/staging/android/ion/ion_of.c
deleted file mode 100644
index 7791c705ea31..000000000000
--- a/drivers/staging/android/ion/ion_of.c
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Based on work from:
- * Andrew Andrianov <andrew@ncrmnt.org>
- * Google
- * The Linux Foundation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_address.h>
-#include <linux/clk.h>
-#include <linux/dma-mapping.h>
-#include <linux/cma.h>
-#include <linux/dma-contiguous.h>
-#include <linux/io.h>
-#include <linux/of_reserved_mem.h>
-#include "ion.h"
-#include "ion_priv.h"
-#include "ion_of.h"
-
-static int ion_parse_dt_heap_common(struct device_node *heap_node,
- struct ion_platform_heap *heap,
- struct ion_of_heap *compatible)
-{
- int i;
-
- for (i = 0; compatible[i].name; i++) {
- if (of_device_is_compatible(heap_node, compatible[i].compat))
- break;
- }
-
- if (!compatible[i].name)
- return -ENODEV;
-
- heap->id = compatible[i].heap_id;
- heap->type = compatible[i].type;
- heap->name = compatible[i].name;
- heap->align = compatible[i].align;
-
- /* Some kind of callback function pointer? */
-
- pr_info("%s: id %d type %d name %s align %lx\n", __func__,
- heap->id, heap->type, heap->name, heap->align);
- return 0;
-}
-
-static int ion_setup_heap_common(struct platform_device *parent,
- struct device_node *heap_node,
- struct ion_platform_heap *heap)
-{
- int ret = 0;
-
- switch (heap->type) {
- case ION_HEAP_TYPE_CARVEOUT:
- case ION_HEAP_TYPE_CHUNK:
- if (heap->base && heap->size)
- return 0;
-
- ret = of_reserved_mem_device_init(heap->priv);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
-struct ion_platform_data *ion_parse_dt(struct platform_device *pdev,
- struct ion_of_heap *compatible)
-{
- int num_heaps, ret;
- const struct device_node *dt_node = pdev->dev.of_node;
- struct device_node *node;
- struct ion_platform_heap *heaps;
- struct ion_platform_data *data;
- int i = 0;
-
- num_heaps = of_get_available_child_count(dt_node);
-
- if (!num_heaps)
- return ERR_PTR(-EINVAL);
-
- heaps = devm_kzalloc(&pdev->dev,
- sizeof(struct ion_platform_heap) * num_heaps,
- GFP_KERNEL);
- if (!heaps)
- return ERR_PTR(-ENOMEM);
-
- data = devm_kzalloc(&pdev->dev, sizeof(struct ion_platform_data),
- GFP_KERNEL);
- if (!data)
- return ERR_PTR(-ENOMEM);
-
- for_each_available_child_of_node(dt_node, node) {
- struct platform_device *heap_pdev;
-
- ret = ion_parse_dt_heap_common(node, &heaps[i], compatible);
- if (ret)
- return ERR_PTR(ret);
-
- heap_pdev = of_platform_device_create(node, heaps[i].name,
- &pdev->dev);
- if (!heap_pdev)
- return ERR_PTR(-ENOMEM);
- heap_pdev->dev.platform_data = &heaps[i];
-
- heaps[i].priv = &heap_pdev->dev;
-
- ret = ion_setup_heap_common(pdev, node, &heaps[i]);
- if (ret)
- goto out_err;
- i++;
- }
-
- data->heaps = heaps;
- data->nr = num_heaps;
- return data;
-
-out_err:
- for ( ; i >= 0; i--)
- if (heaps[i].priv)
- of_device_unregister(to_platform_device(heaps[i].priv));
-
- return ERR_PTR(ret);
-}
-
-void ion_destroy_platform_data(struct ion_platform_data *data)
-{
- int i;
-
- for (i = 0; i < data->nr; i++)
- if (data->heaps[i].priv)
- of_device_unregister(to_platform_device(
- data->heaps[i].priv));
-}
-
-#ifdef CONFIG_OF_RESERVED_MEM
-#include <linux/of.h>
-#include <linux/of_fdt.h>
-#include <linux/of_reserved_mem.h>
-
-static int rmem_ion_device_init(struct reserved_mem *rmem, struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct ion_platform_heap *heap = pdev->dev.platform_data;
-
- heap->base = rmem->base;
- heap->base = rmem->size;
- pr_debug("%s: heap %s base %pa size %pa dev %p\n", __func__,
- heap->name, &rmem->base, &rmem->size, dev);
- return 0;
-}
-
-static void rmem_ion_device_release(struct reserved_mem *rmem,
- struct device *dev)
-{
-}
-
-static const struct reserved_mem_ops rmem_dma_ops = {
- .device_init = rmem_ion_device_init,
- .device_release = rmem_ion_device_release,
-};
-
-static int __init rmem_ion_setup(struct reserved_mem *rmem)
-{
- phys_addr_t size = rmem->size;
-
- size = size / 1024;
-
- pr_info("Ion memory setup at %pa size %pa MiB\n",
- &rmem->base, &size);
- rmem->ops = &rmem_dma_ops;
- return 0;
-}
-
-RESERVEDMEM_OF_DECLARE(ion, "ion-region", rmem_ion_setup);
-#endif
diff --git a/drivers/staging/android/ion/ion_of.h b/drivers/staging/android/ion/ion_of.h
deleted file mode 100644
index 8241a1770f0a..000000000000
--- a/drivers/staging/android/ion/ion_of.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Based on work from:
- * Andrew Andrianov <andrew@ncrmnt.org>
- * Google
- * The Linux Foundation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ION_OF_H
-#define _ION_OF_H
-
-struct ion_of_heap {
- const char *compat;
- int heap_id;
- int type;
- const char *name;
- int align;
-};
-
-#define PLATFORM_HEAP(_compat, _id, _type, _name) \
-{ \
- .compat = _compat, \
- .heap_id = _id, \
- .type = _type, \
- .name = _name, \
- .align = PAGE_SIZE, \
-}
-
-struct ion_platform_data *ion_parse_dt(struct platform_device *pdev,
- struct ion_of_heap *compatible);
-
-void ion_destroy_platform_data(struct ion_platform_data *data);
-
-#endif
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index aea89c1ec345..817849df9de3 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -22,7 +22,8 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/swap.h>
-#include "ion_priv.h"
+
+#include "ion.h"
static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
@@ -30,9 +31,6 @@ static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
if (!page)
return NULL;
- if (!pool->cached)
- ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
- DMA_BIDIRECTIONAL);
return page;
}
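With ion_pages_sync_for_device() gone, the page pool above is reduced to a plain cache of order-N pages that a heap drains and refills itself. A short sketch of the lifecycle described by the ion_page_pool kernel-doc in ion.h, assuming one uncached order-0 pool; the helpers are not exported, so this would have to be built into the ion code itself:

#include <linux/gfp.h>
#include "ion.h"

static int pool_demo(void)
{
	struct ion_page_pool *pool;
	struct page *page;

	/* one uncached pool of order-0 pages */
	pool = ion_page_pool_create(GFP_KERNEL | __GFP_ZERO, 0, false);
	if (!pool)
		return -ENOMEM;

	page = ion_page_pool_alloc(pool);	/* pool empty: falls back to the page allocator */
	if (page)
		ion_page_pool_free(pool, page);	/* parks the page in the pool for reuse */

	/* a heap shrinker reclaims cached pages like this */
	ion_page_pool_shrink(pool, GFP_KERNEL, 1);

	/* destroy only frees the bookkeeping, so drain before destroying */
	ion_page_pool_destroy(pool);
	return 0;
}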
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
deleted file mode 100644
index 5b3059c78431..000000000000
--- a/drivers/staging/android/ion/ion_priv.h
+++ /dev/null
@@ -1,473 +0,0 @@
-/*
- * drivers/staging/android/ion/ion_priv.h
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _ION_PRIV_H
-#define _ION_PRIV_H
-
-#include <linux/device.h>
-#include <linux/dma-direction.h>
-#include <linux/kref.h>
-#include <linux/mm_types.h>
-#include <linux/mutex.h>
-#include <linux/rbtree.h>
-#include <linux/sched.h>
-#include <linux/shrinker.h>
-#include <linux/types.h>
-#include <linux/miscdevice.h>
-
-#include "ion.h"
-
-/**
- * struct ion_buffer - metadata for a particular buffer
- * @ref: reference count
- * @node: node in the ion_device buffers tree
- * @dev: back pointer to the ion_device
- * @heap: back pointer to the heap the buffer came from
- * @flags: buffer specific flags
- * @private_flags: internal buffer specific flags
- * @size: size of the buffer
- * @priv_virt: private data to the buffer representable as
- * a void *
- * @lock: protects the buffers cnt fields
- * @kmap_cnt: number of times the buffer is mapped to the kernel
- * @vaddr: the kernel mapping if kmap_cnt is not zero
- * @dmap_cnt: number of times the buffer is mapped for dma
- * @sg_table: the sg table for the buffer if dmap_cnt is not zero
- * @pages: flat array of pages in the buffer -- used by fault
- * handler and only valid for buffers that are faulted in
- * @vmas: list of vma's mapping this buffer
- * @handle_count: count of handles referencing this buffer
- * @task_comm: taskcomm of last client to reference this buffer in a
- * handle, used for debugging
- * @pid: pid of last client to reference this buffer in a
- * handle, used for debugging
- */
-struct ion_buffer {
- struct kref ref;
- union {
- struct rb_node node;
- struct list_head list;
- };
- struct ion_device *dev;
- struct ion_heap *heap;
- unsigned long flags;
- unsigned long private_flags;
- size_t size;
- void *priv_virt;
- struct mutex lock;
- int kmap_cnt;
- void *vaddr;
- int dmap_cnt;
- struct sg_table *sg_table;
- struct page **pages;
- struct list_head vmas;
- /* used to track orphaned buffers */
- int handle_count;
- char task_comm[TASK_COMM_LEN];
- pid_t pid;
-};
-void ion_buffer_destroy(struct ion_buffer *buffer);
-
-/**
- * struct ion_device - the metadata of the ion device node
- * @dev: the actual misc device
- * @buffers: an rb tree of all the existing buffers
- * @buffer_lock: lock protecting the tree of buffers
- * @lock: rwsem protecting the tree of heaps and clients
- * @heaps: list of all the heaps in the system
- * @user_clients: list of all the clients created from userspace
- */
-struct ion_device {
- struct miscdevice dev;
- struct rb_root buffers;
- struct mutex buffer_lock;
- struct rw_semaphore lock;
- struct plist_head heaps;
- long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
- unsigned long arg);
- struct rb_root clients;
- struct dentry *debug_root;
- struct dentry *heaps_debug_root;
- struct dentry *clients_debug_root;
- int heap_cnt;
-};
-
-/**
- * struct ion_client - a process/hw block local address space
- * @node: node in the tree of all clients
- * @dev: backpointer to ion device
- * @handles: an rb tree of all the handles in this client
- * @idr: an idr space for allocating handle ids
- * @lock: lock protecting the tree of handles
- * @name: used for debugging
- * @display_name: used for debugging (unique version of @name)
- * @display_serial: used for debugging (to make display_name unique)
- * @task: used for debugging
- *
- * A client represents a list of buffers this client may access.
- * The mutex stored here is used to protect both handles tree
- * as well as the handles themselves, and should be held while modifying either.
- */
-struct ion_client {
- struct rb_node node;
- struct ion_device *dev;
- struct rb_root handles;
- struct idr idr;
- struct mutex lock;
- const char *name;
- char *display_name;
- int display_serial;
- struct task_struct *task;
- pid_t pid;
- struct dentry *debug_root;
-};
-
-/**
- * ion_handle - a client local reference to a buffer
- * @ref: reference count
- * @client: back pointer to the client the buffer resides in
- * @buffer: pointer to the buffer
- * @node: node in the client's handle rbtree
- * @kmap_cnt: count of times this client has mapped to kernel
- * @id: client-unique id allocated by client->idr
- *
- * Modifications to node, map_cnt or mapping should be protected by the
- * lock in the client. Other fields are never changed after initialization.
- */
-struct ion_handle {
- struct kref ref;
- struct ion_client *client;
- struct ion_buffer *buffer;
- struct rb_node node;
- unsigned int kmap_cnt;
- int id;
-};
-
-/**
- * struct ion_heap_ops - ops to operate on a given heap
- * @allocate: allocate memory
- * @free: free memory
- * @map_kernel map memory to the kernel
- * @unmap_kernel unmap memory to the kernel
- * @map_user map memory to userspace
- *
- * allocate, phys, and map_user return 0 on success, -errno on error.
- * map_dma and map_kernel return pointer on success, ERR_PTR on
- * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
- * the buffer's private_flags when called from a shrinker. In that
- * case, the pages being free'd must be truly free'd back to the
- * system, not put in a page pool or otherwise cached.
- */
-struct ion_heap_ops {
- int (*allocate)(struct ion_heap *heap,
- struct ion_buffer *buffer, unsigned long len,
- unsigned long align, unsigned long flags);
- void (*free)(struct ion_buffer *buffer);
- void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
- void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
- int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
- struct vm_area_struct *vma);
- int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
-};
-
-/**
- * heap flags - flags between the heaps and core ion code
- */
-#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
-
-/**
- * private flags - flags internal to ion
- */
-/*
- * Buffer is being freed from a shrinker function. Skip any possible
- * heap-specific caching mechanism (e.g. page pools). Guarantees that
- * any buffer storage that came from the system allocator will be
- * returned to the system allocator.
- */
-#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0)
-
-/**
- * struct ion_heap - represents a heap in the system
- * @node: rb node to put the heap on the device's tree of heaps
- * @dev: back pointer to the ion_device
- * @type: type of heap
- * @ops: ops struct as above
- * @flags: flags
- * @id: id of heap, also indicates priority of this heap when
- * allocating. These are specified by platform data and
- * MUST be unique
- * @name: used for debugging
- * @shrinker: a shrinker for the heap
- * @free_list: free list head if deferred free is used
- * @free_list_size size of the deferred free list in bytes
- * @lock: protects the free list
- * @waitqueue: queue to wait on from deferred free thread
- * @task: task struct of deferred free thread
- * @debug_show: called when heap debug file is read to add any
- * heap specific debug info to output
- *
- * Represents a pool of memory from which buffers can be made. In some
- * systems the only heap is regular system memory allocated via vmalloc.
- * On others, some blocks might require large physically contiguous buffers
- * that are allocated from a specially reserved heap.
- */
-struct ion_heap {
- struct plist_node node;
- struct ion_device *dev;
- enum ion_heap_type type;
- struct ion_heap_ops *ops;
- unsigned long flags;
- unsigned int id;
- const char *name;
- struct shrinker shrinker;
- struct list_head free_list;
- size_t free_list_size;
- spinlock_t free_lock;
- wait_queue_head_t waitqueue;
- struct task_struct *task;
-
- int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
-};
-
-/**
- * ion_buffer_cached - this ion buffer is cached
- * @buffer: buffer
- *
- * indicates whether this ion buffer is cached
- */
-bool ion_buffer_cached(struct ion_buffer *buffer);
-
-/**
- * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
- * @buffer: buffer
- *
- * indicates whether userspace mappings of this buffer will be faulted
- * in, this can affect how buffers are allocated from the heap.
- */
-bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
-
-/**
- * ion_device_create - allocates and returns an ion device
- * @custom_ioctl: arch specific ioctl function if applicable
- *
- * returns a valid device or -PTR_ERR
- */
-struct ion_device *ion_device_create(long (*custom_ioctl)
- (struct ion_client *client,
- unsigned int cmd,
- unsigned long arg));
-
-/**
- * ion_device_destroy - free and device and it's resource
- * @dev: the device
- */
-void ion_device_destroy(struct ion_device *dev);
-
-/**
- * ion_device_add_heap - adds a heap to the ion device
- * @dev: the device
- * @heap: the heap to add
- */
-void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
-
-/**
- * some helpers for common operations on buffers using the sg_table
- * and vaddr fields
- */
-void *ion_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
-void ion_heap_unmap_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
-int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
- struct vm_area_struct *vma);
-int ion_heap_buffer_zero(struct ion_buffer *buffer);
-int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
-
-/**
- * ion_heap_init_shrinker
- * @heap: the heap
- *
- * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
- * this function will be called to setup a shrinker to shrink the freelists
- * and call the heap's shrink op.
- */
-void ion_heap_init_shrinker(struct ion_heap *heap);
-
-/**
- * ion_heap_init_deferred_free -- initialize deferred free functionality
- * @heap: the heap
- *
- * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
- * be called to setup deferred frees. Calls to free the buffer will
- * return immediately and the actual free will occur some time later
- */
-int ion_heap_init_deferred_free(struct ion_heap *heap);
-
-/**
- * ion_heap_freelist_add - add a buffer to the deferred free list
- * @heap: the heap
- * @buffer: the buffer
- *
- * Adds an item to the deferred freelist.
- */
-void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
-
-/**
- * ion_heap_freelist_drain - drain the deferred free list
- * @heap: the heap
- * @size: amount of memory to drain in bytes
- *
- * Drains the indicated amount of memory from the deferred freelist immediately.
- * Returns the total amount freed. The total freed may be higher depending
- * on the size of the items in the list, or lower if there is insufficient
- * total memory on the freelist.
- */
-size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
-
-/**
- * ion_heap_freelist_shrink - drain the deferred free
- * list, skipping any heap-specific
- * pooling or caching mechanisms
- *
- * @heap: the heap
- * @size: amount of memory to drain in bytes
- *
- * Drains the indicated amount of memory from the deferred freelist immediately.
- * Returns the total amount freed. The total freed may be higher depending
- * on the size of the items in the list, or lower if there is insufficient
- * total memory on the freelist.
- *
- * Unlike with @ion_heap_freelist_drain, don't put any pages back into
- * page pools or otherwise cache the pages. Everything must be
- * genuinely free'd back to the system. If you're free'ing from a
- * shrinker you probably want to use this. Note that this relies on
- * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
- * flag.
- */
-size_t ion_heap_freelist_shrink(struct ion_heap *heap,
- size_t size);
-
-/**
- * ion_heap_freelist_size - returns the size of the freelist in bytes
- * @heap: the heap
- */
-size_t ion_heap_freelist_size(struct ion_heap *heap);
-
-
-/**
- * functions for creating and destroying the built in ion heaps.
- * architectures can add their own custom architecture specific
- * heaps as appropriate.
- */
-
-struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data);
-void ion_heap_destroy(struct ion_heap *heap);
-struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused);
-void ion_system_heap_destroy(struct ion_heap *heap);
-
-struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *heap);
-void ion_system_contig_heap_destroy(struct ion_heap *heap);
-
-struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data);
-void ion_carveout_heap_destroy(struct ion_heap *heap);
-
-struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data);
-void ion_chunk_heap_destroy(struct ion_heap *heap);
-struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data);
-void ion_cma_heap_destroy(struct ion_heap *heap);
-
-/**
- * functions for creating and destroying a heap pool -- allows you
- * to keep a pool of pre allocated memory to use from your heap. Keeping
- * a pool of memory that is ready for dma, ie any cached mapping have been
- * invalidated from the cache, provides a significant performance benefit on
- * many systems
- */
-
-/**
- * struct ion_page_pool - pagepool struct
- * @high_count: number of highmem items in the pool
- * @low_count: number of lowmem items in the pool
- * @high_items: list of highmem items
- * @low_items: list of lowmem items
- * @mutex: lock protecting this struct and especially the count
- * item list
- * @gfp_mask: gfp_mask to use from alloc
- * @order: order of pages in the pool
- * @list: plist node for list of pools
- * @cached: it's cached pool or not
- *
- * Allows you to keep a pool of pre allocated pages to use from your heap.
- * Keeping a pool of pages that is ready for dma, ie any cached mapping have
- * been invalidated from the cache, provides a significant performance benefit
- * on many systems
- */
-struct ion_page_pool {
- int high_count;
- int low_count;
- bool cached;
- struct list_head high_items;
- struct list_head low_items;
- struct mutex mutex;
- gfp_t gfp_mask;
- unsigned int order;
- struct plist_node list;
-};
-
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
- bool cached);
-void ion_page_pool_destroy(struct ion_page_pool *pool);
-struct page *ion_page_pool_alloc(struct ion_page_pool *pool);
-void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);
-
-/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
- * @pool: the pool
- * @gfp_mask: the memory type to reclaim
- * @nr_to_scan: number of items to shrink in pages
- *
- * returns the number of items freed in pages
- */
-int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
- int nr_to_scan);
-
-/**
- * ion_pages_sync_for_device - cache flush pages for use with the specified
- * device
- * @dev: the device the pages will be used with
- * @page: the first page to be flushed
- * @size: size in bytes of region to be flushed
- * @dir: direction of dma transfer
- */
-void ion_pages_sync_for_device(struct device *dev, struct page *page,
- size_t size, enum dma_data_direction dir);
-
-long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-
-int ion_sync_for_device(struct ion_client *client, int fd);
-
-struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
- int id);
-
-void ion_free_nolock(struct ion_client *client, struct ion_handle *handle);
-
-int ion_handle_put_nolock(struct ion_handle *handle);
-
-struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
- int id);
-
-int ion_handle_put(struct ion_handle *handle);
-
-int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query);
-
-#endif /* _ION_PRIV_H */
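The page-pool helpers whose kernel-doc is deleted above keep their signatures after this patch; they are simply carried by "ion.h" instead of the now-removed ion_priv.h (the system heap below still calls them while dropping the ion_priv.h include). A minimal, hypothetical usage sketch of that API, assuming only the declarations shown in the removed header; example_order and example_pool_demo are invented names:

/*
 * Hypothetical sketch, not part of the patch: assumes the ion_page_pool
 * declarations shown in the deleted header above, now provided by "ion.h".
 */
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#include "ion.h"

static const unsigned int example_order = 4;	/* 16-page (64K) chunks */

static int example_pool_demo(void)
{
	struct ion_page_pool *pool;
	struct page *page;

	/* One pool per order/cacheability combination, as the system heap keeps. */
	pool = ion_page_pool_create(GFP_KERNEL | __GFP_ZERO, example_order, false);
	if (!pool)
		return -ENOMEM;

	/* Get a page of that order from the pool. */
	page = ion_page_pool_alloc(pool);
	if (page)
		ion_page_pool_free(pool, page);	/* returns it to the pool, not the buddy */

	/* Drop up to 16 cached pages, as a shrinker callback would. */
	ion_page_pool_shrink(pool, GFP_KERNEL, 16);

	ion_page_pool_destroy(pool);
	return 0;
}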
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 3ebbb75746e8..c50f2d9fc58c 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -24,7 +24,6 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
-#include "ion_priv.h"
#define NUM_ORDERS ARRAY_SIZE(orders)
@@ -75,9 +74,6 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
page = ion_page_pool_alloc(pool);
- if (cached)
- ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
- DMA_BIDIRECTIONAL);
return page;
}
@@ -129,7 +125,7 @@ static struct page *alloc_largest_available(struct ion_system_heap *heap,
static int ion_system_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
+ unsigned long size,
unsigned long flags)
{
struct ion_system_heap *sys_heap = container_of(heap,
@@ -143,9 +139,6 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
unsigned long size_remaining = PAGE_ALIGN(size);
unsigned int max_order = orders[0];
- if (align > PAGE_SIZE)
- return -EINVAL;
-
if (size / PAGE_SIZE > totalram_pages / 2)
return -ENOMEM;
@@ -327,7 +320,7 @@ err_create_pool:
return -ENOMEM;
}
-struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
+static struct ion_heap *__ion_system_heap_create(void)
{
struct ion_system_heap *heap;
@@ -355,24 +348,23 @@ free_heap:
return ERR_PTR(-ENOMEM);
}
-void ion_system_heap_destroy(struct ion_heap *heap)
+static int ion_system_heap_create(void)
{
- struct ion_system_heap *sys_heap = container_of(heap,
- struct ion_system_heap,
- heap);
- int i;
+ struct ion_heap *heap;
- for (i = 0; i < NUM_ORDERS; i++) {
- ion_page_pool_destroy(sys_heap->uncached_pools[i]);
- ion_page_pool_destroy(sys_heap->cached_pools[i]);
- }
- kfree(sys_heap);
+ heap = __ion_system_heap_create();
+ if (IS_ERR(heap))
+ return PTR_ERR(heap);
+ heap->name = "ion_system_heap";
+
+ ion_device_add_heap(heap);
+ return 0;
}
+device_initcall(ion_system_heap_create);
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
unsigned long len,
- unsigned long align,
unsigned long flags)
{
int order = get_order(len);
@@ -381,9 +373,6 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
unsigned long i;
int ret;
- if (align > (PAGE_SIZE << order))
- return -EINVAL;
-
page = alloc_pages(low_order_gfp_flags, order);
if (!page)
return -ENOMEM;
@@ -408,8 +397,6 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
buffer->sg_table = table;
- ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
-
return 0;
free_table:
@@ -442,7 +429,7 @@ static struct ion_heap_ops kmalloc_ops = {
.map_user = ion_heap_map_user,
};
-struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
+static struct ion_heap *__ion_system_contig_heap_create(void)
{
struct ion_heap *heap;
@@ -451,10 +438,20 @@ struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
return ERR_PTR(-ENOMEM);
heap->ops = &kmalloc_ops;
heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
+ heap->name = "ion_system_contig_heap";
return heap;
}
-void ion_system_contig_heap_destroy(struct ion_heap *heap)
+static int ion_system_contig_heap_create(void)
{
- kfree(heap);
+ struct ion_heap *heap;
+
+ heap = __ion_system_contig_heap_create();
+ if (IS_ERR(heap))
+ return PTR_ERR(heap);
+
+ ion_device_add_heap(heap);
+ return 0;
}
+device_initcall(ion_system_contig_heap_create);
+
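The hunks above replace the exported ion_system_heap_create()/ion_system_heap_destroy() pair with self-registration: each heap is built once at boot and handed to the now single-argument ion_device_add_heap() from a device_initcall(). A hedged sketch of another built-in heap following the same pattern; example_heap_ops and the heap name are invented, and a real heap would supply its own ops and type:

/* Hypothetical sketch mirroring the registration pattern added above. */
#include <linux/err.h>
#include <linux/slab.h>

#include "ion.h"

static struct ion_heap_ops example_heap_ops;	/* .allocate, .free, ... filled in elsewhere */

static int example_heap_create(void)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
	if (!heap)
		return -ENOMEM;

	heap->ops = &example_heap_ops;
	heap->type = ION_HEAP_TYPE_CUSTOM;
	heap->name = "example_heap";

	/* No ion_device argument any more: the heap joins the one internal device. */
	ion_device_add_heap(heap);
	return 0;
}
device_initcall(example_heap_create);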
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
deleted file mode 100644
index 5abf8320a96a..000000000000
--- a/drivers/staging/android/ion/ion_test.c
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- *
- * Copyright (C) 2013 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#define pr_fmt(fmt) "ion-test: " fmt
-
-#include <linux/dma-buf.h>
-#include <linux/dma-direction.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-
-#include "ion.h"
-#include "../uapi/ion_test.h"
-
-#define u64_to_uptr(x) ((void __user *)(unsigned long)(x))
-
-struct ion_test_device {
- struct miscdevice misc;
-};
-
-struct ion_test_data {
- struct dma_buf *dma_buf;
- struct device *dev;
-};
-
-static int ion_handle_test_dma(struct device *dev, struct dma_buf *dma_buf,
- void __user *ptr, size_t offset, size_t size,
- bool write)
-{
- int ret = 0;
- struct dma_buf_attachment *attach;
- struct sg_table *table;
- pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
- enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- struct sg_page_iter sg_iter;
- unsigned long offset_page;
-
- attach = dma_buf_attach(dma_buf, dev);
- if (IS_ERR(attach))
- return PTR_ERR(attach);
-
- table = dma_buf_map_attachment(attach, dir);
- if (IS_ERR(table))
- return PTR_ERR(table);
-
- offset_page = offset >> PAGE_SHIFT;
- offset %= PAGE_SIZE;
-
- for_each_sg_page(table->sgl, &sg_iter, table->nents, offset_page) {
- struct page *page = sg_page_iter_page(&sg_iter);
- void *vaddr = vmap(&page, 1, VM_MAP, pgprot);
- size_t to_copy = PAGE_SIZE - offset;
-
- to_copy = min(to_copy, size);
- if (!vaddr) {
- ret = -ENOMEM;
- goto err;
- }
-
- if (write)
- ret = copy_from_user(vaddr + offset, ptr, to_copy);
- else
- ret = copy_to_user(ptr, vaddr + offset, to_copy);
-
- vunmap(vaddr);
- if (ret) {
- ret = -EFAULT;
- goto err;
- }
- size -= to_copy;
- if (!size)
- break;
- ptr += to_copy;
- offset = 0;
- }
-
-err:
- dma_buf_unmap_attachment(attach, table, dir);
- dma_buf_detach(dma_buf, attach);
- return ret;
-}
-
-static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr,
- size_t offset, size_t size, bool write)
-{
- int ret;
- unsigned long page_offset = offset >> PAGE_SHIFT;
- size_t copy_offset = offset % PAGE_SIZE;
- size_t copy_size = size;
- enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
- if (offset > dma_buf->size || size > dma_buf->size - offset)
- return -EINVAL;
-
- ret = dma_buf_begin_cpu_access(dma_buf, dir);
- if (ret)
- return ret;
-
- while (copy_size > 0) {
- size_t to_copy;
- void *vaddr = dma_buf_kmap(dma_buf, page_offset);
-
- if (!vaddr)
- goto err;
-
- to_copy = min_t(size_t, PAGE_SIZE - copy_offset, copy_size);
-
- if (write)
- ret = copy_from_user(vaddr + copy_offset, ptr, to_copy);
- else
- ret = copy_to_user(ptr, vaddr + copy_offset, to_copy);
-
- dma_buf_kunmap(dma_buf, page_offset, vaddr);
- if (ret) {
- ret = -EFAULT;
- goto err;
- }
-
- copy_size -= to_copy;
- ptr += to_copy;
- page_offset++;
- copy_offset = 0;
- }
-err:
- dma_buf_end_cpu_access(dma_buf, dir);
- return ret;
-}
-
-static long ion_test_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- struct ion_test_data *test_data = filp->private_data;
- int ret = 0;
-
- union {
- struct ion_test_rw_data test_rw;
- } data;
-
- if (_IOC_SIZE(cmd) > sizeof(data))
- return -EINVAL;
-
- if (_IOC_DIR(cmd) & _IOC_WRITE)
- if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
- return -EFAULT;
-
- switch (cmd) {
- case ION_IOC_TEST_SET_FD:
- {
- struct dma_buf *dma_buf = NULL;
- int fd = arg;
-
- if (fd >= 0) {
- dma_buf = dma_buf_get((int)arg);
- if (IS_ERR(dma_buf))
- return PTR_ERR(dma_buf);
- }
- if (test_data->dma_buf)
- dma_buf_put(test_data->dma_buf);
- test_data->dma_buf = dma_buf;
- break;
- }
- case ION_IOC_TEST_DMA_MAPPING:
- {
- ret = ion_handle_test_dma(test_data->dev, test_data->dma_buf,
- u64_to_uptr(data.test_rw.ptr),
- data.test_rw.offset,
- data.test_rw.size,
- data.test_rw.write);
- break;
- }
- case ION_IOC_TEST_KERNEL_MAPPING:
- {
- ret = ion_handle_test_kernel(test_data->dma_buf,
- u64_to_uptr(data.test_rw.ptr),
- data.test_rw.offset,
- data.test_rw.size,
- data.test_rw.write);
- break;
- }
- default:
- return -ENOTTY;
- }
-
- if (_IOC_DIR(cmd) & _IOC_READ) {
- if (copy_to_user((void __user *)arg, &data, sizeof(data)))
- return -EFAULT;
- }
- return ret;
-}
-
-static int ion_test_open(struct inode *inode, struct file *file)
-{
- struct ion_test_data *data;
- struct miscdevice *miscdev = file->private_data;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->dev = miscdev->parent;
-
- file->private_data = data;
-
- return 0;
-}
-
-static int ion_test_release(struct inode *inode, struct file *file)
-{
- struct ion_test_data *data = file->private_data;
-
- kfree(data);
-
- return 0;
-}
-
-static const struct file_operations ion_test_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = ion_test_ioctl,
- .compat_ioctl = ion_test_ioctl,
- .open = ion_test_open,
- .release = ion_test_release,
-};
-
-static int __init ion_test_probe(struct platform_device *pdev)
-{
- int ret;
- struct ion_test_device *testdev;
-
- testdev = devm_kzalloc(&pdev->dev, sizeof(struct ion_test_device),
- GFP_KERNEL);
- if (!testdev)
- return -ENOMEM;
-
- testdev->misc.minor = MISC_DYNAMIC_MINOR;
- testdev->misc.name = "ion-test";
- testdev->misc.fops = &ion_test_fops;
- testdev->misc.parent = &pdev->dev;
- ret = misc_register(&testdev->misc);
- if (ret) {
- pr_err("failed to register misc device.\n");
- return ret;
- }
-
- platform_set_drvdata(pdev, testdev);
-
- return 0;
-}
-
-static int ion_test_remove(struct platform_device *pdev)
-{
- struct ion_test_device *testdev;
-
- testdev = platform_get_drvdata(pdev);
- if (!testdev)
- return -ENODATA;
-
- misc_deregister(&testdev->misc);
- return 0;
-}
-
-static struct platform_device *ion_test_pdev;
-static struct platform_driver ion_test_platform_driver = {
- .remove = ion_test_remove,
- .driver = {
- .name = "ion-test",
- },
-};
-
-static int __init ion_test_init(void)
-{
- ion_test_pdev = platform_device_register_simple("ion-test",
- -1, NULL, 0);
- if (IS_ERR(ion_test_pdev))
- return PTR_ERR(ion_test_pdev);
-
- return platform_driver_probe(&ion_test_platform_driver, ion_test_probe);
-}
-
-static void __exit ion_test_exit(void)
-{
- platform_driver_unregister(&ion_test_platform_driver);
- platform_device_unregister(ion_test_pdev);
-}
-
-module_init(ion_test_init);
-module_exit(ion_test_exit);
-MODULE_LICENSE("GPL v2");
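The deleted test driver's ION_IOC_TEST_KERNEL_MAPPING path above is essentially the generic dma-buf kernel CPU-access sequence: dma_buf_begin_cpu_access(), per-page dma_buf_kmap()/dma_buf_kunmap(), then dma_buf_end_cpu_access(). A condensed, hedged sketch of that sequence as the dma-buf API stands in this kernel version; example_read_first_page and the fixed DMA_FROM_DEVICE direction are illustrative only:

/* Hypothetical sketch of the CPU-access pattern the deleted driver exercised. */
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/mm.h>
#include <linux/string.h>

static int example_read_first_page(struct dma_buf *dmabuf, void *dst, size_t len)
{
	void *vaddr;
	int ret;

	if (len > PAGE_SIZE || len > dmabuf->size)
		return -EINVAL;

	/* Make the buffer coherent for CPU reads before touching it. */
	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/* Kernel mappings are per page; page index 0 here. */
	vaddr = dma_buf_kmap(dmabuf, 0);
	if (!vaddr) {
		ret = -ENOMEM;
		goto out;
	}

	memcpy(dst, vaddr, len);
	dma_buf_kunmap(dmabuf, 0, vaddr);
out:
	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
	return ret;
}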
diff --git a/drivers/staging/android/ion/tegra/Makefile b/drivers/staging/android/ion/tegra/Makefile
deleted file mode 100644
index 808f1f53f11a..000000000000
--- a/drivers/staging/android/ion/tegra/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_ION_TEGRA) += tegra_ion.o
diff --git a/drivers/staging/android/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c
deleted file mode 100644
index 49e55e5acead..000000000000
--- a/drivers/staging/android/ion/tegra/tegra_ion.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * drivers/gpu/tegra/tegra_ion.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include "../ion.h"
-#include "../ion_priv.h"
-
-static struct ion_device *idev;
-static int num_heaps;
-static struct ion_heap **heaps;
-
-static int tegra_ion_probe(struct platform_device *pdev)
-{
- struct ion_platform_data *pdata = pdev->dev.platform_data;
- int err;
- int i;
-
- num_heaps = pdata->nr;
-
- heaps = devm_kcalloc(&pdev->dev, pdata->nr,
- sizeof(struct ion_heap *), GFP_KERNEL);
-
- idev = ion_device_create(NULL);
- if (IS_ERR(idev))
- return PTR_ERR(idev);
-
- /* create the heaps as specified in the board file */
- for (i = 0; i < num_heaps; i++) {
- struct ion_platform_heap *heap_data = &pdata->heaps[i];
-
- heaps[i] = ion_heap_create(heap_data);
- if (IS_ERR_OR_NULL(heaps[i])) {
- err = PTR_ERR(heaps[i]);
- goto err;
- }
- ion_device_add_heap(idev, heaps[i]);
- }
- platform_set_drvdata(pdev, idev);
- return 0;
-err:
- for (i = 0; i < num_heaps; ++i)
- ion_heap_destroy(heaps[i]);
- return err;
-}
-
-static int tegra_ion_remove(struct platform_device *pdev)
-{
- struct ion_device *idev = platform_get_drvdata(pdev);
- int i;
-
- ion_device_destroy(idev);
- for (i = 0; i < num_heaps; i++)
- ion_heap_destroy(heaps[i]);
- return 0;
-}
-
-static struct platform_driver ion_driver = {
- .probe = tegra_ion_probe,
- .remove = tegra_ion_remove,
- .driver = { .name = "ion-tegra" }
-};
-
-module_platform_driver(ion_driver);
-
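tegra_ion.c was the last in-tree user of the board-file model, in which platform data enumerates the heaps and the driver instantiates each one with ion_heap_create(); with this patch that interface goes away in favour of heaps registering themselves at initcall time. For context, a hypothetical fragment of the platform data the old model consumed; the heap ids, names and the carveout base/size are invented, and the structures are the pre-patch ion_platform_heap/ion_platform_data definitions:

/* Hypothetical board-file fragment for the removed model; values invented. */
#include <linux/kernel.h>
#include <linux/sizes.h>

#include "../ion.h"	/* pre-patch header that defined these structures */

static struct ion_platform_heap example_heaps[] = {
	{
		.type = ION_HEAP_TYPE_SYSTEM,
		.id = 0,
		.name = "system",
	},
	{
		.type = ION_HEAP_TYPE_CARVEOUT,
		.id = 1,
		.name = "carveout",
		.base = 0x80000000,
		.size = SZ_16M,
	},
};

static struct ion_platform_data example_ion_pdata = {
	.nr = ARRAY_SIZE(example_heaps),
	.heaps = example_heaps,
};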