author		Alan Cox <alan@linux.intel.com>	2017-02-17 16:55:17 +0000
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-03-06 09:39:54 +0100
commit		a49d25364dfb9f8a64037488a39ab1f56c5fa419 (patch)
tree		bd97382cf06a958cef045e75334fc622500ba209 /drivers/staging/media/atomisp/pci/atomisp2/hmm
parent		372499b589ae5ec38d3dec88b72f2bde3b3790d4 (diff)
staging/atomisp: Add support for the Intel IPU v2
This patch adds support for the Intel IPU v2 as found on Android and IoT Baytrail-T and Baytrail-CR platforms (those with the IPU PCI mapped). You will also need the firmware files from your device (Android usually puts them into /etc) - or you can find them in the downloadable restore/upgrade kits if you blew them away for some reason.

It may be possible to extend the driver to handle the BYT/T Windows platforms such as the ASUS T100TA. These platforms don't expose the IPU via the PCI interface but via ACPI buried in the GPU description, and with the camera information somewhere unknown, so a platform driver interface would need to be added to the codebase *IFF* the firmware works on such devices.

To get good results you also need a suitable support library such as libxcam. The camera is intended to be driven from Android, so it has a lot of features that many desktop apps don't fully support.

In theory all the pieces are there to build it with -DISP2401 and some differing files to get CherryTrail/T support, but unifying the drivers properly is a work in progress.

The IPU driver represents the work of a lot of people within Intel over many years. Its historical goal was portability rather than Linux upstream. Any queries about the upstream-aimed driver should be sent to me, not to the original authors.

Signed-off-by: Alan Cox <alan@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/staging/media/atomisp/pci/atomisp2/hmm')
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c769
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c1542
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo_dev.c333
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c257
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c258
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c218
6 files changed, 3377 insertions, 0 deletions
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c
new file mode 100644
index 000000000000..358d340e7434
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c
@@ -0,0 +1,769 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+/*
+ * This file contains entry functions for memory management of the ISP driver
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/highmem.h> /* for kmap */
+#include <linux/io.h> /* for page_to_phys */
+#include <linux/sysfs.h>
+
+#include "hmm/hmm.h"
+#include "hmm/hmm_pool.h"
+#include "hmm/hmm_bo.h"
+
+#include "atomisp_internal.h"
+#include "asm/cacheflush.h"
+#include "mmu/isp_mmu.h"
+#include "mmu/sh_mmu_mrfld.h"
+
+#ifdef USE_SSSE3
+#include <asm/ssse3.h>
+#endif
+
+struct hmm_bo_device bo_device;
+struct hmm_pool dynamic_pool;
+struct hmm_pool reserved_pool;
+static ia_css_ptr dummy_ptr;
+struct _hmm_mem_stat hmm_mem_stat;
+
+const char *hmm_bo_type_strings[HMM_BO_LAST] = {
+ "p", /* private */
+ "s", /* shared */
+ "u", /* user */
+#ifdef CONFIG_ION
+ "i", /* ion */
+#endif
+};
+
+static ssize_t bo_show(struct device *dev, struct device_attribute *attr,
+ char *buf, struct list_head *bo_list, bool active)
+{
+ ssize_t ret = 0;
+ struct hmm_buffer_object *bo;
+ unsigned long flags;
+ int i;
+ long total[HMM_BO_LAST] = { 0 };
+ long count[HMM_BO_LAST] = { 0 };
+ int index1 = 0;
+ int index2 = 0;
+
+ ret = scnprintf(buf, PAGE_SIZE, "type pgnr\n");
+ if (ret <= 0)
+ return 0;
+
+ index1 += ret;
+
+ spin_lock_irqsave(&bo_device.list_lock, flags);
+ list_for_each_entry(bo, bo_list, list) {
+ if ((active && (bo->status & HMM_BO_ALLOCED)) ||
+ (!active && !(bo->status & HMM_BO_ALLOCED))) {
+ ret = scnprintf(buf + index1, PAGE_SIZE - index1,
+ "%s %d\n",
+ hmm_bo_type_strings[bo->type], bo->pgnr);
+
+ total[bo->type] += bo->pgnr;
+ count[bo->type]++;
+ if (ret > 0)
+ index1 += ret;
+ }
+ }
+ spin_unlock_irqrestore(&bo_device.list_lock, flags);
+
+ for (i = 0; i < HMM_BO_LAST; i++) {
+ if (count[i]) {
+ ret = scnprintf(buf + index1 + index2,
+ PAGE_SIZE - index1 - index2,
+ "%ld %s buffer objects: %ld KB\n",
+ count[i], hmm_bo_type_strings[i], total[i] * 4);
+ if (ret > 0)
+ index2 += ret;
+ }
+ }
+
+ /* Add trailing zero, not included by scnprintf */
+ return index1 + index2 + 1;
+}
+
+static ssize_t active_bo_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return bo_show(dev, attr, buf, &bo_device.entire_bo_list, true);
+}
+
+static ssize_t free_bo_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return bo_show(dev, attr, buf, &bo_device.entire_bo_list, false);
+}
+
+static ssize_t reserved_pool_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t ret = 0;
+
+ struct hmm_reserved_pool_info *pinfo = reserved_pool.pool_info;
+ unsigned long flags;
+
+ if (!pinfo || !pinfo->initialized)
+ return 0;
+
+ spin_lock_irqsave(&pinfo->list_lock, flags);
+ ret = scnprintf(buf, PAGE_SIZE, "%d out of %d pages available\n",
+ pinfo->index, pinfo->pgnr);
+ spin_unlock_irqrestore(&pinfo->list_lock, flags);
+
+ if (ret > 0)
+ ret++; /* Add trailing zero, not included by scnprintf */
+
+ return ret;
+};
+
+static ssize_t dynamic_pool_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t ret = 0;
+
+ struct hmm_dynamic_pool_info *pinfo = dynamic_pool.pool_info;
+ unsigned long flags;
+
+ if (!pinfo || !pinfo->initialized)
+ return 0;
+
+ spin_lock_irqsave(&pinfo->list_lock, flags);
+ ret = scnprintf(buf, PAGE_SIZE, "%d (max %d) pages available\n",
+ pinfo->pgnr, pinfo->pool_size);
+ spin_unlock_irqrestore(&pinfo->list_lock, flags);
+
+ if (ret > 0)
+ ret++; /* Add trailing zero, not included by scnprintf */
+
+ return ret;
+};
+
+static DEVICE_ATTR(active_bo, S_IRUGO, active_bo_show, NULL);
+static DEVICE_ATTR(free_bo, S_IRUGO, free_bo_show, NULL);
+static DEVICE_ATTR(reserved_pool, S_IRUGO, reserved_pool_show, NULL);
+static DEVICE_ATTR(dynamic_pool, S_IRUGO, dynamic_pool_show, NULL);
+
+static struct attribute *sysfs_attrs_ctrl[] = {
+ &dev_attr_active_bo.attr,
+ &dev_attr_free_bo.attr,
+ &dev_attr_reserved_pool.attr,
+ &dev_attr_dynamic_pool.attr,
+ NULL
+};
+
+static struct attribute_group atomisp_attribute_group[] = {
+ {.attrs = sysfs_attrs_ctrl },
+};
+
+int hmm_init(void)
+{
+ int ret;
+
+ ret = hmm_bo_device_init(&bo_device, &sh_mmu_mrfld,
+ ISP_VM_START, ISP_VM_SIZE);
+ if (ret)
+ dev_err(atomisp_dev, "hmm_bo_device_init failed.\n");
+
+ /*
+ * hmm uses NULL (0) to indicate an invalid ISP virtual address, and
+ * ISP_VM_START is defined as 0 too, so we allocate one piece of dummy
+ * memory, which takes the value 0, at the beginning, to avoid
+ * hmm_alloc returning 0 for any later allocation.
+ */
+ dummy_ptr = hmm_alloc(1, HMM_BO_PRIVATE, 0, 0, HMM_UNCACHED);
+
+ if (!ret) {
+ ret = sysfs_create_group(&atomisp_dev->kobj,
+ atomisp_attribute_group);
+ if (ret)
+ dev_err(atomisp_dev,
+ "%s Failed to create sysfs\n", __func__);
+ }
+
+ return ret;
+}
+
+void hmm_cleanup(void)
+{
+ sysfs_remove_group(&atomisp_dev->kobj, atomisp_attribute_group);
+
+ /*
+ * free dummy memory first
+ */
+ hmm_free(dummy_ptr);
+ dummy_ptr = 0;
+
+ hmm_bo_device_exit(&bo_device);
+}
+
+ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
+ int from_highmem, void *userptr, bool cached)
+{
+ unsigned int pgnr;
+ struct hmm_buffer_object *bo;
+ int ret;
+
+ /*Get page number from size*/
+ pgnr = size_to_pgnr_ceil(bytes);
+
+ /*Buffer object structure init*/
+ bo = hmm_bo_alloc(&bo_device, pgnr);
+ if (!bo) {
+ dev_err(atomisp_dev, "hmm_bo_create failed.\n");
+ goto create_bo_err;
+ }
+
+ /*Allocate pages for memory*/
+ ret = hmm_bo_alloc_pages(bo, type, from_highmem, userptr, cached);
+ if (ret) {
+ dev_err(atomisp_dev,
+ "hmm_bo_alloc_pages failed.\n");
+ goto alloc_page_err;
+ }
+
+ /* Bind the virtual address and the pages together */
+ ret = hmm_bo_bind(bo);
+ if (ret) {
+ dev_err(atomisp_dev, "hmm_bo_bind failed.\n");
+ goto bind_err;
+ }
+
+ hmm_mem_stat.tol_cnt += pgnr;
+
+ return bo->start;
+
+bind_err:
+ hmm_bo_free_pages(bo);
+alloc_page_err:
+ hmm_bo_unref(bo);
+create_bo_err:
+ return 0;
+}
+
+void hmm_free(ia_css_ptr virt)
+{
+ struct hmm_buffer_object *bo;
+
+ bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt);
+
+ if (!bo) {
+ dev_err(atomisp_dev,
+ "cannot find buffer object starting at address 0x%x\n",
+ (unsigned int)virt);
+ return;
+ }
+
+ hmm_mem_stat.tol_cnt -= bo->pgnr;
+
+ hmm_bo_unbind(bo);
+
+ hmm_bo_free_pages(bo);
+
+ hmm_bo_unref(bo);
+}
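A minimal usage sketch of the hmm_alloc()/hmm_free() pair above (illustrative only, not part of the patch; it assumes a caller inside the atomisp driver after hmm_init() has run, and reuses HMM_BO_PRIVATE and HMM_UNCACHED exactly as hmm_init() does):

	/* allocate one page of ISP-private, uncached memory */
	ia_css_ptr addr = hmm_alloc(PAGE_SIZE, HMM_BO_PRIVATE, 0, NULL, HMM_UNCACHED);

	if (!addr)
		return -ENOMEM;	/* hmm_alloc() returns 0 on failure */

	/* ... hand addr to the ISP ... */

	hmm_free(addr);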
+
+static inline int hmm_check_bo(struct hmm_buffer_object *bo, unsigned int ptr)
+{
+ if (!bo) {
+ dev_err(atomisp_dev,
+ "cannot find buffer object containing address 0x%x\n", ptr);
+ return -EINVAL;
+ }
+
+ if (!hmm_bo_page_allocated(bo)) {
+ dev_err(atomisp_dev,
+ "buffer object has no pages allocated.\n");
+ return -EINVAL;
+ }
+
+ if (!hmm_bo_allocated(bo)) {
+ dev_err(atomisp_dev,
+ "buffer object has no virtual address space allocated.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*Read function in ISP memory management*/
+static int load_and_flush_by_kmap(ia_css_ptr virt, void *data, unsigned int bytes)
+{
+ struct hmm_buffer_object *bo;
+ unsigned int idx, offset, len;
+ char *src, *des;
+ int ret;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ ret = hmm_check_bo(bo, virt);
+ if (ret)
+ return ret;
+
+ des = (char *)data;
+ while (bytes) {
+ idx = (virt - bo->start) >> PAGE_SHIFT;
+ offset = (virt - bo->start) - (idx << PAGE_SHIFT);
+
+ src = (char *)kmap(bo->page_obj[idx].page);
+ if (!src) {
+ dev_err(atomisp_dev,
+ "kmap buffer object page failed: pg_idx = %d\n", idx);
+ return -EINVAL;
+ }
+
+ src += offset;
+
+ if ((bytes + offset) >= PAGE_SIZE) {
+ len = PAGE_SIZE - offset;
+ bytes -= len;
+ } else {
+ len = bytes;
+ bytes = 0;
+ }
+
+ virt += len; /* update virt for next loop */
+
+ if (des) {
+
+#ifdef USE_SSSE3
+ _ssse3_memcpy(des, src, len);
+#else
+ memcpy(des, src, len);
+#endif
+ des += len;
+ }
+
+ clflush_cache_range(src, len);
+
+ kunmap(bo->page_obj[idx].page);
+ }
+
+ return 0;
+}
+
+/*Read function in ISP memory management*/
+static int load_and_flush(ia_css_ptr virt, void *data, unsigned int bytes)
+{
+ struct hmm_buffer_object *bo;
+ int ret;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ ret = hmm_check_bo(bo, virt);
+ if (ret)
+ return ret;
+
+ if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
+ void *src = bo->vmap_addr;
+
+ src += (virt - bo->start);
+#ifdef USE_SSSE3
+ _ssse3_memcpy(data, src, bytes);
+#else
+ memcpy(data, src, bytes);
+#endif
+ if (bo->status & HMM_BO_VMAPED_CACHED)
+ clflush_cache_range(src, bytes);
+ } else {
+ void *vptr;
+
+ vptr = hmm_bo_vmap(bo, true);
+ if (!vptr)
+ return load_and_flush_by_kmap(virt, data, bytes);
+ else
+ vptr = vptr + (virt - bo->start);
+
+#ifdef USE_SSSE3
+ _ssse3_memcpy(data, vptr, bytes);
+#else
+ memcpy(data, vptr, bytes);
+#endif
+ clflush_cache_range(vptr, bytes);
+ hmm_bo_vunmap(bo);
+ }
+
+ return 0;
+}
+
+/*Read function in ISP memory management*/
+int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes)
+{
+ if (!data) {
+ dev_err(atomisp_dev,
+ "hmm_load NULL argument\n");
+ return -EINVAL;
+ }
+ return load_and_flush(virt, data, bytes);
+}
+
+/*Flush hmm data from the data cache*/
+int hmm_flush(ia_css_ptr virt, unsigned int bytes)
+{
+ return load_and_flush(virt, NULL, bytes);
+}
+
+/*Write function in ISP memory management*/
+int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes)
+{
+ struct hmm_buffer_object *bo;
+ unsigned int idx, offset, len;
+ char *src, *des;
+ int ret;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ ret = hmm_check_bo(bo, virt);
+ if (ret)
+ return ret;
+
+ if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
+ void *dst = bo->vmap_addr;
+
+ dst += (virt - bo->start);
+#ifdef USE_SSSE3
+ _ssse3_memcpy(dst, data, bytes);
+#else
+ memcpy(dst, data, bytes);
+#endif
+ if (bo->status & HMM_BO_VMAPED_CACHED)
+ clflush_cache_range(dst, bytes);
+ } else {
+ void *vptr;
+
+ vptr = hmm_bo_vmap(bo, true);
+ if (vptr) {
+ vptr = vptr + (virt - bo->start);
+
+#ifdef USE_SSSE3
+ _ssse3_memcpy(vptr, data, bytes);
+#else
+ memcpy(vptr, data, bytes);
+#endif
+ clflush_cache_range(vptr, bytes);
+ hmm_bo_vunmap(bo);
+ return 0;
+ }
+ }
+
+ src = (char *)data;
+ while (bytes) {
+ idx = (virt - bo->start) >> PAGE_SHIFT;
+ offset = (virt - bo->start) - (idx << PAGE_SHIFT);
+
+ if (in_atomic())
+ des = (char *)kmap_atomic(bo->page_obj[idx].page);
+ else
+ des = (char *)kmap(bo->page_obj[idx].page);
+
+ if (!des) {
+ dev_err(atomisp_dev,
+ "kmap buffer object page failed: pg_idx = %d\n", idx);
+ return -EINVAL;
+ }
+
+ des += offset;
+
+ if ((bytes + offset) >= PAGE_SIZE) {
+ len = PAGE_SIZE - offset;
+ bytes -= len;
+ } else {
+ len = bytes;
+ bytes = 0;
+ }
+
+ virt += len;
+
+#ifdef USE_SSSE3
+ _ssse3_memcpy(des, src, len);
+#else
+ memcpy(des, src, len);
+#endif
+ src += len;
+
+ clflush_cache_range(des, len);
+
+ if (in_atomic())
+ /*
+ * Note: kunmap_atomic requires return addr from
+ * kmap_atomic, not the page. See linux/highmem.h
+ */
+ kunmap_atomic(des - offset);
+ else
+ kunmap(bo->page_obj[idx].page);
+ }
+
+ return 0;
+}
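A sketch of a CPU-side round trip through hmm_store()/hmm_load() above (illustrative, not part of the patch; addr is assumed to be an ISP buffer obtained from hmm_alloc() as in the earlier sketch):

	u32 pattern = 0xdeadbeef, readback = 0;

	/* copy from kernel memory into ISP memory, flushing the CPU cache */
	hmm_store(addr, &pattern, sizeof(pattern));

	/* copy it back out of ISP memory */
	hmm_load(addr, &readback, sizeof(readback));

	WARN_ON(readback != pattern);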
+
+/*memset function in ISP memory management*/
+int hmm_set(ia_css_ptr virt, int c, unsigned int bytes)
+{
+ struct hmm_buffer_object *bo;
+ unsigned int idx, offset, len;
+ char *des;
+ int ret;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ ret = hmm_check_bo(bo, virt);
+ if (ret)
+ return ret;
+
+ if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
+ void *dst = bo->vmap_addr;
+
+ dst += (virt - bo->start);
+ memset(dst, c, bytes);
+
+ if (bo->status & HMM_BO_VMAPED_CACHED)
+ clflush_cache_range(dst, bytes);
+ } else {
+ void *vptr;
+
+ vptr = hmm_bo_vmap(bo, true);
+ if (vptr) {
+ vptr = vptr + (virt - bo->start);
+ memset((void *)vptr, c, bytes);
+ clflush_cache_range(vptr, bytes);
+ hmm_bo_vunmap(bo);
+ return 0;
+ }
+ }
+
+ while (bytes) {
+ idx = (virt - bo->start) >> PAGE_SHIFT;
+ offset = (virt - bo->start) - (idx << PAGE_SHIFT);
+
+ des = (char *)kmap(bo->page_obj[idx].page);
+ if (!des) {
+ dev_err(atomisp_dev,
+ "kmap buffer object page failed: pg_idx = %d\n", idx);
+ return -EINVAL;
+ }
+ des += offset;
+
+ if ((bytes + offset) >= PAGE_SIZE) {
+ len = PAGE_SIZE - offset;
+ bytes -= len;
+ } else {
+ len = bytes;
+ bytes = 0;
+ }
+
+ virt += len;
+
+ memset(des, c, len);
+
+ clflush_cache_range(des, len);
+
+ kunmap(bo->page_obj[idx].page);
+ }
+
+ return 0;
+}
+
+/*Virtual address to physical address convert*/
+phys_addr_t hmm_virt_to_phys(ia_css_ptr virt)
+{
+ unsigned int idx, offset;
+ struct hmm_buffer_object *bo;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ if (!bo) {
+ dev_err(atomisp_dev,
+ "can not find buffer object contains address 0x%x\n",
+ virt);
+ return -1;
+ }
+
+ idx = (virt - bo->start) >> PAGE_SHIFT;
+ offset = (virt - bo->start) - (idx << PAGE_SHIFT);
+
+ return page_to_phys(bo->page_obj[idx].page) + offset;
+}
+
+int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt)
+{
+ struct hmm_buffer_object *bo;
+
+ bo = hmm_bo_device_search_start(&bo_device, virt);
+ if (!bo) {
+ dev_err(atomisp_dev,
+ "can not find buffer object start with address 0x%x\n",
+ virt);
+ return -EINVAL;
+ }
+
+ return hmm_bo_mmap(vma, bo);
+}
+
+/*Map ISP virtual address into IA virtual address*/
+void *hmm_vmap(ia_css_ptr virt, bool cached)
+{
+ struct hmm_buffer_object *bo;
+ void *ptr;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ if (!bo) {
+ dev_err(atomisp_dev,
+ "can not find buffer object contains address 0x%x\n",
+ virt);
+ return NULL;
+ }
+
+ ptr = hmm_bo_vmap(bo, cached);
+ if (ptr)
+ return ptr + (virt - bo->start);
+ else
+ return NULL;
+}
+
+/* Flush the memory which is mapped as cached memory through hmm_vmap */
+void hmm_flush_vmap(ia_css_ptr virt)
+{
+ struct hmm_buffer_object *bo;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ if (!bo) {
+ dev_warn(atomisp_dev,
+ "can not find buffer object contains address 0x%x\n",
+ virt);
+ return;
+ }
+
+ hmm_bo_flush_vmap(bo);
+}
+
+void hmm_vunmap(ia_css_ptr virt)
+{
+ struct hmm_buffer_object *bo;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ if (!bo) {
+ dev_warn(atomisp_dev,
+ "can not find buffer object contains address 0x%x\n",
+ virt);
+ return;
+ }
+
+ return hmm_bo_vunmap(bo);
+}
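A sketch of CPU-side access through the vmap helpers above (illustrative, not part of the patch; addr is again assumed to come from hmm_alloc()):

	/* map the whole ISP buffer into the kernel address space, cached */
	void *va = hmm_vmap(addr, true);

	if (va) {
		memset(va, 0, PAGE_SIZE);
		hmm_flush_vmap(addr);	/* write back the CPU cache for a cached mapping */
		hmm_vunmap(addr);
	}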
+
+int hmm_pool_register(unsigned int pool_size,
+ enum hmm_pool_type pool_type)
+{
+ switch (pool_type) {
+ case HMM_POOL_TYPE_RESERVED:
+ reserved_pool.pops = &reserved_pops;
+ return reserved_pool.pops->pool_init(&reserved_pool.pool_info,
+ pool_size);
+ case HMM_POOL_TYPE_DYNAMIC:
+ dynamic_pool.pops = &dynamic_pops;
+ return dynamic_pool.pops->pool_init(&dynamic_pool.pool_info,
+ pool_size);
+ default:
+ dev_err(atomisp_dev, "invalid pool type.\n");
+ return -EINVAL;
+ }
+}
+
+void hmm_pool_unregister(enum hmm_pool_type pool_type)
+{
+ switch (pool_type) {
+ case HMM_POOL_TYPE_RESERVED:
+ if (reserved_pool.pops && reserved_pool.pops->pool_exit)
+ reserved_pool.pops->pool_exit(&reserved_pool.pool_info);
+ break;
+ case HMM_POOL_TYPE_DYNAMIC:
+ if (dynamic_pool.pops && dynamic_pool.pops->pool_exit)
+ dynamic_pool.pops->pool_exit(&dynamic_pool.pool_info);
+ break;
+ default:
+ dev_err(atomisp_dev, "invalid pool type.\n");
+ break;
+ }
+
+ return;
+}
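A sketch of how the pool hooks above would typically be driven from the driver's probe/remove paths (illustrative, not part of the patch; repool_pgnr stands for a page count chosen by the caller):

	/* at probe time: carve out a reserved page pool for the ISP */
	ret = hmm_pool_register(repool_pgnr, HMM_POOL_TYPE_RESERVED);
	if (ret)
		dev_err(atomisp_dev, "failed to register reserved pool\n");

	/* at remove time: give the pages back */
	hmm_pool_unregister(HMM_POOL_TYPE_RESERVED);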
+
+void *hmm_isp_vaddr_to_host_vaddr(ia_css_ptr ptr, bool cached)
+{
+ return hmm_vmap(ptr, cached);
+ /* vunmap will be done in hmm_bo_release() */
+}
+
+ia_css_ptr hmm_host_vaddr_to_hrt_vaddr(const void *ptr)
+{
+ struct hmm_buffer_object *bo;
+
+ bo = hmm_bo_device_search_vmap_start(&bo_device, ptr);
+ if (bo)
+ return bo->start;
+
+ dev_err(atomisp_dev,
+ "can not find buffer object whose kernel virtual address is %p\n",
+ ptr);
+ return 0;
+}
+
+void hmm_show_mem_stat(const char *func, const int line)
+{
+ trace_printk("tol_cnt=%d usr_size=%d res_size=%d res_cnt=%d sys_size=%d dyc_thr=%d dyc_size=%d.\n",
+ hmm_mem_stat.tol_cnt,
+ hmm_mem_stat.usr_size, hmm_mem_stat.res_size,
+ hmm_mem_stat.res_cnt, hmm_mem_stat.sys_size,
+ hmm_mem_stat.dyc_thr, hmm_mem_stat.dyc_size);
+}
+
+void hmm_init_mem_stat(int res_pgnr, int dyc_en, int dyc_pgnr)
+{
+ hmm_mem_stat.res_size = res_pgnr;
+ /* If reserved mem pool is not enabled, set its "mem stat" values as -1. */
+ if (hmm_mem_stat.res_size == 0) {
+ hmm_mem_stat.res_size = -1;
+ hmm_mem_stat.res_cnt = -1;
+ }
+
+ /* If dynamic memory pool is not enabled, set its "mem stat" values as -1. */
+ if (!dyc_en) {
+ hmm_mem_stat.dyc_size = -1;
+ hmm_mem_stat.dyc_thr = -1;
+ } else {
+ hmm_mem_stat.dyc_size = 0;
+ hmm_mem_stat.dyc_thr = dyc_pgnr;
+ }
+ hmm_mem_stat.usr_size = 0;
+ hmm_mem_stat.sys_size = 0;
+ hmm_mem_stat.tol_cnt = 0;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c
new file mode 100644
index 000000000000..05ff912f3b5c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c
@@ -0,0 +1,1542 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+/*
+ * This file contains functions for buffer object structure management
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/gfp.h> /* for GFP_ATOMIC */
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/hugetlb.h>
+#include <linux/highmem.h>
+#include <linux/slab.h> /* for kmalloc */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <asm/cacheflush.h>
+#include <linux/io.h>
+#include <asm/current.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+
+#include "atomisp_internal.h"
+#include "hmm/hmm_common.h"
+#include "hmm/hmm_pool.h"
+#include "hmm/hmm_bo.h"
+
+static unsigned int order_to_nr(unsigned int order)
+{
+ return 1U << order;
+}
+
+static unsigned int nr_to_order_bottom(unsigned int nr)
+{
+ return fls(nr) - 1;
+}
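A quick illustration of these two helpers (an editorial sketch, not part of the patch, using the local variables of alloc_private_pages() further below): nr_to_order_bottom() is floor(log2(nr)), i.e. the largest order whose block still fits inside nr pages, and order_to_nr() converts back:

	order = nr_to_order_bottom(6);	/* fls(6) - 1 == 2 */
	blk_pgnr = order_to_nr(order);	/* 1U << 2 == 4 pages, the largest block <= 6 */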
+
+struct hmm_buffer_object *__bo_alloc(struct kmem_cache *bo_cache)
+{
+ struct hmm_buffer_object *bo;
+
+ bo = kmem_cache_alloc(bo_cache, GFP_KERNEL);
+ if (!bo)
+ dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
+
+ return bo;
+}
+
+static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
+ unsigned int pgnr)
+{
+ check_bodev_null_return(bdev, -EINVAL);
+ var_equal_return(hmm_bo_device_inited(bdev), 0, -EINVAL,
+ "hmm_bo_device not inited yet.\n");
+ /* prevent zero size buffer object */
+ if (pgnr == 0) {
+ dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
+ return -EINVAL;
+ }
+
+ memset(bo, 0, sizeof(*bo));
+ mutex_init(&bo->mutex);
+
+ /* init the bo->list HEAD as an element of entire_bo_list */
+ INIT_LIST_HEAD(&bo->list);
+
+ bo->bdev = bdev;
+ bo->vmap_addr = NULL;
+ bo->status = HMM_BO_FREE;
+ bo->start = bdev->start;
+ bo->pgnr = pgnr;
+ bo->end = bo->start + pgnr_to_size(pgnr);
+ bo->prev = NULL;
+ bo->next = NULL;
+
+ return 0;
+}
+
+struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
+ struct rb_node *node, unsigned int pgnr)
+{
+ struct hmm_buffer_object *this, *ret_bo, *temp_bo;
+
+ this = rb_entry(node, struct hmm_buffer_object, node);
+ if (this->pgnr == pgnr ||
+ (this->pgnr > pgnr && this->node.rb_left == NULL)) {
+ goto remove_bo_and_return;
+ } else {
+ if (this->pgnr < pgnr) {
+ if (!this->node.rb_right)
+ return NULL;
+ ret_bo = __bo_search_and_remove_from_free_rbtree(
+ this->node.rb_right, pgnr);
+ } else {
+ ret_bo = __bo_search_and_remove_from_free_rbtree(
+ this->node.rb_left, pgnr);
+ }
+ if (!ret_bo) {
+ if (this->pgnr > pgnr)
+ goto remove_bo_and_return;
+ else
+ return NULL;
+ }
+ return ret_bo;
+ }
+
+remove_bo_and_return:
+ /* NOTE: All nodes on free rbtree have a 'prev' that points to NULL.
+ * 1. check if 'this->next' is NULL:
+ * yes: erase 'this' node and rebalance rbtree, return 'this'.
+ */
+ if (this->next == NULL) {
+ rb_erase(&this->node, &this->bdev->free_rbtree);
+ return this;
+ }
+ /* NOTE: if 'this->next' is not NULL, always return 'this->next' bo.
+ * 2. check if 'this->next->next' is NULL:
+ * yes: change the related 'next/prev' pointer,
+ * return 'this->next' but the rbtree stays unchanged.
+ */
+ temp_bo = this->next;
+ this->next = temp_bo->next;
+ if (temp_bo->next)
+ temp_bo->next->prev = this;
+ temp_bo->next = NULL;
+ temp_bo->prev = NULL;
+ return temp_bo;
+}
+
+struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
+ ia_css_ptr start)
+{
+ struct rb_node *n = root->rb_node;
+ struct hmm_buffer_object *bo;
+
+ do {
+ bo = rb_entry(n, struct hmm_buffer_object, node);
+
+ if (bo->start > start) {
+ if (n->rb_left == NULL)
+ return NULL;
+ n = n->rb_left;
+ } else if (bo->start < start) {
+ if (n->rb_right == NULL)
+ return NULL;
+ n = n->rb_right;
+ } else {
+ return bo;
+ }
+ } while (n);
+
+ return NULL;
+}
+
+struct hmm_buffer_object *__bo_search_by_addr_in_range(struct rb_root *root,
+ unsigned int start)
+{
+ struct rb_node *n = root->rb_node;
+ struct hmm_buffer_object *bo;
+
+ do {
+ bo = rb_entry(n, struct hmm_buffer_object, node);
+
+ if (bo->start > start) {
+ if (n->rb_left == NULL)
+ return NULL;
+ n = n->rb_left;
+ } else {
+ if (bo->end > start)
+ return bo;
+ if (n->rb_right == NULL)
+ return NULL;
+ n = n->rb_right;
+ }
+ } while (n);
+
+ return NULL;
+}
+
+static void __bo_insert_to_free_rbtree(struct rb_root *root,
+ struct hmm_buffer_object *bo)
+{
+ struct rb_node **new = &(root->rb_node);
+ struct rb_node *parent = NULL;
+ struct hmm_buffer_object *this;
+ unsigned int pgnr = bo->pgnr;
+
+ while (*new) {
+ parent = *new;
+ this = container_of(*new, struct hmm_buffer_object, node);
+
+ if (pgnr < this->pgnr) {
+ new = &((*new)->rb_left);
+ } else if (pgnr > this->pgnr) {
+ new = &((*new)->rb_right);
+ } else {
+ bo->prev = this;
+ bo->next = this->next;
+ if (this->next)
+ this->next->prev = bo;
+ this->next = bo;
+ bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
+ return;
+ }
+ }
+
+ bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
+
+ rb_link_node(&bo->node, parent, new);
+ rb_insert_color(&bo->node, root);
+}
+
+static void __bo_insert_to_alloc_rbtree(struct rb_root *root,
+ struct hmm_buffer_object *bo)
+{
+ struct rb_node **new = &(root->rb_node);
+ struct rb_node *parent = NULL;
+ struct hmm_buffer_object *this;
+ unsigned int start = bo->start;
+
+ while (*new) {
+ parent = *new;
+ this = container_of(*new, struct hmm_buffer_object, node);
+
+ if (start < this->start)
+ new = &((*new)->rb_left);
+ else
+ new = &((*new)->rb_right);
+ }
+
+ kref_init(&bo->kref);
+ bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_ALLOCED;
+
+ rb_link_node(&bo->node, parent, new);
+ rb_insert_color(&bo->node, root);
+}
+
+struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
+ struct hmm_buffer_object *bo,
+ unsigned int pgnr)
+{
+ struct hmm_buffer_object *new_bo;
+ unsigned long flags;
+ int ret;
+
+ new_bo = __bo_alloc(bdev->bo_cache);
+ if (!new_bo) {
+ dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
+ return NULL;
+ }
+ ret = __bo_init(bdev, new_bo, pgnr);
+ if (ret) {
+ dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
+ kmem_cache_free(bdev->bo_cache, new_bo);
+ return NULL;
+ }
+
+ new_bo->start = bo->start;
+ new_bo->end = new_bo->start + pgnr_to_size(pgnr);
+ bo->start = new_bo->end;
+ bo->pgnr = bo->pgnr - pgnr;
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_add_tail(&new_bo->list, &bo->list);
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+
+ return new_bo;
+}
+
+static void __bo_take_off_handling(struct hmm_buffer_object *bo)
+{
+ struct hmm_bo_device *bdev = bo->bdev;
+ /* There are 4 situations when we take off a known bo from free rbtree:
+ * 1. if bo->prev == NULL && bo->next == NULL, bo is a rbtree node
+ * and does not have a linked list after bo; to take off this bo,
+ * we just need to erase bo directly and rebalance the free rbtree
+ */
+ if (bo->prev == NULL && bo->next == NULL) {
+ rb_erase(&bo->node, &bdev->free_rbtree);
+ /* 2. when bo->next != NULL && bo->prev == NULL, bo is a rbtree node
+ * and has a linked list; to take off this bo we need to erase bo
+ * first, then insert bo->next into the free rbtree and rebalance
+ * the free rbtree
+ */
+ } else if (bo->prev == NULL && bo->next != NULL) {
+ bo->next->prev = NULL;
+ rb_erase(&bo->node, &bdev->free_rbtree);
+ __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next);
+ bo->next = NULL;
+ /* 3. when bo->prev != NULL && bo->next == NULL, bo is not a rbtree
+ * node, bo is the last element of the linked list after the rbtree
+ * node; to take off this bo, we just need to set the "prev/next"
+ * pointers to NULL, the free rbtree stays unchanged
+ */
+ } else if (bo->prev != NULL && bo->next == NULL) {
+ bo->prev->next = NULL;
+ bo->prev = NULL;
+ /* 4. when bo->prev != NULL && bo->next != NULL, bo is not a rbtree
+ * node, bo is in the middle of the linked list after the rbtree node;
+ * to take off this bo, we just set the "prev/next" pointers
+ * to NULL, the free rbtree stays unchanged
+ */
+ } else {
+ bo->next->prev = bo->prev;
+ bo->prev->next = bo->next;
+ bo->next = NULL;
+ bo->prev = NULL;
+ }
+}
+
+struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
+ struct hmm_buffer_object *next_bo)
+{
+ struct hmm_bo_device *bdev;
+ unsigned long flags;
+
+ bdev = bo->bdev;
+ next_bo->start = bo->start;
+ next_bo->pgnr = next_bo->pgnr + bo->pgnr;
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_del(&bo->list);
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+
+ kmem_cache_free(bo->bdev->bo_cache, bo);
+
+ return next_bo;
+}
+
+/*
+ * hmm_bo_device functions.
+ */
+int hmm_bo_device_init(struct hmm_bo_device *bdev,
+ struct isp_mmu_client *mmu_driver,
+ unsigned int vaddr_start,
+ unsigned int size)
+{
+ struct hmm_buffer_object *bo;
+ unsigned long flags;
+ int ret;
+
+ check_bodev_null_return(bdev, -EINVAL);
+
+ ret = isp_mmu_init(&bdev->mmu, mmu_driver);
+ if (ret) {
+ dev_err(atomisp_dev, "isp_mmu_init failed.\n");
+ return ret;
+ }
+
+ bdev->start = vaddr_start;
+ bdev->pgnr = size_to_pgnr_ceil(size);
+ bdev->size = pgnr_to_size(bdev->pgnr);
+
+ spin_lock_init(&bdev->list_lock);
+ mutex_init(&bdev->rbtree_mutex);
+
+ bdev->flag = HMM_BO_DEVICE_INITED;
+
+ INIT_LIST_HEAD(&bdev->entire_bo_list);
+ bdev->allocated_rbtree = RB_ROOT;
+ bdev->free_rbtree = RB_ROOT;
+
+ bdev->bo_cache = kmem_cache_create("bo_cache",
+ sizeof(struct hmm_buffer_object), 0, 0, NULL);
+ if (!bdev->bo_cache) {
+ dev_err(atomisp_dev, "%s: create cache failed!\n", __func__);
+ isp_mmu_exit(&bdev->mmu);
+ return -ENOMEM;
+ }
+
+ bo = __bo_alloc(bdev->bo_cache);
+ if (!bo) {
+ dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
+ isp_mmu_exit(&bdev->mmu);
+ return -ENOMEM;
+ }
+
+ ret = __bo_init(bdev, bo, bdev->pgnr);
+ if (ret) {
+ dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
+ kmem_cache_free(bdev->bo_cache, bo);
+ isp_mmu_exit(&bdev->mmu);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_add_tail(&bo->list, &bdev->entire_bo_list);
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+
+ __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
+
+ return 0;
+}
+
+struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
+ unsigned int pgnr)
+{
+ struct hmm_buffer_object *bo, *new_bo;
+ struct rb_root *root = &bdev->free_rbtree;
+
+ check_bodev_null_return(bdev, NULL);
+ var_equal_return(hmm_bo_device_inited(bdev), 0, NULL,
+ "hmm_bo_device not inited yet.\n");
+
+ if (pgnr == 0) {
+ dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
+ return NULL;
+ }
+
+ mutex_lock(&bdev->rbtree_mutex);
+ bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr);
+ if (!bo) {
+ mutex_unlock(&bdev->rbtree_mutex);
+ dev_err(atomisp_dev, "%s: Out of Memory! hmm_bo_alloc failed",
+ __func__);
+ return NULL;
+ }
+
+ if (bo->pgnr > pgnr) {
+ new_bo = __bo_break_up(bdev, bo, pgnr);
+ if (!new_bo) {
+ mutex_unlock(&bdev->rbtree_mutex);
+ dev_err(atomisp_dev, "%s: __bo_break_up failed!\n",
+ __func__);
+ return NULL;
+ }
+
+ __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo);
+ __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
+
+ mutex_unlock(&bdev->rbtree_mutex);
+ return new_bo;
+ }
+
+ __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo);
+
+ mutex_unlock(&bdev->rbtree_mutex);
+ return bo;
+}
+
+void hmm_bo_release(struct hmm_buffer_object *bo)
+{
+ struct hmm_bo_device *bdev = bo->bdev;
+ struct hmm_buffer_object *next_bo, *prev_bo;
+
+ mutex_lock(&bdev->rbtree_mutex);
+
+ /*
+ * FIX ME:
+ *
+ * how to destroy the bo when it is still MMAPED?
+ *
+ * ideally, this will not happen as hmm_bo_release
+ * will only be called when kref reaches 0, and in the mmap
+ * operation hmm_bo_ref will eventually be called.
+ * so, if this happens, something has gone wrong.
+ */
+ if (bo->status & HMM_BO_MMAPED) {
+ mutex_unlock(&bdev->rbtree_mutex);
+ dev_dbg(atomisp_dev, "destroy bo which is MMAPED, do nothing\n");
+ return;
+ }
+
+ if (bo->status & HMM_BO_BINDED) {
+ dev_warn(atomisp_dev, "the bo is still binded, unbind it first...\n");
+ hmm_bo_unbind(bo);
+ }
+
+ if (bo->status & HMM_BO_PAGE_ALLOCED) {
+ dev_warn(atomisp_dev, "the pages is not freed, free pages first\n");
+ hmm_bo_free_pages(bo);
+ }
+ if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
+ dev_warn(atomisp_dev, "the vunmap is not done, do it...\n");
+ hmm_bo_vunmap(bo);
+ }
+
+ rb_erase(&bo->node, &bdev->allocated_rbtree);
+
+ prev_bo = list_entry(bo->list.prev, struct hmm_buffer_object, list);
+ next_bo = list_entry(bo->list.next, struct hmm_buffer_object, list);
+
+ if (bo->list.prev != &bdev->entire_bo_list &&
+ prev_bo->end == bo->start &&
+ (prev_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
+ __bo_take_off_handling(prev_bo);
+ bo = __bo_merge(prev_bo, bo);
+ }
+
+ if (bo->list.next != &bdev->entire_bo_list &&
+ next_bo->start == bo->end &&
+ (next_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
+ __bo_take_off_handling(next_bo);
+ bo = __bo_merge(bo, next_bo);
+ }
+
+ __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
+
+ mutex_unlock(&bdev->rbtree_mutex);
+ return;
+}
+
+void hmm_bo_device_exit(struct hmm_bo_device *bdev)
+{
+ struct hmm_buffer_object *bo;
+ unsigned long flags;
+
+ dev_dbg(atomisp_dev, "%s: entering!\n", __func__);
+
+ check_bodev_null_return_void(bdev);
+
+ /*
+ * release all allocated bos even if they are in use;
+ * all bos will be merged into one big bo
+ */
+ while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree))
+ hmm_bo_release(
+ rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node));
+
+ dev_dbg(atomisp_dev, "%s: finished releasing all allocated bos!\n",
+ __func__);
+
+ /* free all bos to release all ISP virtual memory */
+ while (!list_empty(&bdev->entire_bo_list)) {
+ bo = list_to_hmm_bo(bdev->entire_bo_list.next);
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_del(&bo->list);
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+
+ kmem_cache_free(bdev->bo_cache, bo);
+ }
+
+ dev_dbg(atomisp_dev, "%s: finished to free all bos!\n", __func__);
+
+ kmem_cache_destroy(bdev->bo_cache);
+
+ isp_mmu_exit(&bdev->mmu);
+}
+
+int hmm_bo_device_inited(struct hmm_bo_device *bdev)
+{
+ check_bodev_null_return(bdev, -EINVAL);
+
+ return bdev->flag == HMM_BO_DEVICE_INITED;
+}
+
+int hmm_bo_allocated(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return(bo, 0);
+
+ return bo->status & HMM_BO_ALLOCED;
+}
+
+struct hmm_buffer_object *hmm_bo_device_search_start(
+ struct hmm_bo_device *bdev, ia_css_ptr vaddr)
+{
+ struct hmm_buffer_object *bo;
+
+ check_bodev_null_return(bdev, NULL);
+
+ mutex_lock(&bdev->rbtree_mutex);
+ bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr);
+ if (!bo) {
+ mutex_unlock(&bdev->rbtree_mutex);
+ dev_err(atomisp_dev, "%s can not find bo with addr: 0x%x\n",
+ __func__, vaddr);
+ return NULL;
+ }
+ mutex_unlock(&bdev->rbtree_mutex);
+
+ return bo;
+}
+
+struct hmm_buffer_object *hmm_bo_device_search_in_range(
+ struct hmm_bo_device *bdev, unsigned int vaddr)
+{
+ struct hmm_buffer_object *bo;
+
+ check_bodev_null_return(bdev, NULL);
+
+ mutex_lock(&bdev->rbtree_mutex);
+ bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr);
+ if (!bo) {
+ mutex_unlock(&bdev->rbtree_mutex);
+ dev_err(atomisp_dev, "%s can not find bo contain addr: 0x%x\n",
+ __func__, vaddr);
+ return NULL;
+ }
+ mutex_unlock(&bdev->rbtree_mutex);
+
+ return bo;
+}
+
+struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
+ struct hmm_bo_device *bdev, const void *vaddr)
+{
+ struct list_head *pos;
+ struct hmm_buffer_object *bo;
+ unsigned long flags;
+
+ check_bodev_null_return(bdev, NULL);
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_for_each(pos, &bdev->entire_bo_list) {
+ bo = list_to_hmm_bo(pos);
+ /* pass bo which has no vm_node allocated */
+ if ((bo->status & HMM_BO_MASK) == HMM_BO_FREE)
+ continue;
+ if (bo->vmap_addr == vaddr)
+ goto found;
+ }
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+ return NULL;
+found:
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+ return bo;
+
+}
+
+
+static void free_private_bo_pages(struct hmm_buffer_object *bo,
+ struct hmm_pool *dypool,
+ struct hmm_pool *repool,
+ int free_pgnr)
+{
+ int i, ret;
+
+ for (i = 0; i < free_pgnr; i++) {
+ switch (bo->page_obj[i].type) {
+ case HMM_PAGE_TYPE_RESERVED:
+ if (repool->pops
+ && repool->pops->pool_free_pages) {
+ repool->pops->pool_free_pages(repool->pool_info,
+ &bo->page_obj[i]);
+ hmm_mem_stat.res_cnt--;
+ }
+ break;
+ /*
+ * HMM_PAGE_TYPE_GENERAL indicates that the pages are from system
+ * memory, so when freeing them, they should be put back into the
+ * dynamic pool.
+ */
+ case HMM_PAGE_TYPE_DYNAMIC:
+ case HMM_PAGE_TYPE_GENERAL:
+ if (dypool->pops
+ && dypool->pops->pool_inited
+ && dypool->pops->pool_inited(dypool->pool_info)) {
+ if (dypool->pops->pool_free_pages)
+ dypool->pops->pool_free_pages(
+ dypool->pool_info,
+ &bo->page_obj[i]);
+ break;
+ }
+
+ /*
+ * if the dynamic memory pool doesn't exist, the pages need
+ * to be freed back to the system directly.
+ */
+ default:
+ ret = set_pages_wb(bo->page_obj[i].page, 1);
+ if (ret)
+ dev_err(atomisp_dev,
+ "set page to WB err ...ret = %d\n",
+ ret);
+ /*
+ * W/A: set_pages_wb occasionally returns -EFAULT, indicating
+ * that the page address is not in the valid range
+ * (0xffff880000000000~0xffffc7ffffffffff); in that case
+ * __free_pages would panic. It is not known why the page
+ * address becomes invalid; it may be memory corruption caused
+ * by low memory.
+ */
+ if (!ret) {
+ __free_pages(bo->page_obj[i].page, 0);
+ hmm_mem_stat.sys_size--;
+ }
+ break;
+ }
+ }
+
+ return;
+}
+
+/*Allocate pages which will be used only by ISP*/
+static int alloc_private_pages(struct hmm_buffer_object *bo,
+ int from_highmem,
+ bool cached,
+ struct hmm_pool *dypool,
+ struct hmm_pool *repool)
+{
+ int ret;
+ unsigned int pgnr, order, blk_pgnr, alloc_pgnr;
+ struct page *pages;
+ gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
+ int i, j;
+ int failure_number = 0;
+ bool reduce_order = false;
+ bool lack_mem = true;
+
+ if (from_highmem)
+ gfp |= __GFP_HIGHMEM;
+
+ pgnr = bo->pgnr;
+
+ bo->page_obj = atomisp_kernel_malloc(
+ sizeof(struct hmm_page_object) * pgnr);
+ if (unlikely(!bo->page_obj)) {
+ dev_err(atomisp_dev, "out of memory for bo->page_obj\n");
+ return -ENOMEM;
+ }
+
+ i = 0;
+ alloc_pgnr = 0;
+
+ /*
+ * get physical pages from dynamic pages pool.
+ */
+ if (dypool->pops && dypool->pops->pool_alloc_pages) {
+ alloc_pgnr = dypool->pops->pool_alloc_pages(dypool->pool_info,
+ bo->page_obj, pgnr,
+ cached);
+ hmm_mem_stat.dyc_size -= alloc_pgnr;
+
+ if (alloc_pgnr == pgnr)
+ return 0;
+ }
+
+ pgnr -= alloc_pgnr;
+ i += alloc_pgnr;
+
+ /*
+ * get physical pages from reserved pages pool for atomisp.
+ */
+ if (repool->pops && repool->pops->pool_alloc_pages) {
+ alloc_pgnr = repool->pops->pool_alloc_pages(repool->pool_info,
+ &bo->page_obj[i], pgnr,
+ cached);
+ hmm_mem_stat.res_cnt += alloc_pgnr;
+ if (alloc_pgnr == pgnr)
+ return 0;
+ }
+
+ pgnr -= alloc_pgnr;
+ i += alloc_pgnr;
+
+ while (pgnr) {
+ order = nr_to_order_bottom(pgnr);
+ /*
+ * if we are short of memory, we set the order to 0
+ * every time.
+ */
+ if (lack_mem)
+ order = HMM_MIN_ORDER;
+ else if (order > HMM_MAX_ORDER)
+ order = HMM_MAX_ORDER;
+retry:
+ /*
+ * When order > HMM_MIN_ORDER, for performance reasons we don't
+ * want alloc_pages() to sleep. In case it fails and fallbacks
+ * to HMM_MIN_ORDER or in case the requested order is originally
+ * the minimum value, we can allow alloc_pages() to sleep for
+ * robustness purpose.
+ *
+ * REVISIT: why __GFP_FS is necessary?
+ */
+ if (order == HMM_MIN_ORDER) {
+ gfp &= ~GFP_NOWAIT;
+ gfp |= __GFP_RECLAIM | __GFP_FS;
+ }
+
+ pages = alloc_pages(gfp, order);
+ if (unlikely(!pages)) {
+ /*
+ * in the low memory case, if page allocation fails,
+ * we try whether an order-0 allocation could
+ * succeed. if order 0 fails too, that means there is
+ * no memory left.
+ */
+ if (order == HMM_MIN_ORDER) {
+ dev_err(atomisp_dev,
+ "%s: cannot allocate pages\n",
+ __func__);
+ goto cleanup;
+ }
+ order = HMM_MIN_ORDER;
+ failure_number++;
+ reduce_order = true;
+ /*
+ * if allocation fails twice in a row, we assume we are
+ * short of memory now.
+ */
+ if (failure_number == 2) {
+ lack_mem = true;
+ failure_number = 0;
+ }
+ goto retry;
+ } else {
+ blk_pgnr = order_to_nr(order);
+
+ if (!cached) {
+ /*
+ * set memory to uncacheable -- UC_MINUS
+ */
+ ret = set_pages_uc(pages, blk_pgnr);
+ if (ret) {
+ dev_err(atomisp_dev,
+ "set page uncacheable failed.\n");
+
+ __free_pages(pages, order);
+
+ goto cleanup;
+ }
+ }
+
+ for (j = 0; j < blk_pgnr; j++) {
+ bo->page_obj[i].page = pages + j;
+ bo->page_obj[i++].type = HMM_PAGE_TYPE_GENERAL;
+ }
+
+ pgnr -= blk_pgnr;
+ hmm_mem_stat.sys_size += blk_pgnr;
+
+ /*
+ * if order is not reduced this time, clear
+ * failure_number.
+ */
+ if (reduce_order)
+ reduce_order = false;
+ else
+ failure_number = 0;
+ }
+ }
+
+ return 0;
+cleanup:
+ alloc_pgnr = i;
+ free_private_bo_pages(bo, dypool, repool, alloc_pgnr);
+
+ atomisp_kernel_free(bo->page_obj);
+
+ return -ENOMEM;
+}
+
+static void free_private_pages(struct hmm_buffer_object *bo,
+ struct hmm_pool *dypool,
+ struct hmm_pool *repool)
+{
+ free_private_bo_pages(bo, dypool, repool, bo->pgnr);
+
+ atomisp_kernel_free(bo->page_obj);
+}
+
+/*
+ * Hacked from kernel function __get_user_pages in mm/memory.c
+ *
+ * Handle buffers allocated by another kernel space driver and mmapped into
+ * user space; the function ignores the VM_PFNMAP and VM_IO flags in the VMA
+ * structure.
+ *
+ * Get physical pages from a user space virtual address and update them into
+ * the page list.
+ */
+static int __get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas)
+{
+ int i, ret;
+ unsigned long vm_flags;
+
+ if (nr_pages <= 0)
+ return 0;
+
+ VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
+
+ /*
+ * Require read or write permissions.
+ * If FOLL_FORCE is set, we only require the "MAY" flags.
+ */
+ vm_flags = (gup_flags & FOLL_WRITE) ?
+ (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+ vm_flags &= (gup_flags & FOLL_FORCE) ?
+ (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+ i = 0;
+
+ do {
+ struct vm_area_struct *vma;
+
+ vma = find_vma(mm, start);
+ if (!vma) {
+ dev_err(atomisp_dev, "find_vma failed\n");
+ return i ? : -EFAULT;
+ }
+
+ if (is_vm_hugetlb_page(vma)) {
+ /*
+ i = follow_hugetlb_page(mm, vma, pages, vmas,
+ &start, &nr_pages, i, gup_flags);
+ */
+ continue;
+ }
+
+ do {
+ struct page *page;
+ unsigned long pfn;
+
+ /*
+ * If we have a pending SIGKILL, don't keep faulting
+ * pages and potentially allocating memory.
+ */
+ if (unlikely(fatal_signal_pending(current))) {
+ dev_err(atomisp_dev,
+ "fatal_signal_pending in %s\n",
+ __func__);
+ return i ? i : -ERESTARTSYS;
+ }
+
+ ret = follow_pfn(vma, start, &pfn);
+ if (ret) {
+ dev_err(atomisp_dev, "follow_pfn() failed\n");
+ return i ? : -EFAULT;
+ }
+
+ page = pfn_to_page(pfn);
+ if (IS_ERR(page))
+ return i ? i : PTR_ERR(page);
+ if (pages) {
+ pages[i] = page;
+ get_page(page);
+ flush_anon_page(vma, page, start);
+ flush_dcache_page(page);
+ }
+ if (vmas)
+ vmas[i] = vma;
+ i++;
+ start += PAGE_SIZE;
+ nr_pages--;
+ } while (nr_pages && start < vma->vm_end);
+ } while (nr_pages);
+
+ return i;
+}
+
+static int get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int nr_pages, int write, int force,
+ struct page **pages, struct vm_area_struct **vmas)
+{
+ int flags = FOLL_TOUCH;
+
+ if (pages)
+ flags |= FOLL_GET;
+ if (write)
+ flags |= FOLL_WRITE;
+ if (force)
+ flags |= FOLL_FORCE;
+
+ return __get_pfnmap_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
+}
+
+/*
+ * Convert user space virtual address into pages list
+ */
+static int alloc_user_pages(struct hmm_buffer_object *bo,
+ void *userptr, bool cached)
+{
+ int page_nr;
+ int i;
+ struct vm_area_struct *vma;
+ struct page **pages;
+
+ pages = atomisp_kernel_malloc(sizeof(struct page *) * bo->pgnr);
+ if (unlikely(!pages)) {
+ dev_err(atomisp_dev, "out of memory for pages...\n");
+ return -ENOMEM;
+ }
+
+ bo->page_obj = atomisp_kernel_malloc(
+ sizeof(struct hmm_page_object) * bo->pgnr);
+ if (unlikely(!bo->page_obj)) {
+ dev_err(atomisp_dev, "out of memory for bo->page_obj...\n");
+ atomisp_kernel_free(pages);
+ return -ENOMEM;
+ }
+
+ mutex_unlock(&bo->mutex);
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm, (unsigned long)userptr);
+ up_read(&current->mm->mmap_sem);
+ if (vma == NULL) {
+ dev_err(atomisp_dev, "find_vma failed\n");
+ atomisp_kernel_free(bo->page_obj);
+ atomisp_kernel_free(pages);
+ return -EFAULT;
+ }
+ mutex_lock(&bo->mutex);
+ /*
+ * Handle a frame buffer allocated by another kernel space driver
+ * and mapped to user space
+ */
+ if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
+ page_nr = get_pfnmap_pages(current, current->mm,
+ (unsigned long)userptr,
+ (int)(bo->pgnr), 1, 0,
+ pages, NULL);
+ bo->mem_type = HMM_BO_MEM_TYPE_PFN;
+ } else {
+ /*Handle frame buffer allocated in user space*/
+ mutex_unlock(&bo->mutex);
+ down_read(&current->mm->mmap_sem);
+ page_nr = get_user_pages((unsigned long)userptr,
+ (int)(bo->pgnr), 1, pages, NULL);
+ up_read(&current->mm->mmap_sem);
+ mutex_lock(&bo->mutex);
+ bo->mem_type = HMM_BO_MEM_TYPE_USER;
+ }
+
+ /* can be written by caller, not forced */
+ if (page_nr != bo->pgnr) {
+ dev_err(atomisp_dev,
+ "get_user_pages err: bo->pgnr = %d, pgnr actually pinned = %d.\n",
+ bo->pgnr, page_nr);
+ goto out_of_mem;
+ }
+
+ for (i = 0; i < bo->pgnr; i++) {
+ bo->page_obj[i].page = pages[i];
+ bo->page_obj[i].type = HMM_PAGE_TYPE_GENERAL;
+ }
+ hmm_mem_stat.usr_size += bo->pgnr;
+ atomisp_kernel_free(pages);
+
+ return 0;
+
+out_of_mem:
+ for (i = 0; i < page_nr; i++)
+ put_page(pages[i]);
+ atomisp_kernel_free(pages);
+ atomisp_kernel_free(bo->page_obj);
+
+ return -ENOMEM;
+}
+
+static void free_user_pages(struct hmm_buffer_object *bo)
+{
+ int i;
+
+ for (i = 0; i < bo->pgnr; i++)
+ put_page(bo->page_obj[i].page);
+ hmm_mem_stat.usr_size -= bo->pgnr;
+
+ atomisp_kernel_free(bo->page_obj);
+}
+
+/*
+ * allocate/free physical pages for the bo.
+ *
+ * type indicates where the pages are from. currently we have 3 types
+ * of memory: HMM_BO_PRIVATE, HMM_BO_USER, HMM_BO_SHARE.
+ *
+ * from_highmem is only valid when type is HMM_BO_PRIVATE; it will
+ * try to alloc memory from highmem if from_highmem is set.
+ *
+ * userptr is only valid when type is HMM_BO_USER; it indicates
+ * the start address from the user space task.
+ *
+ * from_highmem and userptr will both be ignored when type is
+ * HMM_BO_SHARE.
+ */
+int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
+ enum hmm_bo_type type, int from_highmem,
+ void *userptr, bool cached)
+{
+ int ret = -EINVAL;
+
+ check_bo_null_return(bo, -EINVAL);
+
+ mutex_lock(&bo->mutex);
+ check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
+
+ /*
+ * TO DO:
+ * add HMM_BO_USER type
+ */
+ if (type == HMM_BO_PRIVATE) {
+ ret = alloc_private_pages(bo, from_highmem,
+ cached, &dynamic_pool, &reserved_pool);
+ } else if (type == HMM_BO_USER) {
+ ret = alloc_user_pages(bo, userptr, cached);
+ } else {
+ dev_err(atomisp_dev, "invalid buffer type.\n");
+ ret = -EINVAL;
+ }
+ if (ret)
+ goto alloc_err;
+
+ bo->type = type;
+
+ bo->status |= HMM_BO_PAGE_ALLOCED;
+
+ mutex_unlock(&bo->mutex);
+
+ return 0;
+
+alloc_err:
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev, "alloc pages err...\n");
+ return ret;
+status_err:
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev,
+ "buffer object has already page allocated.\n");
+ return -EINVAL;
+}
+
+/*
+ * free physical pages of the bo.
+ */
+void hmm_bo_free_pages(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return_void(bo);
+
+ mutex_lock(&bo->mutex);
+
+ check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err2);
+
+ /* clear the flag anyway. */
+ bo->status &= (~HMM_BO_PAGE_ALLOCED);
+
+ if (bo->type == HMM_BO_PRIVATE)
+ free_private_pages(bo, &dynamic_pool, &reserved_pool);
+ else if (bo->type == HMM_BO_USER)
+ free_user_pages(bo);
+ else
+ dev_err(atomisp_dev, "invalid buffer type.\n");
+ mutex_unlock(&bo->mutex);
+
+ return;
+
+status_err2:
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev,
+ "buffer object not page allocated yet.\n");
+}
+
+int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
+{
+ int ret;
+
+ check_bo_null_return(bo, 0);
+
+ ret = bo->status & HMM_BO_PAGE_ALLOCED;
+
+ return ret;
+}
+
+/*
+ * get physical page info of the bo.
+ */
+int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
+ struct hmm_page_object **page_obj, int *pgnr)
+{
+ check_bo_null_return(bo, -EINVAL);
+
+ mutex_lock(&bo->mutex);
+
+ check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
+
+ *page_obj = bo->page_obj;
+ *pgnr = bo->pgnr;
+
+ mutex_unlock(&bo->mutex);
+
+ return 0;
+
+status_err:
+ dev_err(atomisp_dev,
+ "buffer object not page allocated yet.\n");
+ mutex_unlock(&bo->mutex);
+ return -EINVAL;
+}
+
+/*
+ * bind the physical pages to a virtual address space.
+ */
+int hmm_bo_bind(struct hmm_buffer_object *bo)
+{
+ int ret;
+ unsigned int virt;
+ struct hmm_bo_device *bdev;
+ unsigned int i;
+
+ check_bo_null_return(bo, -EINVAL);
+
+ mutex_lock(&bo->mutex);
+
+ check_bo_status_yes_goto(bo,
+ HMM_BO_PAGE_ALLOCED | HMM_BO_ALLOCED,
+ status_err1);
+
+ check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err2);
+
+ bdev = bo->bdev;
+
+ virt = bo->start;
+
+ for (i = 0; i < bo->pgnr; i++) {
+ ret =
+ isp_mmu_map(&bdev->mmu, virt,
+ page_to_phys(bo->page_obj[i].page), 1);
+ if (ret)
+ goto map_err;
+ virt += (1 << PAGE_SHIFT);
+ }
+
+ /*
+ * flush TLB here.
+ *
+ * theoretically, we do not need to flush the TLB as we did not change
+ * any existing address mappings, but for Silicon Hive's MMU it is
+ * really a bug here. I guess when fetching PTEs (page table entries)
+ * into the TLB, its MMU will fetch additional INVALID PTEs
+ * automatically for performance reasons. E.g. we only set up 1 page
+ * address mapping, meaning updating 1 PTE, but the MMU fetches 4 PTEs
+ * at one time, so the additional 3 PTEs are invalid.
+ */
+ if (bo->start != 0x0)
+ isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
+ (bo->pgnr << PAGE_SHIFT));
+
+ bo->status |= HMM_BO_BINDED;
+
+ mutex_unlock(&bo->mutex);
+
+ return 0;
+
+map_err:
+ /* unbind the physical pages with related virtual address space */
+ virt = bo->start;
+ for ( ; i > 0; i--) {
+ isp_mmu_unmap(&bdev->mmu, virt, 1);
+ virt += pgnr_to_size(1);
+ }
+
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev,
+ "setup MMU address mapping failed.\n");
+ return ret;
+
+status_err2:
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev, "buffer object already binded.\n");
+ return -EINVAL;
+status_err1:
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev,
+ "buffer object vm_node or page not allocated.\n");
+ return -EINVAL;
+}
+
+/*
+ * unbind the physical pages from the related virtual address space.
+ */
+void hmm_bo_unbind(struct hmm_buffer_object *bo)
+{
+ unsigned int virt;
+ struct hmm_bo_device *bdev;
+ unsigned int i;
+
+ check_bo_null_return_void(bo);
+
+ mutex_lock(&bo->mutex);
+
+ check_bo_status_yes_goto(bo,
+ HMM_BO_PAGE_ALLOCED |
+ HMM_BO_ALLOCED |
+ HMM_BO_BINDED, status_err);
+
+ bdev = bo->bdev;
+
+ virt = bo->start;
+
+ for (i = 0; i < bo->pgnr; i++) {
+ isp_mmu_unmap(&bdev->mmu, virt, 1);
+ virt += pgnr_to_size(1);
+ }
+
+ /*
+ * flush TLB as the address mapping has been removed and
+ * related TLBs should be invalidated.
+ */
+ isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
+ (bo->pgnr << PAGE_SHIFT));
+
+ bo->status &= (~HMM_BO_BINDED);
+
+ mutex_unlock(&bo->mutex);
+
+ return;
+
+status_err:
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev,
+ "buffer vm or page not allocated or not binded yet.\n");
+}
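For orientation, a sketch of the buffer-object lifecycle these bind/unbind helpers sit in; this mirrors what hmm_alloc()/hmm_free() in hmm.c do (illustrative only, not part of the patch; error handling omitted, pgnr is a page count chosen by the caller):

	struct hmm_buffer_object *bo;

	bo = hmm_bo_alloc(&bo_device, pgnr);	/* reserve ISP address space */
	hmm_bo_alloc_pages(bo, HMM_BO_PRIVATE, 0, NULL, false);
	hmm_bo_bind(bo);			/* program the ISP MMU */

	/* ... bo->start is now the ISP virtual address of the buffer ... */

	hmm_bo_unbind(bo);
	hmm_bo_free_pages(bo);
	hmm_bo_unref(bo);			/* release the bo and its address range */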
+
+int hmm_bo_binded(struct hmm_buffer_object *bo)
+{
+ int ret;
+
+ check_bo_null_return(bo, 0);
+
+ mutex_lock(&bo->mutex);
+
+ ret = bo->status & HMM_BO_BINDED;
+
+ mutex_unlock(&bo->mutex);
+
+ return ret;
+}
+
+void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
+{
+ struct page **pages;
+ int i;
+
+ check_bo_null_return(bo, NULL);
+
+ mutex_lock(&bo->mutex);
+ if (((bo->status & HMM_BO_VMAPED) && !cached) ||
+ ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) {
+ mutex_unlock(&bo->mutex);
+ return bo->vmap_addr;
+ }
+
+ /* cached status needs to be changed, so vunmap first */
+ if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
+ vunmap(bo->vmap_addr);
+ bo->vmap_addr = NULL;
+ bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
+ }
+
+ pages = atomisp_kernel_malloc(sizeof(*pages) * bo->pgnr);
+ if (unlikely(!pages)) {
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev, "out of memory for pages...\n");
+ return NULL;
+ }
+
+ for (i = 0; i < bo->pgnr; i++)
+ pages[i] = bo->page_obj[i].page;
+
+ bo->vmap_addr = vmap(pages, bo->pgnr, VM_MAP,
+ cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
+ if (unlikely(!bo->vmap_addr)) {
+ atomisp_kernel_free(pages);
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev, "vmap failed...\n");
+ return NULL;
+ }
+ bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);
+
+ atomisp_kernel_free(pages);
+
+ mutex_unlock(&bo->mutex);
+ return bo->vmap_addr;
+}
+
+void hmm_bo_flush_vmap(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return_void(bo);
+
+ mutex_lock(&bo->mutex);
+ if (!(bo->status & HMM_BO_VMAPED_CACHED) || !bo->vmap_addr) {
+ mutex_unlock(&bo->mutex);
+ return;
+ }
+
+ clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE);
+ mutex_unlock(&bo->mutex);
+}
+
+void hmm_bo_vunmap(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return_void(bo);
+
+ mutex_lock(&bo->mutex);
+ if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
+ vunmap(bo->vmap_addr);
+ bo->vmap_addr = NULL;
+ bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
+ }
+
+ mutex_unlock(&bo->mutex);
+ return;
+}
+
+void hmm_bo_ref(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return_void(bo);
+
+ kref_get(&bo->kref);
+}
+
+static void kref_hmm_bo_release(struct kref *kref)
+{
+ if (!kref)
+ return;
+
+ hmm_bo_release(kref_to_hmm_bo(kref));
+}
+
+void hmm_bo_unref(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return_void(bo);
+
+ kref_put(&bo->kref, kref_hmm_bo_release);
+}
+
+static void hmm_bo_vm_open(struct vm_area_struct *vma)
+{
+ struct hmm_buffer_object *bo =
+ (struct hmm_buffer_object *)vma->vm_private_data;
+
+ check_bo_null_return_void(bo);
+
+ hmm_bo_ref(bo);
+
+ mutex_lock(&bo->mutex);
+
+ bo->status |= HMM_BO_MMAPED;
+
+ bo->mmap_count++;
+
+ mutex_unlock(&bo->mutex);
+}
+
+static void hmm_bo_vm_close(struct vm_area_struct *vma)
+{
+ struct hmm_buffer_object *bo =
+ (struct hmm_buffer_object *)vma->vm_private_data;
+
+ check_bo_null_return_void(bo);
+
+ hmm_bo_unref(bo);
+
+ mutex_lock(&bo->mutex);
+
+ bo->mmap_count--;
+
+ if (!bo->mmap_count) {
+ bo->status &= (~HMM_BO_MMAPED);
+ vma->vm_private_data = NULL;
+ }
+
+ mutex_unlock(&bo->mutex);
+}
+
+static const struct vm_operations_struct hmm_bo_vm_ops = {
+ .open = hmm_bo_vm_open,
+ .close = hmm_bo_vm_close,
+};
+
+/*
+ * mmap the bo to user space.
+ */
+int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
+{
+ unsigned int start, end;
+ unsigned int virt;
+ unsigned int pgnr, i;
+ unsigned int pfn;
+
+ check_bo_null_return(bo, -EINVAL);
+
+ check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
+
+ pgnr = bo->pgnr;
+ start = vma->vm_start;
+ end = vma->vm_end;
+
+	/*
+	 * the vma's virtual address space size must equal the buffer
+	 * object's size.
+	 */
+	if ((start + pgnr_to_size(pgnr)) != end) {
+		dev_warn(atomisp_dev,
+			 "vma's address space size does not equal the buffer object's size\n");
+		return -EINVAL;
+	}
+
+ virt = vma->vm_start;
+ for (i = 0; i < pgnr; i++) {
+ pfn = page_to_pfn(bo->page_obj[i].page);
+ if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
+			dev_warn(atomisp_dev,
+				 "remap_pfn_range failed: virt = 0x%x, pfn = 0x%x, mapped_pgnr = %d\n",
+				 virt, pfn, 1);
+ return -EINVAL;
+ }
+ virt += PAGE_SIZE;
+ }
+
+ vma->vm_private_data = bo;
+
+ vma->vm_ops = &hmm_bo_vm_ops;
+ vma->vm_flags |= VM_IO|VM_DONTEXPAND|VM_DONTDUMP;
+
+ /*
+	 * call hmm_bo_vm_open() explicitly.
+ */
+ hmm_bo_vm_open(vma);
+
+ return 0;
+
+status_err:
+ dev_err(atomisp_dev, "buffer page not allocated yet.\n");
+ return -EINVAL;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo_dev.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo_dev.c
new file mode 100644
index 000000000000..87090cea5b9d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo_dev.c
@@ -0,0 +1,333 @@
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/slab.h> /* for kmalloc */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+
+#ifdef CONFIG_ION
+#include <linux/ion.h>
+#endif
+
+#include "atomisp_internal.h"
+#include "hmm/hmm_common.h"
+#include "hmm/hmm_bo_dev.h"
+#include "hmm/hmm_bo.h"
+
+/*
+ * hmm_bo_device functions.
+ */
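+/*
+ * Typical lifecycle (illustrative sketch using the parameter names below,
+ * not a verbatim call sequence from the driver):
+ *
+ *	hmm_bo_device_init(&bdev, mmu_driver, vaddr_start, size);
+ *	bo = hmm_bo_device_get_bo(&bdev, pgnr);	/* reuse a free bo, may be NULL */
+ *	...
+ *	hmm_bo_unref(bo);
+ *	hmm_bo_device_exit(&bdev);
+ */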
+int hmm_bo_device_init(struct hmm_bo_device *bdev,
+ struct isp_mmu_client *mmu_driver,
+ unsigned int vaddr_start, unsigned int size)
+{
+ int ret;
+
+ check_bodev_null_return(bdev, -EINVAL);
+
+ ret = isp_mmu_init(&bdev->mmu, mmu_driver);
+ if (ret) {
+ dev_err(atomisp_dev, "isp_mmu_init failed.\n");
+ goto isp_mmu_init_err;
+ }
+
+ ret = hmm_vm_init(&bdev->vaddr_space, vaddr_start, size);
+ if (ret) {
+		dev_err(atomisp_dev, "hmm_vm_init failed. vaddr_start = 0x%x, size = %d\n",
+			vaddr_start, size);
+ goto vm_init_err;
+ }
+
+ INIT_LIST_HEAD(&bdev->free_bo_list);
+ INIT_LIST_HEAD(&bdev->active_bo_list);
+
+ spin_lock_init(&bdev->list_lock);
+#ifdef CONFIG_ION
+ /*
+ * TODO:
+ * The ion_dev should be defined by ION driver. But ION driver does
+ * not implement it yet, will fix it when it is ready.
+ */
+ if (!ion_dev)
+ goto vm_init_err;
+
+ bdev->iclient = ion_client_create(ion_dev, "atomisp");
+ if (IS_ERR_OR_NULL(bdev->iclient)) {
+ ret = PTR_ERR(bdev->iclient);
+ if (!bdev->iclient)
+ ret = -EINVAL;
+ goto vm_init_err;
+ }
+#endif
+ bdev->flag = HMM_BO_DEVICE_INITED;
+
+ return 0;
+
+vm_init_err:
+ isp_mmu_exit(&bdev->mmu);
+isp_mmu_init_err:
+ return ret;
+}
+
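+/*
+ * tear down a bo device: force-free any remaining buffer objects, then
+ * release the ISP MMU, the virtual address space and (with CONFIG_ION)
+ * the ION client.
+ */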
+void hmm_bo_device_exit(struct hmm_bo_device *bdev)
+{
+ check_bodev_null_return_void(bdev);
+
+	/*
+	 * destroy all bos in the bo list, even if they are still in use.
+	 */
+	if (!list_empty(&bdev->active_bo_list))
+		dev_warn(atomisp_dev,
+			 "there are still active bos in use, freeing them forcibly.\n");
+
+ while (!list_empty(&bdev->active_bo_list))
+ hmm_bo_unref(list_to_hmm_bo(bdev->active_bo_list.next));
+
+	if (!list_empty(&bdev->free_bo_list))
+		dev_warn(atomisp_dev,
+			 "there are still bos on free_bo_list, freeing them forcibly.\n");
+
+ while (!list_empty(&bdev->free_bo_list))
+ hmm_bo_unref(list_to_hmm_bo(bdev->free_bo_list.next));
+
+ isp_mmu_exit(&bdev->mmu);
+ hmm_vm_clean(&bdev->vaddr_space);
+#ifdef CONFIG_ION
+ if (bdev->iclient != NULL)
+ ion_client_destroy(bdev->iclient);
+#endif
+}
+
+int hmm_bo_device_inited(struct hmm_bo_device *bdev)
+{
+ check_bodev_null_return(bdev, -EINVAL);
+
+ return bdev->flag == HMM_BO_DEVICE_INITED;
+}
+
+/*
+ * find the buffer object with virtual address vaddr.
+ * return NULL if no such buffer object found.
+ */
+struct hmm_buffer_object *hmm_bo_device_search_start(struct hmm_bo_device *bdev,
+ ia_css_ptr vaddr)
+{
+ struct list_head *pos;
+ struct hmm_buffer_object *bo;
+ unsigned long flags;
+
+ check_bodev_null_return(bdev, NULL);
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_for_each(pos, &bdev->active_bo_list) {
+ bo = list_to_hmm_bo(pos);
+		/* skip any bo which has no vm_node allocated */
+ if (!hmm_bo_vm_allocated(bo))
+ continue;
+ if (bo->vm_node->start == vaddr)
+ goto found;
+ }
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+ return NULL;
+found:
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+ return bo;
+}
+
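+/* check whether addr falls inside the half-open range [start, start + size) */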
+static int in_range(unsigned int start, unsigned int size, unsigned int addr)
+{
+ return (start <= addr) && (start + size > addr);
+}
+
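+/*
+ * find the buffer object whose ISP virtual address range contains vaddr.
+ * bos found deep in the list (past the first HMM_BO_CACHE_SIZE entries)
+ * are moved to the front as a simple most-recently-used cache.
+ */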
+struct hmm_buffer_object *hmm_bo_device_search_in_range(struct hmm_bo_device
+ *bdev,
+ unsigned int vaddr)
+{
+ struct list_head *pos;
+ struct hmm_buffer_object *bo;
+ unsigned long flags;
+ int cnt = 0;
+
+ check_bodev_null_return(bdev, NULL);
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_for_each(pos, &bdev->active_bo_list) {
+ cnt++;
+ bo = list_to_hmm_bo(pos);
+		/* skip any bo which has no vm_node allocated */
+ if (!hmm_bo_vm_allocated(bo))
+ continue;
+ if (in_range(bo->vm_node->start, bo->vm_node->size, vaddr))
+ goto found;
+ }
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+ return NULL;
+found:
+ if (cnt > HMM_BO_CACHE_SIZE)
+ list_move(pos, &bdev->active_bo_list);
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+ return bo;
+}
+
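+/*
+ * find the buffer object whose kernel vmap address is vaddr.
+ * return NULL if no such buffer object is found.
+ */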
+struct hmm_buffer_object *
+hmm_bo_device_search_vmap_start(struct hmm_bo_device *bdev, const void *vaddr)
+{
+ struct list_head *pos;
+ struct hmm_buffer_object *bo;
+ unsigned long flags;
+
+ check_bodev_null_return(bdev, NULL);
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_for_each(pos, &bdev->active_bo_list) {
+ bo = list_to_hmm_bo(pos);
+		/* skip any bo which has no vm_node allocated */
+ if (!hmm_bo_vm_allocated(bo))
+ continue;
+ if (bo->vmap_addr == vaddr)
+ goto found;
+ }
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+ return NULL;
+found:
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+ return bo;
+}
+
+/*
+ * find a buffer object with pgnr pages from free_bo_list and
+ * activate it (remove from free_bo_list and add to
+ * active_bo_list)
+ *
+ * return NULL if no such buffer object found.
+ */
+struct hmm_buffer_object *hmm_bo_device_get_bo(struct hmm_bo_device *bdev,
+ unsigned int pgnr)
+{
+ struct list_head *pos;
+ struct hmm_buffer_object *bo;
+ unsigned long flags;
+
+ check_bodev_null_return(bdev, NULL);
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_for_each(pos, &bdev->free_bo_list) {
+ bo = list_to_hmm_bo(pos);
+ if (bo->pgnr == pgnr)
+ goto found;
+ }
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+ return NULL;
+found:
+	list_move(&bo->list, &bdev->active_bo_list);
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+
+ return bo;
+}
+
+/*
+ * destroy all buffer objects in the free_bo_list.
+ */
+void hmm_bo_device_destroy_free_bo_list(struct hmm_bo_device *bdev)
+{
+ struct hmm_buffer_object *bo, *tmp;
+ unsigned long flags;
+ struct list_head new_head;
+
+ check_bodev_null_return_void(bdev);
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_replace_init(&bdev->free_bo_list, &new_head);
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+
+ list_for_each_entry_safe(bo, tmp, &new_head, list) {
+ list_del(&bo->list);
+ hmm_bo_unref(bo);
+ }
+}
+
+/*
+ * destroy buffer object with start virtual address vaddr.
+ */
+void hmm_bo_device_destroy_free_bo_addr(struct hmm_bo_device *bdev,
+ unsigned int vaddr)
+{
+ struct list_head *pos;
+ struct hmm_buffer_object *bo;
+ unsigned long flags;
+
+ check_bodev_null_return_void(bdev);
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_for_each(pos, &bdev->free_bo_list) {
+ bo = list_to_hmm_bo(pos);
+		/* skip any bo which has no vm_node allocated */
+ if (!hmm_bo_vm_allocated(bo))
+ continue;
+ if (bo->vm_node->start == vaddr)
+ goto found;
+ }
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+ return;
+found:
+ list_del(&bo->list);
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+ hmm_bo_unref(bo);
+}
+
+/*
+ * destroy all buffer objects with pgnr pages.
+ */
+void hmm_bo_device_destroy_free_bo_size(struct hmm_bo_device *bdev,
+ unsigned int pgnr)
+{
+ struct list_head *pos;
+ struct hmm_buffer_object *bo;
+ unsigned long flags;
+
+ check_bodev_null_return_void(bdev);
+
+retry:
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_for_each(pos, &bdev->free_bo_list) {
+ bo = list_to_hmm_bo(pos);
+ if (bo->pgnr == pgnr)
+ goto found;
+ }
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+ return;
+found:
+ list_del(&bo->list);
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+ hmm_bo_unref(bo);
+ goto retry;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c
new file mode 100644
index 000000000000..f1ebc355ec72
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c
@@ -0,0 +1,257 @@
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+/*
+ * This file contains functions for dynamic memory pool management
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+
+#include <asm/cacheflush.h>
+
+#include "atomisp_internal.h"
+
+#include "hmm/hmm_pool.h"
+
+/*
+ * dynamic memory pool ops.
+ */
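+/*
+ * take up to 'size' pages from the dynamic pool and fill page_obj[] with
+ * them; returns the number of pages actually provided, which may be less
+ * than requested (the caller has to obtain the remainder elsewhere).
+ */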
+static unsigned int get_pages_from_dynamic_pool(void *pool,
+ struct hmm_page_object *page_obj,
+ unsigned int size, bool cached)
+{
+ struct hmm_page *hmm_page;
+ unsigned long flags;
+ unsigned int i = 0;
+ struct hmm_dynamic_pool_info *dypool_info = pool;
+
+ if (!dypool_info)
+ return 0;
+
+ spin_lock_irqsave(&dypool_info->list_lock, flags);
+ if (dypool_info->initialized) {
+ while (!list_empty(&dypool_info->pages_list)) {
+ hmm_page = list_entry(dypool_info->pages_list.next,
+ struct hmm_page, list);
+
+ list_del(&hmm_page->list);
+ dypool_info->pgnr--;
+ spin_unlock_irqrestore(&dypool_info->list_lock, flags);
+
+ page_obj[i].page = hmm_page->page;
+ page_obj[i++].type = HMM_PAGE_TYPE_DYNAMIC;
+#ifdef USE_KMEM_CACHE
+ kmem_cache_free(dypool_info->pgptr_cache, hmm_page);
+#else
+ atomisp_kernel_free(hmm_page);
+#endif
+
+ if (i == size)
+ return i;
+
+ spin_lock_irqsave(&dypool_info->list_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&dypool_info->list_lock, flags);
+
+ return i;
+}
+
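+/*
+ * return one page to the dynamic pool.  Reserved-pool pages are ignored
+ * here; once the pool already holds pool_size pages, or if bookkeeping
+ * memory cannot be allocated, the page is set back to write-back caching
+ * and released to the system instead.
+ */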
+static void free_pages_to_dynamic_pool(void *pool,
+ struct hmm_page_object *page_obj)
+{
+ struct hmm_page *hmm_page;
+ unsigned long flags;
+ int ret;
+ struct hmm_dynamic_pool_info *dypool_info = pool;
+
+ if (!dypool_info)
+ return;
+
+ spin_lock_irqsave(&dypool_info->list_lock, flags);
+ if (!dypool_info->initialized) {
+ spin_unlock_irqrestore(&dypool_info->list_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&dypool_info->list_lock, flags);
+
+ if (page_obj->type == HMM_PAGE_TYPE_RESERVED)
+ return;
+
+ if (dypool_info->pgnr >= dypool_info->pool_size) {
+ /* free page directly back to system */
+ ret = set_pages_wb(page_obj->page, 1);
+ if (ret)
+ dev_err(atomisp_dev,
+ "set page to WB err ...ret=%d\n", ret);
+		/*
+		 * Workaround: set_pages_wb() occasionally returns -EFAULT,
+		 * indicating that the page address is outside the valid
+		 * range (0xffff880000000000~0xffffc7ffffffffff); calling
+		 * __free_pages() on such a page would panic.  It is not
+		 * clear why the page address becomes invalid; it may be
+		 * memory corruption under low-memory conditions.
+		 */
+ if (!ret) {
+ __free_pages(page_obj->page, 0);
+ hmm_mem_stat.sys_size--;
+ }
+ return;
+ }
+#ifdef USE_KMEM_CACHE
+ hmm_page = kmem_cache_zalloc(dypool_info->pgptr_cache,
+ GFP_KERNEL);
+#else
+ hmm_page = atomisp_kernel_malloc(sizeof(struct hmm_page));
+#endif
+ if (!hmm_page) {
+ dev_err(atomisp_dev, "out of memory for hmm_page.\n");
+
+ /* free page directly */
+ ret = set_pages_wb(page_obj->page, 1);
+ if (ret)
+ dev_err(atomisp_dev,
+ "set page to WB err ...ret=%d\n", ret);
+ if (!ret) {
+ __free_pages(page_obj->page, 0);
+ hmm_mem_stat.sys_size--;
+ }
+ return;
+ }
+
+ hmm_page->page = page_obj->page;
+
+ /*
+ * add to pages_list of pages_pool
+ */
+ spin_lock_irqsave(&dypool_info->list_lock, flags);
+ list_add_tail(&hmm_page->list, &dypool_info->pages_list);
+ dypool_info->pgnr++;
+ spin_unlock_irqrestore(&dypool_info->list_lock, flags);
+ hmm_mem_stat.dyc_size++;
+}
+
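+/*
+ * set up the dynamic pool bookkeeping; pool_size caps how many free pages
+ * the pool may cache before further frees go straight back to the system.
+ */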
+static int hmm_dynamic_pool_init(void **pool, unsigned int pool_size)
+{
+ struct hmm_dynamic_pool_info *dypool_info;
+
+ if (pool_size == 0)
+ return 0;
+
+ dypool_info = atomisp_kernel_malloc(
+ sizeof(struct hmm_dynamic_pool_info));
+ if (unlikely(!dypool_info)) {
+		dev_err(atomisp_dev, "out of memory for dypool_info.\n");
+ return -ENOMEM;
+ }
+
+#ifdef USE_KMEM_CACHE
+ dypool_info->pgptr_cache = kmem_cache_create("pgptr_cache",
+ sizeof(struct hmm_page), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!dypool_info->pgptr_cache) {
+ atomisp_kernel_free(dypool_info);
+ return -ENOMEM;
+ }
+#endif
+
+ INIT_LIST_HEAD(&dypool_info->pages_list);
+ spin_lock_init(&dypool_info->list_lock);
+ dypool_info->initialized = true;
+ dypool_info->pool_size = pool_size;
+ dypool_info->pgnr = 0;
+
+ *pool = dypool_info;
+
+ return 0;
+}
+
+static void hmm_dynamic_pool_exit(void **pool)
+{
+ struct hmm_dynamic_pool_info *dypool_info = *pool;
+ struct hmm_page *hmm_page;
+ unsigned long flags;
+ int ret;
+
+ if (!dypool_info)
+ return;
+
+ spin_lock_irqsave(&dypool_info->list_lock, flags);
+ if (!dypool_info->initialized) {
+ spin_unlock_irqrestore(&dypool_info->list_lock, flags);
+ return;
+ }
+ dypool_info->initialized = false;
+
+ while (!list_empty(&dypool_info->pages_list)) {
+ hmm_page = list_entry(dypool_info->pages_list.next,
+ struct hmm_page, list);
+
+ list_del(&hmm_page->list);
+ spin_unlock_irqrestore(&dypool_info->list_lock, flags);
+
+ /* can cause thread sleep, so cannot be put into spin_lock */
+ ret = set_pages_wb(hmm_page->page, 1);
+ if (ret)
+ dev_err(atomisp_dev,
+ "set page to WB err...ret=%d\n", ret);
+ if (!ret) {
+ __free_pages(hmm_page->page, 0);
+ hmm_mem_stat.dyc_size--;
+ hmm_mem_stat.sys_size--;
+ }
+#ifdef USE_KMEM_CACHE
+ kmem_cache_free(dypool_info->pgptr_cache, hmm_page);
+#else
+ atomisp_kernel_free(hmm_page);
+#endif
+ spin_lock_irqsave(&dypool_info->list_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&dypool_info->list_lock, flags);
+
+#ifdef USE_KMEM_CACHE
+ kmem_cache_destroy(dypool_info->pgptr_cache);
+#endif
+
+ atomisp_kernel_free(dypool_info);
+
+ *pool = NULL;
+}
+
+static int hmm_dynamic_pool_inited(void *pool)
+{
+ struct hmm_dynamic_pool_info *dypool_info = pool;
+
+ if (!dypool_info)
+ return 0;
+
+ return dypool_info->initialized;
+}
+
+struct hmm_pool_ops dynamic_pops = {
+ .pool_init = hmm_dynamic_pool_init,
+ .pool_exit = hmm_dynamic_pool_exit,
+ .pool_alloc_pages = get_pages_from_dynamic_pool,
+ .pool_free_pages = free_pages_to_dynamic_pool,
+ .pool_inited = hmm_dynamic_pool_inited,
+};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c
new file mode 100644
index 000000000000..590ff7bc6c5f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c
@@ -0,0 +1,258 @@
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+/*
+ * This file contains functions for reserved memory pool management
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+
+#include <asm/cacheflush.h>
+#include "atomisp_internal.h"
+#include "hmm/hmm_pool.h"
+
+/*
+ * reserved memory pool ops.
+ */
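+/*
+ * hand out up to 'size' pages from the top of the reserved pool's page
+ * array; returns the number of pages actually provided.
+ */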
+static unsigned int get_pages_from_reserved_pool(void *pool,
+ struct hmm_page_object *page_obj,
+ unsigned int size, bool cached)
+{
+ unsigned long flags;
+ unsigned int i = 0;
+ unsigned int repool_pgnr;
+ int j;
+ struct hmm_reserved_pool_info *repool_info = pool;
+
+ if (!repool_info)
+ return 0;
+
+ spin_lock_irqsave(&repool_info->list_lock, flags);
+ if (repool_info->initialized) {
+ repool_pgnr = repool_info->index;
+
+ for (j = repool_pgnr-1; j >= 0; j--) {
+ page_obj[i].page = repool_info->pages[j];
+ page_obj[i].type = HMM_PAGE_TYPE_RESERVED;
+ i++;
+ repool_info->index--;
+ if (i == size)
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&repool_info->list_lock, flags);
+ return i;
+}
+
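+/*
+ * put a reserved-type page back into the pool's page array; pages of any
+ * other type, or frees beyond the pool's capacity, are silently ignored.
+ */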
+static void free_pages_to_reserved_pool(void *pool,
+ struct hmm_page_object *page_obj)
+{
+ unsigned long flags;
+ struct hmm_reserved_pool_info *repool_info = pool;
+
+ if (!repool_info)
+ return;
+
+ spin_lock_irqsave(&repool_info->list_lock, flags);
+
+ if (repool_info->initialized &&
+ repool_info->index < repool_info->pgnr &&
+ page_obj->type == HMM_PAGE_TYPE_RESERVED) {
+ repool_info->pages[repool_info->index++] = page_obj->page;
+ }
+
+ spin_unlock_irqrestore(&repool_info->list_lock, flags);
+}
+
+static int hmm_reserved_pool_setup(struct hmm_reserved_pool_info **repool_info,
+ unsigned int pool_size)
+{
+ struct hmm_reserved_pool_info *pool_info;
+
+ pool_info = atomisp_kernel_malloc(
+ sizeof(struct hmm_reserved_pool_info));
+ if (unlikely(!pool_info)) {
+ dev_err(atomisp_dev, "out of memory for repool_info.\n");
+ return -ENOMEM;
+ }
+
+ pool_info->pages = atomisp_kernel_malloc(
+ sizeof(struct page *) * pool_size);
+ if (unlikely(!pool_info->pages)) {
+ dev_err(atomisp_dev, "out of memory for repool_info->pages.\n");
+ atomisp_kernel_free(pool_info);
+ return -ENOMEM;
+ }
+
+ pool_info->index = 0;
+ pool_info->pgnr = 0;
+ spin_lock_init(&pool_info->list_lock);
+ pool_info->initialized = true;
+
+ *repool_info = pool_info;
+
+ return 0;
+}
+
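+/*
+ * pre-allocate pool_size uncached pages at init time: grab the largest
+ * available contiguous blocks first, halving the order on allocation
+ * failure, and mark every page uncached with set_pages_uc().
+ */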
+static int hmm_reserved_pool_init(void **pool, unsigned int pool_size)
+{
+ int ret;
+ unsigned int blk_pgnr;
+ unsigned int pgnr = pool_size;
+ unsigned int order = 0;
+ unsigned int i = 0;
+ int fail_number = 0;
+ struct page *pages;
+ int j;
+	struct hmm_reserved_pool_info *repool_info;
+
+	if (pool_size == 0)
+ return 0;
+
+ ret = hmm_reserved_pool_setup(&repool_info, pool_size);
+ if (ret) {
+ dev_err(atomisp_dev, "hmm_reserved_pool_setup failed.\n");
+ return ret;
+ }
+
+ pgnr = pool_size;
+
+ i = 0;
+ order = MAX_ORDER;
+
+ while (pgnr) {
+ blk_pgnr = 1U << order;
+ while (blk_pgnr > pgnr) {
+ order--;
+ blk_pgnr >>= 1U;
+ }
+ BUG_ON(order > MAX_ORDER);
+
+ pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN, order);
+ if (unlikely(!pages)) {
+ if (order == 0) {
+ fail_number++;
+ dev_err(atomisp_dev, "%s: alloc_pages failed: %d\n",
+ __func__, fail_number);
+				/*
+				 * if allocation keeps failing at order 0,
+				 * give up after ALLOC_PAGE_FAIL_NUM
+				 * consecutive failures
+				 */
+
+				/* FIXME: is this retry mechanism OK? */
+ if (fail_number == ALLOC_PAGE_FAIL_NUM)
+ goto end;
+ } else {
+ order--;
+ }
+ } else {
+ blk_pgnr = 1U << order;
+
+ ret = set_pages_uc(pages, blk_pgnr);
+ if (ret) {
+ dev_err(atomisp_dev,
+ "set pages uncached failed\n");
+ __free_pages(pages, order);
+ goto end;
+ }
+
+ for (j = 0; j < blk_pgnr; j++)
+ repool_info->pages[i++] = pages + j;
+
+ repool_info->index += blk_pgnr;
+ repool_info->pgnr += blk_pgnr;
+
+ pgnr -= blk_pgnr;
+
+ fail_number = 0;
+ }
+ }
+
+end:
+ repool_info->initialized = true;
+
+ *pool = repool_info;
+
+	dev_info(atomisp_dev,
+		 "hmm_reserved_pool initialized successfully with %d pages.\n",
+		 repool_info->pgnr);
+ return 0;
+}
+
+static void hmm_reserved_pool_exit(void **pool)
+{
+ unsigned long flags;
+ int i, ret;
+ unsigned int pgnr;
+ struct hmm_reserved_pool_info *repool_info = *pool;
+
+ if (!repool_info)
+ return;
+
+ spin_lock_irqsave(&repool_info->list_lock, flags);
+ if (!repool_info->initialized) {
+ spin_unlock_irqrestore(&repool_info->list_lock, flags);
+ return;
+ }
+ pgnr = repool_info->pgnr;
+ repool_info->index = 0;
+ repool_info->pgnr = 0;
+ repool_info->initialized = false;
+ spin_unlock_irqrestore(&repool_info->list_lock, flags);
+
+ for (i = 0; i < pgnr; i++) {
+ ret = set_pages_wb(repool_info->pages[i], 1);
+ if (ret)
+ dev_err(atomisp_dev,
+ "set page to WB err...ret=%d\n", ret);
+		/*
+		 * Workaround: set_pages_wb() occasionally returns -EFAULT,
+		 * indicating that the page address is outside the valid
+		 * range (0xffff880000000000~0xffffc7ffffffffff); calling
+		 * __free_pages() on such a page would panic.  It is not
+		 * clear why the page address becomes invalid; it may be
+		 * memory corruption under low-memory conditions.
+		 */
+ if (!ret)
+ __free_pages(repool_info->pages[i], 0);
+ }
+
+ atomisp_kernel_free(repool_info->pages);
+ atomisp_kernel_free(repool_info);
+
+ *pool = NULL;
+}
+
+static int hmm_reserved_pool_inited(void *pool)
+{
+ struct hmm_reserved_pool_info *repool_info = pool;
+
+ if (!repool_info)
+ return 0;
+
+ return repool_info->initialized;
+}
+
+struct hmm_pool_ops reserved_pops = {
+ .pool_init = hmm_reserved_pool_init,
+ .pool_exit = hmm_reserved_pool_exit,
+ .pool_alloc_pages = get_pages_from_reserved_pool,
+ .pool_free_pages = free_pages_to_reserved_pool,
+ .pool_inited = hmm_reserved_pool_inited,
+};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c
new file mode 100644
index 000000000000..0722a68a49e7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c
@@ -0,0 +1,218 @@
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+/*
+ * This file contains functions for ISP virtual address management in the ISP driver
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/page.h>
+
+#include "atomisp_internal.h"
+#include "mmu/isp_mmu.h"
+#include "hmm/hmm_vm.h"
+#include "hmm/hmm_common.h"
+
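+/*
+ * helpers: the end address of a node, and a containment test on the
+ * half-open range [start, start + size).
+ */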
+static unsigned int vm_node_end(unsigned int start, unsigned int pgnr)
+{
+ return start + pgnr_to_size(pgnr);
+}
+
+static int addr_in_vm_node(unsigned int addr,
+ struct hmm_vm_node *node)
+{
+ return (addr >= node->start) && (addr < (node->start + node->size));
+}
+
+int hmm_vm_init(struct hmm_vm *vm, unsigned int start,
+ unsigned int size)
+{
+	if (!vm)
+		return -EINVAL;
+
+ vm->start = start;
+ vm->pgnr = size_to_pgnr_ceil(size);
+ vm->size = pgnr_to_size(vm->pgnr);
+
+ INIT_LIST_HEAD(&vm->vm_node_list);
+ spin_lock_init(&vm->lock);
+ vm->cache = kmem_cache_create("atomisp_vm", sizeof(struct hmm_vm_node),
+ 0, 0, NULL);
+
+	return vm->cache ? 0 : -ENOMEM;
+}
+
+void hmm_vm_clean(struct hmm_vm *vm)
+{
+ struct hmm_vm_node *node, *tmp;
+ struct list_head new_head;
+
+ if (!vm)
+ return;
+
+ spin_lock(&vm->lock);
+ list_replace_init(&vm->vm_node_list, &new_head);
+ spin_unlock(&vm->lock);
+
+ list_for_each_entry_safe(node, tmp, &new_head, list) {
+ list_del(&node->list);
+ kmem_cache_free(vm->cache, node);
+ }
+
+ kmem_cache_destroy(vm->cache);
+}
+
+static struct hmm_vm_node *alloc_hmm_vm_node(unsigned int pgnr,
+ struct hmm_vm *vm)
+{
+ struct hmm_vm_node *node;
+
+ node = kmem_cache_alloc(vm->cache, GFP_KERNEL);
+ if (!node) {
+ dev_err(atomisp_dev, "out of memory.\n");
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&node->list);
+ node->pgnr = pgnr;
+ node->size = pgnr_to_size(pgnr);
+ node->vm = vm;
+
+ return node;
+}
+
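+/*
+ * first-fit allocation of a pgnr-page range in the ISP virtual address
+ * space: walk the node list in address order and place the new node in
+ * the first gap large enough to hold it (or at the tail), failing when
+ * the address space is exhausted.
+ */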
+struct hmm_vm_node *hmm_vm_alloc_node(struct hmm_vm *vm, unsigned int pgnr)
+{
+ struct list_head *head;
+ struct hmm_vm_node *node, *cur, *next;
+ unsigned int vm_start, vm_end;
+ unsigned int addr;
+ unsigned int size;
+
+ if (!vm)
+ return NULL;
+
+ vm_start = vm->start;
+ vm_end = vm_node_end(vm->start, vm->pgnr);
+ size = pgnr_to_size(pgnr);
+
+ addr = vm_start;
+ head = &vm->vm_node_list;
+
+ node = alloc_hmm_vm_node(pgnr, vm);
+ if (!node) {
+ dev_err(atomisp_dev, "no memory to allocate hmm vm node.\n");
+ return NULL;
+ }
+
+ spin_lock(&vm->lock);
+ /*
+ * if list is empty, the loop code will not be executed.
+ */
+ list_for_each_entry(cur, head, list) {
+		/* leave a gap between vm areas to help catch overflows */
+ addr = PAGE_ALIGN(vm_node_end(cur->start, cur->pgnr) + 1);
+
+ if (list_is_last(&cur->list, head)) {
+ if (addr + size > vm_end) {
+ /* vm area does not have space anymore */
+ spin_unlock(&vm->lock);
+ kmem_cache_free(vm->cache, node);
+				dev_err(atomisp_dev,
+					"not enough virtual address space.\n");
+ return NULL;
+ }
+
+ /* We still have vm space to add new node to tail */
+ break;
+ }
+
+ next = list_entry(cur->list.next, struct hmm_vm_node, list);
+ if ((next->start - addr) > size)
+ break;
+ }
+ node->start = addr;
+ node->vm = vm;
+ list_add(&node->list, &cur->list);
+ spin_unlock(&vm->lock);
+
+ return node;
+}
+
+void hmm_vm_free_node(struct hmm_vm_node *node)
+{
+ struct hmm_vm *vm;
+
+ if (!node)
+ return;
+
+ vm = node->vm;
+
+ spin_lock(&vm->lock);
+ list_del(&node->list);
+ spin_unlock(&vm->lock);
+
+ kmem_cache_free(vm->cache, node);
+}
+
+struct hmm_vm_node *hmm_vm_find_node_start(struct hmm_vm *vm, unsigned int addr)
+{
+ struct hmm_vm_node *node;
+
+ if (!vm)
+ return NULL;
+
+ spin_lock(&vm->lock);
+
+ list_for_each_entry(node, &vm->vm_node_list, list) {
+ if (node->start == addr) {
+ spin_unlock(&vm->lock);
+ return node;
+ }
+ }
+
+ spin_unlock(&vm->lock);
+ return NULL;
+}
+
+struct hmm_vm_node *hmm_vm_find_node_in_range(struct hmm_vm *vm,
+ unsigned int addr)
+{
+ struct hmm_vm_node *node;
+
+ if (!vm)
+ return NULL;
+
+ spin_lock(&vm->lock);
+
+ list_for_each_entry(node, &vm->vm_node_list, list) {
+ if (addr_in_vm_node(addr, node)) {
+ spin_unlock(&vm->lock);
+ return node;
+ }
+ }
+
+ spin_unlock(&vm->lock);
+ return NULL;
+}