Diffstat (limited to 'drivers/gpu/drm/omapdrm/omap_gem.c')
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 1602
1 file changed, 809 insertions(+), 793 deletions(-)
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index ebbdf4132e9c..71e79f53489a 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -1,79 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0-only /* - * drivers/gpu/drm/omapdrm/omap_gem.c - * - * Copyright (C) 2011 Texas Instruments + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob.clark@linaro.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. */ - -#include <linux/spinlock.h> +#include <linux/dma-mapping.h> +#include <linux/seq_file.h> #include <linux/shmem_fs.h> +#include <linux/spinlock.h> +#include <linux/vmalloc.h> + +#include <drm/drm_dumb_buffers.h> +#include <drm/drm_prime.h> +#include <drm/drm_print.h> +#include <drm/drm_vma_manager.h> #include "omap_drv.h" #include "omap_dmm_tiler.h" -/* remove these once drm core helpers are merged */ -struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); -void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, - bool dirty, bool accessed); -int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size); - /* * GEM buffer object implementation. */ -#define to_omap_bo(x) container_of(x, struct omap_gem_object, base) - /* note: we use upper 8 bits of flags for driver-internal flags: */ -#define OMAP_BO_DMA 0x01000000 /* actually is physically contiguous */ -#define OMAP_BO_EXT_SYNC 0x02000000 /* externally allocated sync object */ -#define OMAP_BO_EXT_MEM 0x04000000 /* externally allocated memory */ - +#define OMAP_BO_MEM_DMA_API 0x01000000 /* memory allocated with the dma_alloc_* API */ +#define OMAP_BO_MEM_SHMEM 0x02000000 /* memory allocated through shmem backing */ +#define OMAP_BO_MEM_DMABUF 0x08000000 /* memory imported from a dmabuf */ struct omap_gem_object { struct drm_gem_object base; struct list_head mm_list; - uint32_t flags; + u32 flags; /** width/height for tiled formats (rounded up to slot boundaries) */ - uint16_t width, height; + u16 width, height; /** roll applied when mapping to DMM */ - uint32_t roll; + u32 roll; + + /** protects pin_cnt, block, pages, dma_addrs and vaddr */ + struct mutex lock; /** - * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag - * is set and the paddr is valid. Also if the buffer is remapped in - * TILER and paddr_cnt > 0, then paddr is valid. But if you are using - * the physical address and OMAP_BO_DMA is not set, then you should - * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is - * not removed from under your feet. + * dma_addr contains the buffer DMA address. 
It is valid for + * + * - buffers allocated through the DMA mapping API (with the + * OMAP_BO_MEM_DMA_API flag set) + * + * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set) + * if they are physically contiguous + * + * - buffers mapped through the TILER when pin_cnt is not zero, in which + * case the DMA address points to the TILER aperture + * + * Physically contiguous buffers have their DMA address equal to the + * physical address as we don't remap those buffers through the TILER. * - * Note that OMAP_BO_SCANOUT is a hint from userspace that DMA capable - * buffer is requested, but doesn't mean that it is. Use the - * OMAP_BO_DMA flag to determine if the buffer has a DMA capable - * physical address. + * Buffers mapped to the TILER have their DMA address pointing to the + * TILER aperture. As TILER mappings are refcounted (through pin_cnt) + * the DMA address must be accessed through omap_gem_pin() to ensure + * that the mapping won't disappear unexpectedly. References must be + * released with omap_gem_unpin(). */ - dma_addr_t paddr; + dma_addr_t dma_addr; /** - * # of users of paddr + * # of users */ - uint32_t paddr_cnt; + refcount_t pin_cnt; + + /** + * If the buffer has been imported from a dmabuf the OMAP_DB_DMABUF flag + * is set and the sgt field is valid. + */ + struct sg_table *sgt; /** * tiler block used when buffer is remapped in DMM/TILER. @@ -87,39 +89,15 @@ struct omap_gem_object { struct page **pages; /** addresses corresponding to pages in above array */ - dma_addr_t *addrs; + dma_addr_t *dma_addrs; /** * Virtual address, if mapped. */ void *vaddr; - - /** - * sync-object allocated on demand (if needed) - * - * Per-buffer sync-object for tracking pending and completed hw/dma - * read and write operations. The layout in memory is dictated by - * the SGX firmware, which uses this information to stall the command - * stream if a surface is not ready yet. - * - * Note that when buffer is used by SGX, the sync-object needs to be - * allocated from a special heap of sync-objects. This way many sync - * objects can be packed in a page, and not waste GPU virtual address - * space. Because of this we have to have a omap_gem_set_sync_object() - * API to allow replacement of the syncobj after it has (potentially) - * already been allocated. A bit ugly but I haven't thought of a - * better alternative. - */ - struct { - uint32_t write_pending; - uint32_t write_complete; - uint32_t read_pending; - uint32_t read_complete; - } *sync; }; -static int get_pages(struct drm_gem_object *obj, struct page ***pages); -static uint64_t mmap_offset(struct drm_gem_object *obj); +#define to_omap_bo(x) container_of(x, struct omap_gem_object, base) /* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are * not necessarily pinned in TILER all the time, and (b) when they are @@ -133,94 +111,121 @@ static uint64_t mmap_offset(struct drm_gem_object *obj); * for later.. 
*/ #define NUM_USERGART_ENTRIES 2 -struct usergart_entry { +struct omap_drm_usergart_entry { struct tiler_block *block; /* the reserved tiler block */ - dma_addr_t paddr; + dma_addr_t dma_addr; struct drm_gem_object *obj; /* the current pinned obj */ pgoff_t obj_pgoff; /* page offset of obj currently mapped in */ }; -static struct { - struct usergart_entry entry[NUM_USERGART_ENTRIES]; + +struct omap_drm_usergart { + struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES]; int height; /* height in rows */ int height_shift; /* ilog2(height in rows) */ int slot_shift; /* ilog2(width per slot) */ int stride_pfn; /* stride in pages */ int last; /* index of last used entry */ -} *usergart; +}; + +/* ----------------------------------------------------------------------------- + * Helpers + */ -static void evict_entry(struct drm_gem_object *obj, - enum tiler_fmt fmt, struct usergart_entry *entry) +/** get mmap offset */ +u64 omap_gem_mmap_offset(struct drm_gem_object *obj) { - if (obj->dev->dev_mapping) { - struct omap_gem_object *omap_obj = to_omap_bo(obj); - int n = usergart[fmt].height; - size_t size = PAGE_SIZE * n; - loff_t off = mmap_offset(obj) + - (entry->obj_pgoff << PAGE_SHIFT); - const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE); - if (m > 1) { - int i; - /* if stride > than PAGE_SIZE then sparse mapping: */ - for (i = n; i > 0; i--) { - unmap_mapping_range(obj->dev->dev_mapping, - off, PAGE_SIZE, 1); - off += PAGE_SIZE * m; - } - } else { - unmap_mapping_range(obj->dev->dev_mapping, off, size, 1); + struct drm_device *dev = obj->dev; + int ret; + size_t size; + + /* Make it mmapable */ + size = omap_gem_mmap_size(obj); + ret = drm_gem_create_mmap_offset_size(obj, size); + if (ret) { + dev_err(dev->dev, "could not allocate mmap offset\n"); + return 0; + } + + return drm_vma_node_offset_addr(&obj->vma_node); +} + +static bool omap_gem_sgt_is_contiguous(struct sg_table *sgt, size_t size) +{ + return !(drm_prime_get_contiguous_size(sgt) < size); +} + +static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj) +{ + if (omap_obj->flags & OMAP_BO_MEM_DMA_API) + return true; + + if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && + omap_gem_sgt_is_contiguous(omap_obj->sgt, omap_obj->base.size)) + return true; + + return false; +} + +/* ----------------------------------------------------------------------------- + * Eviction + */ + +static void omap_gem_evict_entry(struct drm_gem_object *obj, + enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry) +{ + struct omap_gem_object *omap_obj = to_omap_bo(obj); + struct omap_drm_private *priv = obj->dev->dev_private; + int n = priv->usergart[fmt].height; + size_t size = PAGE_SIZE * n; + loff_t off = omap_gem_mmap_offset(obj) + + (entry->obj_pgoff << PAGE_SHIFT); + const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE); + + if (m > 1) { + int i; + /* if stride > than PAGE_SIZE then sparse mapping: */ + for (i = n; i > 0; i--) { + unmap_mapping_range(obj->dev->anon_inode->i_mapping, + off, PAGE_SIZE, 1); + off += PAGE_SIZE * m; } + } else { + unmap_mapping_range(obj->dev->anon_inode->i_mapping, + off, size, 1); } entry->obj = NULL; } /* Evict a buffer from usergart, if it is mapped there */ -static void evict(struct drm_gem_object *obj) +static void omap_gem_evict(struct drm_gem_object *obj) { struct omap_gem_object *omap_obj = to_omap_bo(obj); + struct omap_drm_private *priv = obj->dev->dev_private; - if (omap_obj->flags & OMAP_BO_TILED) { + if (omap_obj->flags & OMAP_BO_TILED_MASK) { enum tiler_fmt fmt = 
gem2fmt(omap_obj->flags); int i; - if (!usergart) - return; - for (i = 0; i < NUM_USERGART_ENTRIES; i++) { - struct usergart_entry *entry = &usergart[fmt].entry[i]; + struct omap_drm_usergart_entry *entry = + &priv->usergart[fmt].entry[i]; + if (entry->obj == obj) - evict_entry(obj, fmt, entry); + omap_gem_evict_entry(obj, fmt, entry); } } } -/* GEM objects can either be allocated from contiguous memory (in which - * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL). But non - * contiguous buffers can be remapped in TILER/DMM if they need to be - * contiguous... but we don't do this all the time to reduce pressure - * on TILER/DMM space when we know at allocation time that the buffer - * will need to be scanned out. +/* ----------------------------------------------------------------------------- + * Page Management */ -static inline bool is_shmem(struct drm_gem_object *obj) -{ - return obj->filp != NULL; -} -/** - * shmem buffers that are mapped cached can simulate coherency via using - * page faulting to keep track of dirty pages +/* + * Ensure backing pages are allocated. Must be called with the omap_obj.lock + * held. */ -static inline bool is_cached_coherent(struct drm_gem_object *obj) -{ - struct omap_gem_object *omap_obj = to_omap_bo(obj); - return is_shmem(obj) && - ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED); -} - -static DEFINE_SPINLOCK(sync_lock); - -/** ensure backing pages are allocated */ static int omap_gem_attach_pages(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; @@ -230,13 +235,16 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj) int i, ret; dma_addr_t *addrs; - WARN_ON(omap_obj->pages); + lockdep_assert_held(&omap_obj->lock); - /* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the - * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably - * we actually want CMA memory for it all anyways.. + /* + * If not using shmem (in which case backing pages don't need to be + * allocated) or if pages are already allocated we're done. */ - pages = _drm_gem_get_pages(obj, GFP_KERNEL); + if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages) + return 0; + + pages = drm_gem_get_pages(obj); if (IS_ERR(pages)) { dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages)); return PTR_ERR(pages); @@ -246,7 +254,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj) * DSS, GPU, etc. 
are not cache coherent: */ if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) { - addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL); + addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL); if (!addrs) { ret = -ENOMEM; goto free_pages; @@ -254,93 +262,77 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj) for (i = 0; i < npages; i++) { addrs[i] = dma_map_page(dev->dev, pages[i], - 0, PAGE_SIZE, DMA_BIDIRECTIONAL); + 0, PAGE_SIZE, DMA_TO_DEVICE); + + if (dma_mapping_error(dev->dev, addrs[i])) { + dev_warn(dev->dev, + "%s: failed to map page\n", __func__); + + for (i = i - 1; i >= 0; --i) { + dma_unmap_page(dev->dev, addrs[i], + PAGE_SIZE, DMA_TO_DEVICE); + } + + ret = -ENOMEM; + goto free_addrs; + } } } else { - addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL); + addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL); if (!addrs) { ret = -ENOMEM; goto free_pages; } } - omap_obj->addrs = addrs; + omap_obj->dma_addrs = addrs; omap_obj->pages = pages; return 0; +free_addrs: + kfree(addrs); free_pages: - _drm_gem_put_pages(obj, pages, true, false); + drm_gem_put_pages(obj, pages, true, false); return ret; } -/** release backing pages */ +/* Release backing pages. Must be called with the omap_obj.lock held. */ static void omap_gem_detach_pages(struct drm_gem_object *obj) { struct omap_gem_object *omap_obj = to_omap_bo(obj); + unsigned int npages = obj->size >> PAGE_SHIFT; + unsigned int i; - /* for non-cached buffers, ensure the new pages are clean because - * DSS, GPU, etc. are not cache coherent: - */ - if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) { - int i, npages = obj->size >> PAGE_SHIFT; - for (i = 0; i < npages; i++) { - dma_unmap_page(obj->dev->dev, omap_obj->addrs[i], - PAGE_SIZE, DMA_BIDIRECTIONAL); - } + lockdep_assert_held(&omap_obj->lock); + + for (i = 0; i < npages; i++) { + if (omap_obj->dma_addrs[i]) + dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i], + PAGE_SIZE, DMA_TO_DEVICE); } - kfree(omap_obj->addrs); - omap_obj->addrs = NULL; + kfree(omap_obj->dma_addrs); + omap_obj->dma_addrs = NULL; - _drm_gem_put_pages(obj, omap_obj->pages, true, false); + drm_gem_put_pages(obj, omap_obj->pages, true, false); omap_obj->pages = NULL; } /* get buffer flags */ -uint32_t omap_gem_flags(struct drm_gem_object *obj) +u32 omap_gem_flags(struct drm_gem_object *obj) { return to_omap_bo(obj)->flags; } -/** get mmap offset */ -static uint64_t mmap_offset(struct drm_gem_object *obj) -{ - struct drm_device *dev = obj->dev; - - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - - if (!obj->map_list.map) { - /* Make it mmapable */ - size_t size = omap_gem_mmap_size(obj); - int ret = _drm_gem_create_mmap_offset_size(obj, size); - - if (ret) { - dev_err(dev->dev, "could not allocate mmap offset\n"); - return 0; - } - } - - return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT; -} - -uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj) -{ - uint64_t offset; - mutex_lock(&obj->dev->struct_mutex); - offset = mmap_offset(obj); - mutex_unlock(&obj->dev->struct_mutex); - return offset; -} - /** get mmap size */ size_t omap_gem_mmap_size(struct drm_gem_object *obj) { struct omap_gem_object *omap_obj = to_omap_bo(obj); size_t size = obj->size; - if (omap_obj->flags & OMAP_BO_TILED) { + if (omap_obj->flags & OMAP_BO_TILED_MASK) { /* for tiled buffers, the virtual size has stride rounded up * to 4kb.. (to hide the fact that row n+1 might start 16kb or * 32kb later!). 
But we don't back the entire buffer with @@ -354,20 +346,12 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj) return size; } -/* get tiled size, returns -EINVAL if not tiled buffer */ -int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h) -{ - struct omap_gem_object *omap_obj = to_omap_bo(obj); - if (omap_obj->flags & OMAP_BO_TILED) { - *w = omap_obj->width; - *h = omap_obj->height; - return 0; - } - return -EINVAL; -} +/* ----------------------------------------------------------------------------- + * Fault Handling + */ /* Normal handling for the case of faulting in non-tiled buffers */ -static int fault_1d(struct drm_gem_object *obj, +static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj, struct vm_area_struct *vma, struct vm_fault *vmf) { struct omap_gem_object *omap_obj = to_omap_bo(obj); @@ -375,43 +359,44 @@ static int fault_1d(struct drm_gem_object *obj, pgoff_t pgoff; /* We don't use vmf->pgoff since that has the fake offset: */ - pgoff = ((unsigned long)vmf->virtual_address - - vma->vm_start) >> PAGE_SHIFT; + pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; if (omap_obj->pages) { - omap_gem_cpu_sync(obj, pgoff); + omap_gem_cpu_sync_page(obj, pgoff); pfn = page_to_pfn(omap_obj->pages[pgoff]); } else { - BUG_ON(!(omap_obj->flags & OMAP_BO_DMA)); - pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff; + BUG_ON(!omap_gem_is_contiguous(omap_obj)); + pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff; } - VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address, + VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address, pfn, pfn << PAGE_SHIFT); - return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn); + return vmf_insert_mixed(vma, vmf->address, pfn); } /* Special handling for the case of faulting in 2d tiled buffers */ -static int fault_2d(struct drm_gem_object *obj, +static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj, struct vm_area_struct *vma, struct vm_fault *vmf) { struct omap_gem_object *omap_obj = to_omap_bo(obj); - struct usergart_entry *entry; + struct omap_drm_private *priv = obj->dev->dev_private; + struct omap_drm_usergart_entry *entry; enum tiler_fmt fmt = gem2fmt(omap_obj->flags); struct page *pages[64]; /* XXX is this too much to have on stack? */ unsigned long pfn; pgoff_t pgoff, base_pgoff; - void __user *vaddr; - int i, ret, slots; + unsigned long vaddr; + int i, err, slots; + vm_fault_t ret = VM_FAULT_NOPAGE; /* * Note the height of the slot is also equal to the number of pages * that need to be mapped in to fill 4kb wide CPU page. If the slot * height is 64, then 64 pages fill a 4kb wide by 64 row region. 
*/ - const int n = usergart[fmt].height; - const int n_shift = usergart[fmt].height_shift; + const int n = priv->usergart[fmt].height; + const int n_shift = priv->usergart[fmt].height_shift; /* * If buffer width in bytes > PAGE_SIZE then the virtual stride is @@ -419,11 +404,10 @@ static int fault_2d(struct drm_gem_object *obj, * into account in some of the math, so figure out virtual stride * in pages */ - const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE); + const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE); /* We don't use vmf->pgoff since that has the fake offset: */ - pgoff = ((unsigned long)vmf->virtual_address - - vma->vm_start) >> PAGE_SHIFT; + pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; /* * Actual address we start mapping at is rounded down to previous slot @@ -432,15 +416,15 @@ static int fault_2d(struct drm_gem_object *obj, base_pgoff = round_down(pgoff, m << n_shift); /* figure out buffer width in slots */ - slots = omap_obj->width >> usergart[fmt].slot_shift; + slots = omap_obj->width >> priv->usergart[fmt].slot_shift; - vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT); + vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT); - entry = &usergart[fmt].entry[usergart[fmt].last]; + entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last]; /* evict previous buffer using this usergart entry, if any: */ if (entry->obj) - evict_entry(entry->obj, fmt, entry); + omap_gem_evict_entry(entry->obj, fmt, entry); entry->obj = obj; entry->obj_pgoff = base_pgoff; @@ -470,32 +454,35 @@ static int fault_2d(struct drm_gem_object *obj, memset(pages + slots, 0, sizeof(struct page *) * (n - slots)); - ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true); - if (ret) { - dev_err(obj->dev->dev, "failed to pin: %d\n", ret); + err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true); + if (err) { + ret = vmf_error(err); + dev_err(obj->dev->dev, "failed to pin: %d\n", err); return ret; } - pfn = entry->paddr >> PAGE_SHIFT; + pfn = entry->dma_addr >> PAGE_SHIFT; - VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address, + VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address, pfn, pfn << PAGE_SHIFT); for (i = n; i > 0; i--) { - vm_insert_mixed(vma, (unsigned long)vaddr, pfn); - pfn += usergart[fmt].stride_pfn; + ret = vmf_insert_mixed(vma, vaddr, pfn); + if (ret & VM_FAULT_ERROR) + break; + pfn += priv->usergart[fmt].stride_pfn; vaddr += PAGE_SIZE * m; } /* simple round-robin: */ - usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES; + priv->usergart[fmt].last = (priv->usergart[fmt].last + 1) + % NUM_USERGART_ENTRIES; - return 0; + return ret; } /** * omap_gem_fault - pagefault handler for GEM objects - * @vma: the VMA of the GEM object * @vmf: fault detail * * Invoked when a fault occurs on an mmap of a GEM managed area. GEM @@ -506,23 +493,25 @@ static int fault_2d(struct drm_gem_object *obj, * vma->vm_private_data points to the GEM object that is backing this * mapping. 
*/ -int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +static vm_fault_t omap_gem_fault(struct vm_fault *vmf) { + struct vm_area_struct *vma = vmf->vma; struct drm_gem_object *obj = vma->vm_private_data; struct omap_gem_object *omap_obj = to_omap_bo(obj); - struct drm_device *dev = obj->dev; - struct page **pages; - int ret; + int err; + vm_fault_t ret; /* Make sure we don't parallel update on a fault, nor move or remove * something from beneath our feet */ - mutex_lock(&dev->struct_mutex); + mutex_lock(&omap_obj->lock); /* if a shmem backed object, make sure we have pages attached now */ - ret = get_pages(obj, &pages); - if (ret) + err = omap_gem_attach_pages(obj); + if (err) { + ret = vmf_error(err); goto fail; + } /* where should we do corresponding put_pages().. we are mapping * the original page, rather than thru a GART, so we can't rely @@ -530,47 +519,22 @@ int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) * probably trigger put_pages()? */ - if (omap_obj->flags & OMAP_BO_TILED) - ret = fault_2d(obj, vma, vmf); + if (omap_obj->flags & OMAP_BO_TILED_MASK) + ret = omap_gem_fault_2d(obj, vma, vmf); else - ret = fault_1d(obj, vma, vmf); + ret = omap_gem_fault_1d(obj, vma, vmf); fail: - mutex_unlock(&dev->struct_mutex); - switch (ret) { - case 0: - case -ERESTARTSYS: - case -EINTR: - return VM_FAULT_NOPAGE; - case -ENOMEM: - return VM_FAULT_OOM; - default: - return VM_FAULT_SIGBUS; - } -} - -/** We override mainly to fix up some of the vm mapping flags.. */ -int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma) -{ - int ret; - - ret = drm_gem_mmap(filp, vma); - if (ret) { - DBG("mmap failed: %d", ret); - return ret; - } - - return omap_gem_mmap_obj(vma->vm_private_data, vma); + mutex_unlock(&omap_obj->lock); + return ret; } -int omap_gem_mmap_obj(struct drm_gem_object *obj, - struct vm_area_struct *vma) +static int omap_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) { struct omap_gem_object *omap_obj = to_omap_bo(obj); - vma->vm_flags &= ~VM_PFNMAP; - vma->vm_flags |= VM_MIXEDMAP; + vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_IO | VM_MIXEDMAP); if (omap_obj->flags & OMAP_BO_WC) { vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); @@ -590,20 +554,24 @@ int omap_gem_mmap_obj(struct drm_gem_object *obj, * address_space (so unmap_mapping_range does what we want, * in particular in the case of mmap'd dmabufs) */ - fput(vma->vm_file); - vma->vm_pgoff = 0; - vma->vm_file = get_file(obj->filp); + vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node); + vma_set_file(vma, obj->filp); vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); } + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + return 0; } +/* ----------------------------------------------------------------------------- + * Dumb Buffers + */ /** * omap_gem_dumb_create - create a dumb buffer - * @drm_file: our client file + * @file: our client file * @dev: our device * @args: the requested arguments copied from userspace * @@ -614,52 +582,36 @@ int omap_gem_mmap_obj(struct drm_gem_object *obj, int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args) { - union omap_gem_size gsize; - - /* in case someone tries to feed us a completely bogus stride: */ - args->pitch = align_pitch(args->pitch, args->width, args->bpp); - args->size = PAGE_ALIGN(args->pitch * args->height); + union omap_gem_size gsize = { }; + int ret; - gsize = (union omap_gem_size){ - .bytes = args->size, - }; + ret = 
drm_mode_size_dumb(dev, args, SZ_8, 0); + if (ret) + return ret; + gsize.bytes = args->size; return omap_gem_new_handle(dev, file, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle); } /** - * omap_gem_dumb_destroy - destroy a dumb buffer - * @file: client file - * @dev: our DRM device - * @handle: the object handle - * - * Destroy a handle that was created via omap_gem_dumb_create. - */ -int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, - uint32_t handle) -{ - /* No special work needed, drop the reference and see what falls out */ - return drm_gem_handle_delete(file, handle); -} - -/** - * omap_gem_dumb_map - buffer mapping for dumb interface + * omap_gem_dumb_map_offset - create an offset for a dumb buffer * @file: our drm client file * @dev: drm device * @handle: GEM handle to the object (from dumb_create) + * @offset: memory map offset placeholder * * Do the necessary setup to allow the mapping of the frame buffer * into user memory. We don't have to do much here at the moment. */ int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, - uint32_t handle, uint64_t *offset) + u32 handle, u64 *offset) { struct drm_gem_object *obj; int ret = 0; /* GEM does all our handle to object mapping */ - obj = drm_gem_object_lookup(dev, file, handle); + obj = drm_gem_object_lookup(file, handle); if (obj == NULL) { ret = -ENOENT; goto fail; @@ -667,21 +619,22 @@ int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, *offset = omap_gem_mmap_offset(obj); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put(obj); fail: return ret; } +#ifdef CONFIG_DRM_FBDEV_EMULATION /* Set scrolling position. This allows us to implement fast scrolling * for console. * * Call only from non-atomic contexts. */ -int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll) +int omap_gem_roll(struct drm_gem_object *obj, u32 roll) { struct omap_gem_object *omap_obj = to_omap_bo(obj); - uint32_t npages = obj->size >> PAGE_SHIFT; + u32 npages = obj->size >> PAGE_SHIFT; int ret = 0; if (roll > npages) { @@ -691,247 +644,318 @@ int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll) omap_obj->roll = roll; - mutex_lock(&obj->dev->struct_mutex); + mutex_lock(&omap_obj->lock); /* if we aren't mapped yet, we don't need to do anything */ if (omap_obj->block) { - struct page **pages; - ret = get_pages(obj, &pages); + ret = omap_gem_attach_pages(obj); if (ret) goto fail; - ret = tiler_pin(omap_obj->block, pages, npages, roll, true); + + ret = tiler_pin(omap_obj->block, omap_obj->pages, npages, + roll, true); if (ret) dev_err(obj->dev->dev, "could not repin: %d\n", ret); } fail: - mutex_unlock(&obj->dev->struct_mutex); + mutex_unlock(&omap_obj->lock); return ret; } +#endif + +/* ----------------------------------------------------------------------------- + * Memory Management & DMA Sync + */ + +/* + * shmem buffers that are mapped cached are not coherent. + * + * We keep track of dirty pages using page faulting to perform cache management. + * When a page is mapped to the CPU in read/write mode the device can't access + * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device + * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is + * unmapped from the CPU. 
+ */ +static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj) +{ + struct omap_gem_object *omap_obj = to_omap_bo(obj); + + return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) && + ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED)); +} /* Sync the buffer for CPU access.. note pages should already be * attached, ie. omap_gem_get_pages() */ -void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff) +void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff) { struct drm_device *dev = obj->dev; struct omap_gem_object *omap_obj = to_omap_bo(obj); - if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) { - dma_unmap_page(dev->dev, omap_obj->addrs[pgoff], - PAGE_SIZE, DMA_BIDIRECTIONAL); - omap_obj->addrs[pgoff] = 0; + if (omap_gem_is_cached_coherent(obj)) + return; + + if (omap_obj->dma_addrs[pgoff]) { + dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff], + PAGE_SIZE, DMA_TO_DEVICE); + omap_obj->dma_addrs[pgoff] = 0; } } /* sync the buffer for DMA access */ -void omap_gem_dma_sync(struct drm_gem_object *obj, +void omap_gem_dma_sync_buffer(struct drm_gem_object *obj, enum dma_data_direction dir) { struct drm_device *dev = obj->dev; struct omap_gem_object *omap_obj = to_omap_bo(obj); + int i, npages = obj->size >> PAGE_SHIFT; + struct page **pages = omap_obj->pages; + bool dirty = false; - if (is_cached_coherent(obj)) { - int i, npages = obj->size >> PAGE_SHIFT; - struct page **pages = omap_obj->pages; - bool dirty = false; + if (omap_gem_is_cached_coherent(obj)) + return; - for (i = 0; i < npages; i++) { - if (!omap_obj->addrs[i]) { - omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0, - PAGE_SIZE, DMA_BIDIRECTIONAL); - dirty = true; + for (i = 0; i < npages; i++) { + if (!omap_obj->dma_addrs[i]) { + dma_addr_t addr; + + addr = dma_map_page(dev->dev, pages[i], 0, + PAGE_SIZE, dir); + if (dma_mapping_error(dev->dev, addr)) { + dev_warn(dev->dev, "%s: failed to map page\n", + __func__); + break; } - } - if (dirty) { - unmap_mapping_range(obj->filp->f_mapping, 0, - omap_gem_mmap_size(obj), 1); + dirty = true; + omap_obj->dma_addrs[i] = addr; } } + + if (dirty) { + unmap_mapping_range(obj->filp->f_mapping, 0, + omap_gem_mmap_size(obj), 1); + } } -/* Get physical address for DMA.. if 'remap' is true, and the buffer is not - * already contiguous, remap it to pin in physically contiguous memory.. (ie. - * map in TILER) +static int omap_gem_pin_tiler(struct drm_gem_object *obj) +{ + struct omap_gem_object *omap_obj = to_omap_bo(obj); + u32 npages = obj->size >> PAGE_SHIFT; + enum tiler_fmt fmt = gem2fmt(omap_obj->flags); + struct tiler_block *block; + int ret; + + BUG_ON(omap_obj->block); + + if (omap_obj->flags & OMAP_BO_TILED_MASK) { + block = tiler_reserve_2d(fmt, omap_obj->width, omap_obj->height, + PAGE_SIZE); + } else { + block = tiler_reserve_1d(obj->size); + } + + if (IS_ERR(block)) { + ret = PTR_ERR(block); + dev_err(obj->dev->dev, "could not remap: %d (%d)\n", ret, fmt); + goto fail; + } + + /* TODO: enable async refill.. 
*/ + ret = tiler_pin(block, omap_obj->pages, npages, omap_obj->roll, true); + if (ret) { + tiler_release(block); + dev_err(obj->dev->dev, "could not pin: %d\n", ret); + goto fail; + } + + omap_obj->dma_addr = tiler_ssptr(block); + omap_obj->block = block; + + DBG("got dma address: %pad", &omap_obj->dma_addr); + +fail: + return ret; +} + +/** + * omap_gem_pin() - Pin a GEM object in memory + * @obj: the GEM object + * @dma_addr: the DMA address + * + * Pin the given GEM object in memory and fill the dma_addr pointer with the + * object's DMA address. If the buffer is not physically contiguous it will be + * remapped through the TILER to provide a contiguous view. + * + * Pins are reference-counted, calling this function multiple times is allowed + * as long the corresponding omap_gem_unpin() calls are balanced. + * + * Return 0 on success or a negative error code otherwise. */ -int omap_gem_get_paddr(struct drm_gem_object *obj, - dma_addr_t *paddr, bool remap) +int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr) { struct omap_drm_private *priv = obj->dev->dev_private; struct omap_gem_object *omap_obj = to_omap_bo(obj); int ret = 0; - mutex_lock(&obj->dev->struct_mutex); + mutex_lock(&omap_obj->lock); - if (remap && is_shmem(obj) && priv->has_dmm) { - if (omap_obj->paddr_cnt == 0) { - struct page **pages; - uint32_t npages = obj->size >> PAGE_SHIFT; - enum tiler_fmt fmt = gem2fmt(omap_obj->flags); - struct tiler_block *block; + if (!omap_gem_is_contiguous(omap_obj)) { + if (refcount_read(&omap_obj->pin_cnt) == 0) { - BUG_ON(omap_obj->block); + refcount_set(&omap_obj->pin_cnt, 1); - ret = get_pages(obj, &pages); + ret = omap_gem_attach_pages(obj); if (ret) goto fail; - if (omap_obj->flags & OMAP_BO_TILED) { - block = tiler_reserve_2d(fmt, - omap_obj->width, - omap_obj->height, 0); - } else { - block = tiler_reserve_1d(obj->size); + if (omap_obj->flags & OMAP_BO_SCANOUT) { + if (priv->has_dmm) { + ret = omap_gem_pin_tiler(obj); + if (ret) + goto fail; + } } - - if (IS_ERR(block)) { - ret = PTR_ERR(block); - dev_err(obj->dev->dev, - "could not remap: %d (%d)\n", ret, fmt); - goto fail; - } - - /* TODO: enable async refill.. */ - ret = tiler_pin(block, pages, npages, - omap_obj->roll, true); - if (ret) { - tiler_release(block); - dev_err(obj->dev->dev, - "could not pin: %d\n", ret); - goto fail; - } - - omap_obj->paddr = tiler_ssptr(block); - omap_obj->block = block; - - DBG("got paddr: %08x", omap_obj->paddr); + } else { + refcount_inc(&omap_obj->pin_cnt); } - - omap_obj->paddr_cnt++; - - *paddr = omap_obj->paddr; - } else if (omap_obj->flags & OMAP_BO_DMA) { - *paddr = omap_obj->paddr; - } else { - ret = -EINVAL; - goto fail; } + if (dma_addr) + *dma_addr = omap_obj->dma_addr; + fail: - mutex_unlock(&obj->dev->struct_mutex); + mutex_unlock(&omap_obj->lock); return ret; } -/* Release physical address, when DMA is no longer being performed.. this - * could potentially unpin and unmap buffers from TILER +/** + * omap_gem_unpin_locked() - Unpin a GEM object from memory + * @obj: the GEM object + * + * omap_gem_unpin() without locking. 
*/ -int omap_gem_put_paddr(struct drm_gem_object *obj) +static void omap_gem_unpin_locked(struct drm_gem_object *obj) { + struct omap_drm_private *priv = obj->dev->dev_private; struct omap_gem_object *omap_obj = to_omap_bo(obj); - int ret = 0; + int ret; + + if (omap_gem_is_contiguous(omap_obj)) + return; - mutex_lock(&obj->dev->struct_mutex); - if (omap_obj->paddr_cnt > 0) { - omap_obj->paddr_cnt--; - if (omap_obj->paddr_cnt == 0) { + if (refcount_dec_and_test(&omap_obj->pin_cnt)) { + if (omap_obj->sgt) { + sg_free_table(omap_obj->sgt); + kfree(omap_obj->sgt); + omap_obj->sgt = NULL; + } + if (!(omap_obj->flags & OMAP_BO_SCANOUT)) + return; + if (priv->has_dmm) { ret = tiler_unpin(omap_obj->block); if (ret) { dev_err(obj->dev->dev, "could not unpin pages: %d\n", ret); - goto fail; } ret = tiler_release(omap_obj->block); if (ret) { dev_err(obj->dev->dev, "could not release unmap: %d\n", ret); } + omap_obj->dma_addr = 0; omap_obj->block = NULL; } } -fail: - mutex_unlock(&obj->dev->struct_mutex); - return ret; +} + +/** + * omap_gem_unpin() - Unpin a GEM object from memory + * @obj: the GEM object + * + * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are + * reference-counted, the actual unpin will only be performed when the number + * of calls to this function matches the number of calls to omap_gem_pin(). + */ +void omap_gem_unpin(struct drm_gem_object *obj) +{ + struct omap_gem_object *omap_obj = to_omap_bo(obj); + + mutex_lock(&omap_obj->lock); + omap_gem_unpin_locked(obj); + mutex_unlock(&omap_obj->lock); } /* Get rotated scanout address (only valid if already pinned), at the * specified orientation and x,y offset from top-left corner of buffer * (only valid for tiled 2d buffers) */ -int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient, - int x, int y, dma_addr_t *paddr) +int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient, + int x, int y, dma_addr_t *dma_addr) { struct omap_gem_object *omap_obj = to_omap_bo(obj); int ret = -EINVAL; - mutex_lock(&obj->dev->struct_mutex); - if ((omap_obj->paddr_cnt > 0) && omap_obj->block && - (omap_obj->flags & OMAP_BO_TILED)) { - *paddr = tiler_tsptr(omap_obj->block, orient, x, y); + mutex_lock(&omap_obj->lock); + + if ((refcount_read(&omap_obj->pin_cnt) > 0) && omap_obj->block && + (omap_obj->flags & OMAP_BO_TILED_MASK)) { + *dma_addr = tiler_tsptr(omap_obj->block, orient, x, y); ret = 0; } - mutex_unlock(&obj->dev->struct_mutex); + + mutex_unlock(&omap_obj->lock); + return ret; } /* Get tiler stride for the buffer (only valid for 2d tiled buffers) */ -int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient) +int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient) { struct omap_gem_object *omap_obj = to_omap_bo(obj); int ret = -EINVAL; - if (omap_obj->flags & OMAP_BO_TILED) + if (omap_obj->flags & OMAP_BO_TILED_MASK) ret = tiler_stride(gem2fmt(omap_obj->flags), orient); return ret; } -/* acquire pages when needed (for example, for DMA where physically - * contiguous buffer is not required - */ -static int get_pages(struct drm_gem_object *obj, struct page ***pages) -{ - struct omap_gem_object *omap_obj = to_omap_bo(obj); - int ret = 0; - - if (is_shmem(obj) && !omap_obj->pages) { - ret = omap_gem_attach_pages(obj); - if (ret) { - dev_err(obj->dev->dev, "could not attach pages\n"); - return ret; - } - } - - /* TODO: even phys-contig.. we should have a list of pages? 
*/ - *pages = omap_obj->pages; - - return 0; -} - /* if !remap, and we don't have pages backing, then fail, rather than * increasing the pin count (which we don't really do yet anyways, * because we don't support swapping pages back out). And 'remap' * might not be quite the right name, but I wanted to keep it working - * similarly to omap_gem_get_paddr(). Note though that mutex is not + * similarly to omap_gem_pin(). Note though that mutex is not * aquired if !remap (because this can be called in atomic ctxt), - * but probably omap_gem_get_paddr() should be changed to work in the + * but probably omap_gem_unpin() should be changed to work in the * same way. If !remap, a matching omap_gem_put_pages() call is not * required (and should not be made). */ int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages, bool remap) { - int ret; - if (!remap) { - struct omap_gem_object *omap_obj = to_omap_bo(obj); - if (!omap_obj->pages) - return -ENOMEM; - *pages = omap_obj->pages; - return 0; + struct omap_gem_object *omap_obj = to_omap_bo(obj); + int ret = 0; + + mutex_lock(&omap_obj->lock); + + if (remap) { + ret = omap_gem_attach_pages(obj); + if (ret) + goto unlock; } - mutex_lock(&obj->dev->struct_mutex); - ret = get_pages(obj, pages); - mutex_unlock(&obj->dev->struct_mutex); + + if (!omap_obj->pages) { + ret = -ENOMEM; + goto unlock; + } + + *pages = omap_obj->pages; + +unlock: + mutex_unlock(&omap_obj->lock); + return ret; } @@ -945,71 +969,183 @@ int omap_gem_put_pages(struct drm_gem_object *obj) return 0; } -/* Get kernel virtual address for CPU access.. this more or less only - * exists for omap_fbdev. This should be called with struct_mutex - * held. +struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj, + enum dma_data_direction dir) +{ + struct omap_gem_object *omap_obj = to_omap_bo(obj); + dma_addr_t addr; + struct sg_table *sgt; + struct scatterlist *sg; + unsigned int count, len, stride, i; + int ret; + + ret = omap_gem_pin(obj, &addr); + if (ret) + return ERR_PTR(ret); + + mutex_lock(&omap_obj->lock); + + sgt = omap_obj->sgt; + if (sgt) + goto out; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) { + ret = -ENOMEM; + goto err_unpin; + } + + if (addr) { + if (omap_obj->flags & OMAP_BO_TILED_MASK) { + enum tiler_fmt fmt = gem2fmt(omap_obj->flags); + + len = omap_obj->width << (int)fmt; + count = omap_obj->height; + stride = tiler_stride(fmt, 0); + } else { + len = obj->size; + count = 1; + stride = 0; + } + } else { + count = obj->size >> PAGE_SHIFT; + } + + ret = sg_alloc_table(sgt, count, GFP_KERNEL); + if (ret) + goto err_free; + + /* this must be after omap_gem_pin() to ensure we have pages attached */ + omap_gem_dma_sync_buffer(obj, dir); + + if (addr) { + for_each_sg(sgt->sgl, sg, count, i) { + sg_set_page(sg, pfn_to_page(__phys_to_pfn(addr)), + len, offset_in_page(addr)); + sg_dma_address(sg) = addr; + sg_dma_len(sg) = len; + + addr += stride; + } + } else { + for_each_sg(sgt->sgl, sg, count, i) { + sg_set_page(sg, omap_obj->pages[i], PAGE_SIZE, 0); + sg_dma_address(sg) = omap_obj->dma_addrs[i]; + sg_dma_len(sg) = PAGE_SIZE; + } + } + + omap_obj->sgt = sgt; +out: + mutex_unlock(&omap_obj->lock); + return sgt; + +err_free: + kfree(sgt); +err_unpin: + mutex_unlock(&omap_obj->lock); + omap_gem_unpin(obj); + return ERR_PTR(ret); +} + +void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt) +{ + struct omap_gem_object *omap_obj = to_omap_bo(obj); + + if (WARN_ON(omap_obj->sgt != sgt)) + return; + + omap_gem_unpin(obj); +} + +#ifdef 
CONFIG_DRM_FBDEV_EMULATION +/* + * Get kernel virtual address for CPU access.. this more or less only + * exists for omap_fbdev. */ void *omap_gem_vaddr(struct drm_gem_object *obj) { struct omap_gem_object *omap_obj = to_omap_bo(obj); - WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); + void *vaddr; + int ret; + + mutex_lock(&omap_obj->lock); + if (!omap_obj->vaddr) { - struct page **pages; - int ret = get_pages(obj, &pages); - if (ret) - return ERR_PTR(ret); - omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, + ret = omap_gem_attach_pages(obj); + if (ret) { + vaddr = ERR_PTR(ret); + goto unlock; + } + + omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT, VM_MAP, pgprot_writecombine(PAGE_KERNEL)); } - return omap_obj->vaddr; + + vaddr = omap_obj->vaddr; + +unlock: + mutex_unlock(&omap_obj->lock); + return vaddr; } +#endif + +/* ----------------------------------------------------------------------------- + * Power Management + */ #ifdef CONFIG_PM /* re-pin objects in DMM in resume path: */ -int omap_gem_resume(struct device *dev) +int omap_gem_resume(struct drm_device *dev) { - struct drm_device *drm_dev = dev_get_drvdata(dev); - struct omap_drm_private *priv = drm_dev->dev_private; + struct omap_drm_private *priv = dev->dev_private; struct omap_gem_object *omap_obj; int ret = 0; + mutex_lock(&priv->list_lock); list_for_each_entry(omap_obj, &priv->obj_list, mm_list) { if (omap_obj->block) { struct drm_gem_object *obj = &omap_obj->base; - uint32_t npages = obj->size >> PAGE_SHIFT; + u32 npages = obj->size >> PAGE_SHIFT; + WARN_ON(!omap_obj->pages); /* this can't happen */ ret = tiler_pin(omap_obj->block, omap_obj->pages, npages, omap_obj->roll, true); if (ret) { - dev_err(dev, "could not repin: %d\n", ret); - return ret; + dev_err(dev->dev, "could not repin: %d\n", ret); + goto done; } } } - return 0; +done: + mutex_unlock(&priv->list_lock); + return ret; } #endif +/* ----------------------------------------------------------------------------- + * DebugFS + */ + #ifdef CONFIG_DEBUG_FS void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m) { - struct drm_device *dev = obj->dev; struct omap_gem_object *omap_obj = to_omap_bo(obj); - uint64_t off = 0; + u64 off; - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + off = drm_vma_node_start(&obj->vma_node); - if (obj->map_list.map) - off = (uint64_t)obj->map_list.hash.key; + mutex_lock(&omap_obj->lock); - seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d", - omap_obj->flags, obj->name, obj->refcount.refcount.counter, - off, omap_obj->paddr, omap_obj->paddr_cnt, + seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d", + omap_obj->flags, obj->name, kref_read(&obj->refcount), + off, &omap_obj->dma_addr, + refcount_read(&omap_obj->pin_cnt), omap_obj->vaddr, omap_obj->roll); - if (omap_obj->flags & OMAP_BO_TILED) { + if (omap_obj->flags & OMAP_BO_TILED_MASK) { seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height); if (omap_obj->block) { struct tcm_area *area = &omap_obj->block->area; @@ -1018,9 +1154,11 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m) area->p1.x, area->p1.y); } } else { - seq_printf(m, " %d", obj->size); + seq_printf(m, " %zu", obj->size); } + mutex_unlock(&omap_obj->lock); + seq_printf(m, "\n"); } @@ -1042,306 +1180,256 @@ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m) } #endif -/* Buffer Synchronization: - */ - -struct omap_gem_sync_waiter { - struct list_head list; - struct omap_gem_object *omap_obj; - enum omap_gem_op op; - 
uint32_t read_target, write_target; - /* notify called w/ sync_lock held */ - void (*notify)(void *arg); - void *arg; -}; - -/* list of omap_gem_sync_waiter.. the notify fxn gets called back when - * the read and/or write target count is achieved which can call a user - * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for - * cpu access), etc. +/* ----------------------------------------------------------------------------- + * Constructor & Destructor */ -static LIST_HEAD(waiters); -static inline bool is_waiting(struct omap_gem_sync_waiter *waiter) +static void omap_gem_free_object(struct drm_gem_object *obj) { - struct omap_gem_object *omap_obj = waiter->omap_obj; - if ((waiter->op & OMAP_GEM_READ) && - (omap_obj->sync->read_complete < waiter->read_target)) - return true; - if ((waiter->op & OMAP_GEM_WRITE) && - (omap_obj->sync->write_complete < waiter->write_target)) - return true; - return false; -} - -/* macro for sync debug.. */ -#define SYNCDBG 0 -#define SYNC(fmt, ...) do { if (SYNCDBG) \ - printk(KERN_ERR "%s:%d: "fmt"\n", \ - __func__, __LINE__, ##__VA_ARGS__); \ - } while (0) + struct drm_device *dev = obj->dev; + struct omap_drm_private *priv = dev->dev_private; + struct omap_gem_object *omap_obj = to_omap_bo(obj); + omap_gem_evict(obj); -static void sync_op_update(void) -{ - struct omap_gem_sync_waiter *waiter, *n; - list_for_each_entry_safe(waiter, n, &waiters, list) { - if (!is_waiting(waiter)) { - list_del(&waiter->list); - SYNC("notify: %p", waiter); - waiter->notify(waiter->arg); - kfree(waiter); - } - } -} + mutex_lock(&priv->list_lock); + list_del(&omap_obj->mm_list); + mutex_unlock(&priv->list_lock); -static inline int sync_op(struct drm_gem_object *obj, - enum omap_gem_op op, bool start) -{ - struct omap_gem_object *omap_obj = to_omap_bo(obj); - int ret = 0; + /* + * We own the sole reference to the object at this point, but to keep + * lockdep happy, we must still take the omap_obj_lock to call + * omap_gem_detach_pages(). This should hardly make any difference as + * there can't be any lock contention. + */ + mutex_lock(&omap_obj->lock); - spin_lock(&sync_lock); + /* The object should not be pinned. */ + WARN_ON(refcount_read(&omap_obj->pin_cnt) > 0); - if (!omap_obj->sync) { - omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC); - if (!omap_obj->sync) { - ret = -ENOMEM; - goto unlock; - } + if (omap_obj->pages) { + if (omap_obj->flags & OMAP_BO_MEM_DMABUF) + kfree(omap_obj->pages); + else + omap_gem_detach_pages(obj); } - if (start) { - if (op & OMAP_GEM_READ) - omap_obj->sync->read_pending++; - if (op & OMAP_GEM_WRITE) - omap_obj->sync->write_pending++; - } else { - if (op & OMAP_GEM_READ) - omap_obj->sync->read_complete++; - if (op & OMAP_GEM_WRITE) - omap_obj->sync->write_complete++; - sync_op_update(); + if (omap_obj->flags & OMAP_BO_MEM_DMA_API) { + dma_free_wc(dev->dev, obj->size, omap_obj->vaddr, + omap_obj->dma_addr); + } else if (omap_obj->vaddr) { + vunmap(omap_obj->vaddr); + } else if (obj->import_attach) { + drm_prime_gem_destroy(obj, omap_obj->sgt); } -unlock: - spin_unlock(&sync_lock); + mutex_unlock(&omap_obj->lock); - return ret; -} + drm_gem_object_release(obj); -/* it is a bit lame to handle updates in this sort of polling way, but - * in case of PVR, the GPU can directly update read/write complete - * values, and not really tell us which ones it updated.. this also - * means that sync_lock is not quite sufficient. So we'll need to - * do something a bit better when it comes time to add support for - * separate 2d hw.. 
- */ -void omap_gem_op_update(void) -{ - spin_lock(&sync_lock); - sync_op_update(); - spin_unlock(&sync_lock); -} + mutex_destroy(&omap_obj->lock); -/* mark the start of read and/or write operation */ -int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op) -{ - return sync_op(obj, op, true); + kfree(omap_obj); } -int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op) +static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags) { - return sync_op(obj, op, false); -} + struct omap_drm_private *priv = dev->dev_private; -static DECLARE_WAIT_QUEUE_HEAD(sync_event); + switch (flags & OMAP_BO_CACHE_MASK) { + case OMAP_BO_CACHED: + case OMAP_BO_WC: + case OMAP_BO_CACHE_MASK: + break; -static void sync_notify(void *arg) -{ - struct task_struct **waiter_task = arg; - *waiter_task = NULL; - wake_up_all(&sync_event); -} + default: + return false; + } -int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op) -{ - struct omap_gem_object *omap_obj = to_omap_bo(obj); - int ret = 0; - if (omap_obj->sync) { - struct task_struct *waiter_task = current; - struct omap_gem_sync_waiter *waiter = - kzalloc(sizeof(*waiter), GFP_KERNEL); - - if (!waiter) - return -ENOMEM; - - waiter->omap_obj = omap_obj; - waiter->op = op; - waiter->read_target = omap_obj->sync->read_pending; - waiter->write_target = omap_obj->sync->write_pending; - waiter->notify = sync_notify; - waiter->arg = &waiter_task; - - spin_lock(&sync_lock); - if (is_waiting(waiter)) { - SYNC("waited: %p", waiter); - list_add_tail(&waiter->list, &waiters); - spin_unlock(&sync_lock); - ret = wait_event_interruptible(sync_event, - (waiter_task == NULL)); - spin_lock(&sync_lock); - if (waiter_task) { - SYNC("interrupted: %p", waiter); - /* we were interrupted */ - list_del(&waiter->list); - waiter_task = NULL; - } else { - /* freed in sync_op_update() */ - waiter = NULL; - } - } - spin_unlock(&sync_lock); + if (flags & OMAP_BO_TILED_MASK) { + if (!priv->usergart) + return false; - if (waiter) - kfree(waiter); + switch (flags & OMAP_BO_TILED_MASK) { + case OMAP_BO_TILED_8: + case OMAP_BO_TILED_16: + case OMAP_BO_TILED_32: + break; + + default: + return false; + } } - return ret; + + return true; } -/* call fxn(arg), either synchronously or asynchronously if the op - * is currently blocked.. fxn() can be called from any context - * - * (TODO for now fxn is called back from whichever context calls - * omap_gem_op_update().. but this could be better defined later - * if needed) - * - * TODO more code in common w/ _sync().. 
- */ -int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op, - void (*fxn)(void *arg), void *arg) +static const struct vm_operations_struct omap_gem_vm_ops = { + .fault = omap_gem_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static const struct drm_gem_object_funcs omap_gem_object_funcs = { + .free = omap_gem_free_object, + .export = omap_gem_prime_export, + .mmap = omap_gem_object_mmap, + .vm_ops = &omap_gem_vm_ops, +}; + +/* GEM buffer object constructor */ +struct drm_gem_object *omap_gem_new(struct drm_device *dev, + union omap_gem_size gsize, u32 flags) { - struct omap_gem_object *omap_obj = to_omap_bo(obj); - if (omap_obj->sync) { - struct omap_gem_sync_waiter *waiter = - kzalloc(sizeof(*waiter), GFP_ATOMIC); - - if (!waiter) - return -ENOMEM; - - waiter->omap_obj = omap_obj; - waiter->op = op; - waiter->read_target = omap_obj->sync->read_pending; - waiter->write_target = omap_obj->sync->write_pending; - waiter->notify = fxn; - waiter->arg = arg; - - spin_lock(&sync_lock); - if (is_waiting(waiter)) { - SYNC("waited: %p", waiter); - list_add_tail(&waiter->list, &waiters); - spin_unlock(&sync_lock); - return 0; - } + struct omap_drm_private *priv = dev->dev_private; + struct omap_gem_object *omap_obj; + struct drm_gem_object *obj; + struct address_space *mapping; + size_t size; + int ret; + + if (!omap_gem_validate_flags(dev, flags)) + return NULL; - spin_unlock(&sync_lock); + /* Validate the flags and compute the memory and cache flags. */ + if (flags & OMAP_BO_TILED_MASK) { + /* + * Tiled buffers are always shmem paged backed. When they are + * scanned out, they are remapped into DMM/TILER. + */ + flags |= OMAP_BO_MEM_SHMEM; + + /* + * Currently don't allow cached buffers. There is some caching + * stuff that needs to be handled better. + */ + flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED); + flags |= tiler_get_cpu_cache_flags(); + } else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) { + /* + * If we don't have DMM, we must allocate scanout buffers + * from contiguous DMA memory. + */ + flags |= OMAP_BO_MEM_DMA_API; + } else if (!(flags & OMAP_BO_MEM_DMABUF)) { + /* + * All other buffers not backed by dma_buf are shmem-backed. + */ + flags |= OMAP_BO_MEM_SHMEM; } - /* no waiting.. */ - fxn(arg); + /* Allocate the initialize the OMAP GEM object. */ + omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL); + if (!omap_obj) + return NULL; - return 0; -} + obj = &omap_obj->base; + omap_obj->flags = flags; + mutex_init(&omap_obj->lock); -/* special API so PVR can update the buffer to use a sync-object allocated - * from it's sync-obj heap. Only used for a newly allocated (from PVR's - * perspective) sync-object, so we overwrite the new syncobj w/ values - * from the already allocated syncobj (if there is one) - */ -int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj) -{ - struct omap_gem_object *omap_obj = to_omap_bo(obj); - int ret = 0; + if (flags & OMAP_BO_TILED_MASK) { + /* + * For tiled buffers align dimensions to slot boundaries and + * calculate size based on aligned dimensions. 
+ */ + tiler_align(gem2fmt(flags), &gsize.tiled.width, + &gsize.tiled.height); - spin_lock(&sync_lock); + size = tiler_size(gem2fmt(flags), gsize.tiled.width, + gsize.tiled.height); - if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) { - /* clearing a previously set syncobj */ - syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync), - GFP_ATOMIC); - if (!syncobj) { - ret = -ENOMEM; - goto unlock; - } - omap_obj->flags &= ~OMAP_BO_EXT_SYNC; - omap_obj->sync = syncobj; - } else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) { - /* replacing an existing syncobj */ - if (omap_obj->sync) { - memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync)); - kfree(omap_obj->sync); - } - omap_obj->flags |= OMAP_BO_EXT_SYNC; - omap_obj->sync = syncobj; + omap_obj->width = gsize.tiled.width; + omap_obj->height = gsize.tiled.height; + } else { + size = PAGE_ALIGN(gsize.bytes); } -unlock: - spin_unlock(&sync_lock); - return ret; -} + obj->funcs = &omap_gem_object_funcs; -int omap_gem_init_object(struct drm_gem_object *obj) -{ - return -EINVAL; /* unused */ + /* Initialize the GEM object. */ + if (!(flags & OMAP_BO_MEM_SHMEM)) { + drm_gem_private_object_init(dev, obj, size); + } else { + ret = drm_gem_object_init(dev, obj, size); + if (ret) + goto err_free; + + mapping = obj->filp->f_mapping; + mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32); + } + + /* Allocate memory if needed. */ + if (flags & OMAP_BO_MEM_DMA_API) { + omap_obj->vaddr = dma_alloc_wc(dev->dev, size, + &omap_obj->dma_addr, + GFP_KERNEL); + if (!omap_obj->vaddr) + goto err_release; + } + + mutex_lock(&priv->list_lock); + list_add(&omap_obj->mm_list, &priv->obj_list); + mutex_unlock(&priv->list_lock); + + return obj; + +err_release: + drm_gem_object_release(obj); +err_free: + kfree(omap_obj); + return NULL; } -/* don't call directly.. called from GEM core when it is time to actually - * free the object.. - */ -void omap_gem_free_object(struct drm_gem_object *obj) +struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size, + struct sg_table *sgt) { - struct drm_device *dev = obj->dev; - struct omap_gem_object *omap_obj = to_omap_bo(obj); - - evict(obj); + struct omap_drm_private *priv = dev->dev_private; + struct omap_gem_object *omap_obj; + struct drm_gem_object *obj; + union omap_gem_size gsize; - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + /* Without a DMM only physically contiguous buffers can be supported. */ + if (!omap_gem_sgt_is_contiguous(sgt, size) && !priv->has_dmm) + return ERR_PTR(-EINVAL); - list_del(&omap_obj->mm_list); + gsize.bytes = PAGE_ALIGN(size); + obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC); + if (!obj) + return ERR_PTR(-ENOMEM); - if (obj->map_list.map) - drm_gem_free_mmap_offset(obj); + omap_obj = to_omap_bo(obj); - /* this means the object is still pinned.. which really should - * not happen. I think.. 
- */ - WARN_ON(omap_obj->paddr_cnt > 0); + omap_obj->sgt = sgt; - /* don't free externally allocated backing memory */ - if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) { - if (omap_obj->pages) - omap_gem_detach_pages(obj); + if (omap_gem_sgt_is_contiguous(sgt, size)) { + omap_obj->dma_addr = sg_dma_address(sgt->sgl); + } else { + /* Create pages list from sgt */ + struct page **pages; + unsigned int npages; + unsigned int ret; + + npages = DIV_ROUND_UP(size, PAGE_SIZE); + pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); + if (!pages) { + omap_gem_free_object(obj); + return ERR_PTR(-ENOMEM); + } - if (!is_shmem(obj)) { - dma_free_writecombine(dev->dev, obj->size, - omap_obj->vaddr, omap_obj->paddr); - } else if (omap_obj->vaddr) { - vunmap(omap_obj->vaddr); + omap_obj->pages = pages; + ret = drm_prime_sg_to_page_array(sgt, pages, npages); + if (ret) { + omap_gem_free_object(obj); + return ERR_PTR(-ENOMEM); } } - /* don't free externally allocated syncobj */ - if (!(omap_obj->flags & OMAP_BO_EXT_SYNC)) - kfree(omap_obj->sync); - - drm_gem_object_release(obj); - - kfree(obj); + return obj; } /* convenience method to construct a GEM buffer object, and userspace handle */ int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file, - union omap_gem_size gsize, uint32_t flags, uint32_t *handle) + union omap_gem_size gsize, u32 flags, u32 *handle) { struct drm_gem_object *obj; int ret; @@ -1352,102 +1440,25 @@ int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file, ret = drm_gem_handle_create(file, obj, handle); if (ret) { - drm_gem_object_release(obj); - kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */ + omap_gem_free_object(obj); return ret; } /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put(obj); return 0; } -/* GEM buffer object constructor */ -struct drm_gem_object *omap_gem_new(struct drm_device *dev, - union omap_gem_size gsize, uint32_t flags) -{ - struct omap_drm_private *priv = dev->dev_private; - struct omap_gem_object *omap_obj; - struct drm_gem_object *obj = NULL; - size_t size; - int ret; - - if (flags & OMAP_BO_TILED) { - if (!usergart) { - dev_err(dev->dev, "Tiled buffers require DMM\n"); - goto fail; - } - - /* tiled buffers are always shmem paged backed.. when they are - * scanned out, they are remapped into DMM/TILER - */ - flags &= ~OMAP_BO_SCANOUT; - - /* currently don't allow cached buffers.. there is some caching - * stuff that needs to be handled better - */ - flags &= ~(OMAP_BO_CACHED|OMAP_BO_UNCACHED); - flags |= OMAP_BO_WC; - - /* align dimensions to slot boundaries... 
*/ - tiler_align(gem2fmt(flags), - &gsize.tiled.width, &gsize.tiled.height); - - /* ...and calculate size based on aligned dimensions */ - size = tiler_size(gem2fmt(flags), - gsize.tiled.width, gsize.tiled.height); - } else { - size = PAGE_ALIGN(gsize.bytes); - } - - omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL); - if (!omap_obj) - goto fail; - - list_add(&omap_obj->mm_list, &priv->obj_list); - - obj = &omap_obj->base; - - if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) { - /* attempt to allocate contiguous memory if we don't - * have DMM for remappign discontiguous buffers - */ - omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size, - &omap_obj->paddr, GFP_KERNEL); - if (omap_obj->vaddr) - flags |= OMAP_BO_DMA; - - } - - omap_obj->flags = flags; - - if (flags & OMAP_BO_TILED) { - omap_obj->width = gsize.tiled.width; - omap_obj->height = gsize.tiled.height; - } - - if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) - ret = drm_gem_private_object_init(dev, obj, size); - else - ret = drm_gem_object_init(dev, obj, size); - - if (ret) - goto fail; - - return obj; - -fail: - if (obj) - omap_gem_free_object(obj); - - return NULL; -} +/* ----------------------------------------------------------------------------- + * Init & Cleanup + */ -/* init/cleanup.. if DMM is used, we need to set some stuff up.. */ +/* If DMM is used, we need to set some stuff up.. */ void omap_gem_init(struct drm_device *dev) { struct omap_drm_private *priv = dev->dev_private; + struct omap_drm_usergart *usergart; const enum tiler_fmt fmts[] = { TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT }; @@ -1465,7 +1476,8 @@ void omap_gem_init(struct drm_device *dev) /* reserve 4k aligned/wide regions for userspace mappings: */ for (i = 0; i < ARRAY_SIZE(fmts); i++) { - uint16_t h = 1, w = PAGE_SIZE >> i; + u16 h = 1, w = PAGE_SIZE >> i; + tiler_align(fmts[i], &w, &h); /* note: since each region is 1 4kb page wide, and minimum * number of rows, the height ends up being the same as the @@ -1476,32 +1488,36 @@ void omap_gem_init(struct drm_device *dev) usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT; usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i); for (j = 0; j < NUM_USERGART_ENTRIES; j++) { - struct usergart_entry *entry = &usergart[i].entry[j]; - struct tiler_block *block = - tiler_reserve_2d(fmts[i], w, h, - PAGE_SIZE); + struct omap_drm_usergart_entry *entry; + struct tiler_block *block; + + entry = &usergart[i].entry[j]; + block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE); if (IS_ERR(block)) { dev_err(dev->dev, "reserve failed: %d, %d, %ld\n", i, j, PTR_ERR(block)); return; } - entry->paddr = tiler_ssptr(block); + entry->dma_addr = tiler_ssptr(block); entry->block = block; - DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h, - entry->paddr, + DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h, + &entry->dma_addr, usergart[i].stride_pfn << PAGE_SHIFT); } } + priv->usergart = usergart; priv->has_dmm = true; } void omap_gem_deinit(struct drm_device *dev) { + struct omap_drm_private *priv = dev->dev_private; + /* I believe we can rely on there being no more outstanding GEM * objects which could depend on usergart/dmm at this point. */ - kfree(usergart); + kfree(priv->usergart); } |
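The conversion away from dev->struct_mutex shows up throughout the diff as a lock/assert split: public entry points such as omap_gem_get_pages() take the new per-object omap_obj->lock, while internal helpers like omap_gem_attach_pages() only assert it with lockdep_assert_held(). A generic sketch of that split, with placeholder names (example_obj, example_get_pages):

#include <linux/lockdep.h>
#include <linux/mutex.h>

struct example_obj {
        struct mutex lock;
        struct page **pages;
};

/* Internal helper: caller must hold the per-object lock. */
static int example_attach_pages(struct example_obj *eo)
{
        lockdep_assert_held(&eo->lock);  /* catches unlocked callers */
        /* ... allocate eo->pages if not already done ... */
        return 0;
}

/* Public entry point: takes the lock, then calls the helper. */
int example_get_pages(struct example_obj *eo, struct page ***pages)
{
        int ret;

        mutex_lock(&eo->lock);
        ret = example_attach_pages(eo);
        if (!ret)
                *pages = eo->pages;
        mutex_unlock(&eo->lock);

        return ret;
}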
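paddr_cnt, a bare uint32_t, becomes the refcount_t pin_cnt. The first-user/last-user shape used in omap_gem_pin() and omap_gem_unpin_locked() is the standard one; a generic sketch under assumed names (my_obj, expensive_map(), expensive_unmap() are placeholders):

#include <linux/mutex.h>
#include <linux/refcount.h>

struct my_obj {
        struct mutex lock;
        refcount_t pin_cnt;
};

static int expensive_map(struct my_obj *o);    /* placeholder */
static void expensive_unmap(struct my_obj *o); /* placeholder */

static int my_obj_get(struct my_obj *o)
{
        int ret = 0;

        mutex_lock(&o->lock);
        if (refcount_read(&o->pin_cnt) == 0) {
                refcount_set(&o->pin_cnt, 1);      /* first user maps */
                ret = expensive_map(o);
                if (ret)
                        refcount_set(&o->pin_cnt, 0); /* undo on failure */
        } else {
                refcount_inc(&o->pin_cnt);
        }
        mutex_unlock(&o->lock);
        return ret;
}

static void my_obj_put(struct my_obj *o)
{
        mutex_lock(&o->lock);
        if (refcount_dec_and_test(&o->pin_cnt))
                expensive_unmap(o);                /* last user unmaps */
        mutex_unlock(&o->lock);
}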
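The old omap_gem_get_paddr()/omap_gem_put_paddr() pair becomes omap_gem_pin()/omap_gem_unpin(), with the kerneldoc added above. A sketch of the calling convention that kerneldoc describes; the plane-prepare/cleanup caller is hypothetical, and the prototypes are assumed to come from omap_drv.h:

#include "omap_drv.h"

/* Hypothetical display-path caller; hardware programming elided. */
static int example_plane_prepare(struct drm_gem_object *obj,
                                 dma_addr_t *dma_addr)
{
        int ret;

        /* Takes a pin reference; remaps through TILER if not contiguous. */
        ret = omap_gem_pin(obj, dma_addr);
        if (ret)
                return ret;

        /* ... program the scanout engine with *dma_addr ... */

        return 0;
}

static void example_plane_cleanup(struct drm_gem_object *obj)
{
        /* Every successful omap_gem_pin() must be balanced by an unpin. */
        omap_gem_unpin(obj);
}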
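omap_gem_fault() used to translate errno values through a switch statement at the end of the function; the reworked code returns vm_fault_t directly and converts helper errors with the generic vmf_error() helper. A minimal sketch of the idiom, where example_prepare() stands in for a helper like omap_gem_attach_pages():

#include <linux/mm.h>

static int example_prepare(void);   /* placeholder helper returning -errno */

static vm_fault_t example_fault(struct vm_fault *vmf)
{
        int err = example_prepare();

        if (err)
                return vmf_error(err); /* -ENOMEM -> VM_FAULT_OOM, else SIGBUS */

        /* ... insert the PFN for vmf->address ... */
        return VM_FAULT_NOPAGE;
}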
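Both the eviction path and omap_gem_fault_2d() change the virtual-stride-in-pages computation from 1 + ((width << fmt) / PAGE_SIZE) to DIV_ROUND_UP(width << fmt, PAGE_SIZE). The old form over-counts by one page whenever a row is an exact multiple of PAGE_SIZE. A standalone check, assuming the format code is the log2 of bytes per pixel (as the shift in the driver implies):

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int width = 2048, fmt = 1; /* 16-bit tiled: 4096 bytes/row */
        unsigned int old_m = 1 + ((width << fmt) / PAGE_SIZE);
        unsigned int new_m = DIV_ROUND_UP(width << fmt, PAGE_SIZE);

        printf("row bytes=%u old m=%u new m=%u\n", width << fmt, old_m, new_m);
        assert(new_m == 1 && old_m == 2); /* old math wastes a page here */
        return 0;
}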
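omap_gem_fault_2d() rounds the faulting page offset down to a slot boundary (base_pgoff = round_down(pgoff, m << n_shift)) and maps the whole slot in one go, so nearby accesses land on already-inserted PTEs. Worked numbers below are assumptions chosen for illustration (8-bit tiled format, 64-row slots, one page of virtual stride per row):

#include <stdio.h>

#define round_down(x, y) ((x) - ((x) % (y)))

int main(void)
{
        unsigned int n_shift = 6;   /* 64 rows per usergart slot */
        unsigned int m = 1;         /* virtual stride in pages */
        unsigned long pgoff = 130;  /* page that faulted */
        unsigned long base = round_down(pgoff, (unsigned long)m << n_shift);

        /* the whole 64-page slot starting at page 128 is mapped in */
        printf("fault at page %lu -> slot base page %lu\n", pgoff, base);
        return 0;
}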
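omap_gem_dumb_create() and omap_gem_dumb_map_offset() implement the generic DRM dumb-buffer UAPI, so the user-space side is plain libdrm with nothing OMAP-specific. A sketch, assuming fd is an open DRM device node (e.g. /dev/dri/card0) and with error handling trimmed to the essentials:

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xf86drm.h>

static void *create_and_map_dumb(int fd, uint32_t w, uint32_t h)
{
        struct drm_mode_create_dumb create = {
                .width = w, .height = h, .bpp = 32,
        };
        struct drm_mode_map_dumb map = { 0 };

        if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
                return MAP_FAILED;

        map.handle = create.handle;
        if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
                return MAP_FAILED;

        /* map.offset is the fake offset minted by omap_gem_mmap_offset() */
        return mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, map.offset);
}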
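For 2D tiled buffers, the new omap_gem_get_sg() builds one scatterlist segment per row: each segment is width << fmt bytes long and the segments sit tiler_stride() bytes apart in the TILER aperture. A toy printout of that layout; the base address and dimensions are made up for illustration:

#include <stdio.h>

int main(void)
{
        unsigned long addr = 0x60000000;  /* assumed TILER aperture base */
        unsigned int len = 2048;          /* width << fmt, bytes per row */
        unsigned int stride = 16384;      /* assumed tiler_stride() */
        unsigned int rows = 4;

        for (unsigned int i = 0; i < rows; i++, addr += stride)
                printf("seg %u: 0x%08lx + %u bytes\n", i, addr, len);
        return 0;
}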
