path: root/drivers/gpu/drm/i915/gem/i915_gem_pages.c
Diffstat (limited to 'drivers/gpu/drm/i915/gem/i915_gem_pages.c')
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pages.c  522
1 file changed, 378 insertions(+), 144 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index af9e48ee4a33..c2f8e5f95696 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -1,9 +1,19 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
+#include <linux/vmalloc.h>
+
+#include <drm/drm_cache.h>
+#include <drm/drm_panic.h>
+#include <drm/drm_print.h>
+
+#include "display/intel_fb.h"
+#include "display/intel_display_types.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_tlb.h"
+
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
@@ -11,20 +21,21 @@
#include "i915_gem_mman.h"
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
- struct sg_table *pages,
- unsigned int sg_page_sizes)
+ struct sg_table *pages)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
+ bool shrinkable;
int i;
- lockdep_assert_held(&obj->mm.lock);
+ assert_object_held_shared(obj);
if (i915_gem_object_is_volatile(obj))
obj->mm.madv = I915_MADV_DONTNEED;
/* Make the pages coherent with the GPU (flushing any swapin). */
if (obj->cache_dirty) {
+ WARN_ON_ONCE(IS_DGFX(i915));
obj->write_domain = 0;
if (i915_gem_object_has_struct_page(obj))
drm_clflush_sg(pages);
@@ -33,18 +44,13 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
obj->mm.get_page.sg_pos = pages->sgl;
obj->mm.get_page.sg_idx = 0;
+ obj->mm.get_dma_page.sg_pos = pages->sgl;
+ obj->mm.get_dma_page.sg_idx = 0;
obj->mm.pages = pages;
- if (i915_gem_object_is_tiled(obj) &&
- i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
- GEM_BUG_ON(obj->mm.quirked);
- __i915_gem_object_pin_pages(obj);
- obj->mm.quirked = true;
- }
-
- GEM_BUG_ON(!sg_page_sizes);
- obj->mm.page_sizes.phys = sg_page_sizes;
+ obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl);
+ GEM_BUG_ON(!obj->mm.page_sizes.phys);
/*
* Calculate the supported page-sizes which fit into the given
@@ -61,10 +67,22 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
}
GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
- if (i915_gem_object_is_shrinkable(obj)) {
+ shrinkable = i915_gem_object_is_shrinkable(obj);
+
+ if (i915_gem_object_is_tiled(obj) &&
+ i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
+ GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_set_tiling_quirk(obj);
+ GEM_BUG_ON(!list_empty(&obj->mm.link));
+ atomic_inc(&obj->mm.shrink_pin);
+ shrinkable = false;
+ }
+
+ if (shrinkable && !i915_gem_object_has_self_managed_shrink_list(obj)) {
struct list_head *list;
unsigned long flags;
+ assert_object_held(obj);
spin_lock_irqsave(&i915->mm.obj_lock, flags);
i915->mm.shrink_count++;
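
With the sg_page_sizes argument dropped, backends no longer pre-compute the physical page-size mask; __i915_gem_object_set_pages() now derives it from the scatterlist itself via i915_sg_dma_sizes(). A minimal sketch of what a backend ->get_pages() looks like under the new two-argument signature (my_backend_alloc_sg() is a hypothetical helper, not part of this patch):

static int my_backend_get_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	/* Hypothetical helper: allocate and dma-map the backing store. */
	pages = my_backend_alloc_sg(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/*
	 * No sg_page_sizes argument any more; the mask is computed from
	 * the scatterlist by i915_sg_dma_sizes() inside set_pages().
	 */
	__i915_gem_object_set_pages(obj, pages);
	return 0;
}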
@@ -86,6 +104,8 @@ int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
struct drm_i915_private *i915 = to_i915(obj->base.dev);
int err;
+ assert_object_held_shared(obj);
+
if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
drm_dbg(&i915->drm,
"Attempting to obtain a purgeable object\n");
@@ -109,42 +129,51 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
int err;
- err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
- if (err)
- return err;
+ assert_object_held(obj);
+
+ assert_object_held_shared(obj);
if (unlikely(!i915_gem_object_has_pages(obj))) {
GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
err = ____i915_gem_object_get_pages(obj);
if (err)
- goto unlock;
+ return err;
smp_mb__before_atomic();
}
atomic_inc(&obj->mm.pages_pin_count);
-unlock:
- mutex_unlock(&obj->mm.lock);
- return err;
+ return 0;
}
-/* Immediately discard the backing storage */
-void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
+int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
{
- drm_gem_free_mmap_offset(&obj->base);
- if (obj->ops->truncate)
- obj->ops->truncate(obj);
+ struct i915_gem_ww_ctx ww;
+ int err;
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ err = i915_gem_object_lock(obj, &ww);
+ if (!err)
+ err = i915_gem_object_pin_pages(obj);
+
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ return err;
}
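
With obj->mm.lock gone, page pinning is serialised by the object's reservation lock, and contention is resolved with the ww backoff loop shown above. A caller that needs to pin several objects together would open-code the same pattern under a single ww context; a minimal sketch (not part of this patch) using the helpers visible in this diff plus the pre-existing i915_gem_object_unpin_pages():

static int pin_two_objects(struct drm_i915_gem_object *a,
			   struct drm_i915_gem_object *b)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(a, &ww);
	if (!err)
		err = i915_gem_object_lock(b, &ww);
	if (!err)
		err = i915_gem_object_pin_pages(a);
	if (!err) {
		err = i915_gem_object_pin_pages(b);
		if (err)
			i915_gem_object_unpin_pages(a);
	}
	if (err == -EDEADLK) {
		/* Another thread holds one of the locks: drop ours and retry. */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}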
-/* Try to discard unwanted pages */
-void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
+/* Immediately discard the backing storage */
+int i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
- lockdep_assert_held(&obj->mm.lock);
- GEM_BUG_ON(i915_gem_object_has_pages(obj));
+ if (obj->ops->truncate)
+ return obj->ops->truncate(obj);
- if (obj->ops->writeback)
- obj->ops->writeback(obj);
+ return 0;
}
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
@@ -155,6 +184,8 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
rcu_read_lock();
radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
radix_tree_delete(&obj->mm.get_page.radix, iter.index);
+ radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
+ radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
rcu_read_unlock();
}
@@ -162,8 +193,21 @@ static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
if (is_vmalloc_addr(ptr))
vunmap(ptr);
- else
- kunmap(kmap_to_page(ptr));
+}
+
+static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_gt *gt;
+ int id;
+
+ for_each_gt(gt, i915, id) {
+ if (!obj->mm.tlb[id])
+ continue;
+
+ intel_gt_invalidate_tlb_full(gt, obj->mm.tlb[id]);
+ obj->mm.tlb[id] = 0;
+ }
}
struct sg_table *
@@ -171,6 +215,8 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
struct sg_table *pages;
+ assert_object_held_shared(obj);
+
pages = fetch_and_zero(&obj->mm.pages);
if (IS_ERR_OR_NULL(pages))
return pages;
@@ -178,7 +224,8 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
if (i915_gem_object_is_volatile(obj))
obj->mm.madv = I915_MADV_WILLNEED;
- i915_gem_object_make_unshrinkable(obj);
+ if (!i915_gem_object_has_self_managed_shrink_list(obj))
+ i915_gem_object_make_unshrinkable(obj);
if (obj->mm.mapping) {
unmap_object(obj, page_mask_bits(obj->mm.mapping));
@@ -188,23 +235,20 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_reset_page_iter(obj);
obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
+ flush_tlb_invalidate(obj);
+
return pages;
}
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
struct sg_table *pages;
- int err;
if (i915_gem_object_has_pinned_pages(obj))
return -EBUSY;
/* May be called by shrinker from within get_pages() (on another bo) */
- mutex_lock(&obj->mm.lock);
- if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
- err = -EBUSY;
- goto unlock;
- }
+ assert_object_held_shared(obj);
i915_gem_object_release_mmap_offset(obj);
@@ -221,63 +265,46 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
* get_pages backends we should be better able to handle the
* cancellation of the async task in a more uniform manner.
*/
- if (!pages && !i915_gem_object_needs_async_cancel(obj))
- pages = ERR_PTR(-EINVAL);
-
- if (!IS_ERR(pages))
+ if (!IS_ERR_OR_NULL(pages))
obj->ops->put_pages(obj, pages);
- err = 0;
-unlock:
- mutex_unlock(&obj->mm.lock);
-
- return err;
-}
-
-static inline pte_t iomap_pte(resource_size_t base,
- dma_addr_t offset,
- pgprot_t prot)
-{
- return pte_mkspecial(pfn_pte((base + offset) >> PAGE_SHIFT, prot));
+ return 0;
}
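
__i915_gem_object_put_pages() no longer takes obj->mm.lock nor re-checks the pin count under it; the caller is expected to hold the reservation lock (assert_object_held_shared) and gets -EBUSY back if the pages are still pinned. A minimal sketch of that calling convention, mirroring the lock/unlock pair used elsewhere in this file:

static int release_pages(struct drm_i915_gem_object *obj)
{
	int err;

	/* Callers now provide the reservation lock themselves. */
	i915_gem_object_lock(obj, NULL);
	err = __i915_gem_object_put_pages(obj);	/* -EBUSY if still pinned */
	i915_gem_object_unlock(obj);

	return err;
}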
/* The 'mapping' part of i915_gem_object_pin_map() below */
-static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
- enum i915_map_type type)
+static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
+ enum i915_map_type type)
{
- unsigned long n_pte = obj->base.size >> PAGE_SHIFT;
- struct sg_table *sgt = obj->mm.pages;
- pte_t *stack[32], **mem;
- struct vm_struct *area;
+ unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
+ struct page *stack[32], **pages = stack, *page;
+ struct sgt_iter iter;
pgprot_t pgprot;
-
- if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC)
- return NULL;
-
- /* A single page can always be kmapped */
- if (n_pte == 1 && type == I915_MAP_WB)
- return kmap(sg_page(sgt->sgl));
-
- mem = stack;
- if (n_pte > ARRAY_SIZE(stack)) {
- /* Too big for stack -- allocate temporary array instead */
- mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
- if (!mem)
- return NULL;
- }
-
- area = alloc_vm_area(obj->base.size, mem);
- if (!area) {
- if (mem != stack)
- kvfree(mem);
- return NULL;
- }
+ void *vaddr;
switch (type) {
default:
MISSING_CASE(type);
- /* fallthrough - to use PAGE_KERNEL anyway */
+ fallthrough; /* to use PAGE_KERNEL anyway */
case I915_MAP_WB:
+ /*
+ * On 32b, highmem uses a finite set of indirect PTEs (i.e.
+ * vmap) to provide virtual mappings of the high pages.
+ * As these are finite, map_new_virtual() must wait for some
+ * other kmap() to finish when it runs out. If we map a large
+ * number of objects, there is no method for it to tell us
+ * to release the mappings, and we deadlock.
+ *
+ * However, if we make an explicit vmap of the page, that
+ * uses a larger vmalloc arena, and also has the ability
+ * to tell us to release unwanted mappings. Most importantly,
+ * it will fail and propagate an error instead of waiting
+ * forever.
+ *
+ * So if the page is beyond the 32b boundary, make an explicit
+ * vmap.
+ */
+ if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
+ return page_address(sg_page(obj->mm.pages->sgl));
pgprot = PAGE_KERNEL;
break;
case I915_MAP_WC:
@@ -285,30 +312,176 @@ static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
break;
}
- if (i915_gem_object_has_struct_page(obj)) {
- struct sgt_iter iter;
- struct page *page;
- pte_t **ptes = mem;
-
- for_each_sgt_page(page, iter, sgt)
- **ptes++ = mk_pte(page, pgprot);
- } else {
- resource_size_t iomap;
- struct sgt_iter iter;
- pte_t **ptes = mem;
- dma_addr_t addr;
-
- iomap = obj->mm.region->iomap.base;
- iomap -= obj->mm.region->region.start;
-
- for_each_sgt_daddr(addr, iter, sgt)
- **ptes++ = iomap_pte(iomap, addr, pgprot);
+ if (n_pages > ARRAY_SIZE(stack)) {
+ /* Too big for stack -- allocate temporary array instead */
+ pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ i = 0;
+ for_each_sgt_page(page, iter, obj->mm.pages)
+ pages[i++] = page;
+ vaddr = vmap(pages, n_pages, 0, pgprot);
+ if (pages != stack)
+ kvfree(pages);
+
+ return vaddr ?: ERR_PTR(-ENOMEM);
+}
+
+static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
+ enum i915_map_type type)
+{
+ resource_size_t iomap = obj->mm.region->iomap.base -
+ obj->mm.region->region.start;
+ unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
+ unsigned long stack[32], *pfns = stack, i;
+ struct sgt_iter iter;
+ dma_addr_t addr;
+ void *vaddr;
+
+ GEM_BUG_ON(type != I915_MAP_WC);
+
+ if (n_pfn > ARRAY_SIZE(stack)) {
+ /* Too big for stack -- allocate temporary array instead */
+ pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
+ if (!pfns)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ i = 0;
+ for_each_sgt_daddr(addr, iter, obj->mm.pages)
+ pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
+ vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
+ if (pfns != stack)
+ kvfree(pfns);
+
+ return vaddr ?: ERR_PTR(-ENOMEM);
+}
+
+struct intel_panic {
+ struct page **pages;
+ int page;
+ void *vaddr;
+};
+
+static void i915_panic_kunmap(struct intel_panic *panic)
+{
+ if (panic->vaddr) {
+ drm_clflush_virt_range(panic->vaddr, PAGE_SIZE);
+ kunmap_local(panic->vaddr);
+ panic->vaddr = NULL;
+ }
+}
+
+static struct page **i915_gem_object_panic_pages(struct drm_i915_gem_object *obj)
+{
+ unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
+ struct page *page;
+ struct page **pages;
+ struct sgt_iter iter;
+
+ /* For a 3840x2160 32-bit framebuffer, this should require ~64K */
+ pages = kmalloc_array(n_pages, sizeof(*pages), GFP_ATOMIC);
+ if (!pages)
+ return NULL;
+
+ i = 0;
+ for_each_sgt_page(page, iter, obj->mm.pages)
+ pages[i++] = page;
+ return pages;
+}
+
+static void i915_gem_object_panic_map_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
+ unsigned int y, u32 color)
+{
+ struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
+ unsigned int offset = fb->panic_tiling(sb->width, x, y);
+
+ iosys_map_wr(&sb->map[0], offset, u32, color);
+}
+
+/*
+ * The scanout buffer pages are not mapped, so for each pixel,
+ * use kmap_local_page_try_from_panic() to map the page, and write the pixel.
+ * Try to keep the map from the previous pixel, to avoid too much map/unmap.
+ */
+static void i915_gem_object_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
+ unsigned int y, u32 color)
+{
+ unsigned int new_page;
+ unsigned int offset;
+ struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
+ struct intel_panic *panic = fb->panic;
+
+ if (fb->panic_tiling)
+ offset = fb->panic_tiling(sb->width, x, y);
+ else
+ offset = y * sb->pitch[0] + x * sb->format->cpp[0];
+
+ new_page = offset >> PAGE_SHIFT;
+ offset = offset % PAGE_SIZE;
+ if (new_page != panic->page) {
+ i915_panic_kunmap(panic);
+ panic->page = new_page;
+ panic->vaddr =
+ kmap_local_page_try_from_panic(panic->pages[panic->page]);
+ }
+ if (panic->vaddr) {
+ u32 *pix = panic->vaddr + offset;
+ *pix = color;
}
+}
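
As a worked example (assuming 4 KiB pages and an untiled 3840x2160 XRGB8888 scanout, so pitch = 15360 and cpp = 4): pixel (x = 100, y = 10) gives offset = 10 * 15360 + 100 * 4 = 154000, which is page 37 (154000 >> 12) at byte 2448 within that page. Consecutive pixels on a scanline therefore land in the same page and reuse the cached mapping, which is why panic->page and panic->vaddr are kept across calls.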
+
+struct intel_panic *i915_gem_object_alloc_panic(void)
+{
+ struct intel_panic *panic;
+
+ panic = kzalloc(sizeof(*panic), GFP_KERNEL);
+
+ return panic;
+}
+
+/*
+ * Setup the gem framebuffer for drm_panic access.
+ * Use current vaddr if it exists, or setup a list of pages.
+ * pfn is not supported yet.
+ */
+int i915_gem_object_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb,
+ struct drm_gem_object *_obj, bool panic_tiling)
+{
+ enum i915_map_type has_type;
+ struct drm_i915_gem_object *obj = to_intel_bo(_obj);
+ void *ptr;
- if (mem != stack)
- kvfree(mem);
+ ptr = page_unpack_bits(obj->mm.mapping, &has_type);
+ if (ptr) {
+ if (i915_gem_object_has_iomem(obj))
+ iosys_map_set_vaddr_iomem(&sb->map[0], (void __iomem *)ptr);
+ else
+ iosys_map_set_vaddr(&sb->map[0], ptr);
+
+ if (panic_tiling)
+ sb->set_pixel = i915_gem_object_panic_map_set_pixel;
+ return 0;
+ }
+ if (i915_gem_object_has_struct_page(obj)) {
+ panic->pages = i915_gem_object_panic_pages(obj);
+ if (!panic->pages)
+ return -ENOMEM;
+ panic->page = -1;
+ sb->set_pixel = i915_gem_object_panic_page_set_pixel;
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
- return area->addr;
+void i915_gem_object_panic_finish(struct intel_panic *panic)
+{
+ i915_panic_kunmap(panic);
+ panic->page = -1;
+ kfree(panic->pages);
+ panic->pages = NULL;
}
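
Taken together, the panic hooks are intended to be driven from the display/drm_panic side roughly as follows. This is only a sketch of the expected call sequence; the surrounding drm_panic plumbing and draw_panic_message() are assumptions, not part of this patch:

	struct intel_panic *panic = i915_gem_object_alloc_panic();

	if (panic &&
	    !i915_gem_object_panic_setup(panic, sb, &obj->base, !!fb->panic_tiling)) {
		/* drm_panic draws through sb->set_pixel(sb, x, y, color)... */
		draw_panic_message(sb);		/* hypothetical drawing step */

		/* ...and the per-page mapping state is dropped afterwards. */
		i915_gem_object_panic_finish(panic);
	}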
/* get, pin, and map the pages of the object into kernel space */
@@ -316,18 +489,18 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
enum i915_map_type has_type;
- unsigned int flags;
bool pinned;
void *ptr;
int err;
- flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
- if (!i915_gem_object_type_has(obj, flags))
+ if (!i915_gem_object_has_struct_page(obj) &&
+ !i915_gem_object_has_iomem(obj))
return ERR_PTR(-ENXIO);
- err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
- if (err)
- return ERR_PTR(err);
+ if (WARN_ON_ONCE(obj->flags & I915_BO_ALLOC_GPU_ONLY))
+ return ERR_PTR(-EINVAL);
+
+ assert_object_held(obj);
pinned = !(type & I915_MAP_OVERRIDE);
type &= ~I915_MAP_OVERRIDE;
@@ -338,7 +511,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
err = ____i915_gem_object_get_pages(obj);
if (err)
- goto err_unlock;
+ return ERR_PTR(err);
smp_mb__before_atomic();
}
@@ -347,10 +520,38 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
}
GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+ /*
+ * For discrete, our CPU mappings need to be consistent in order to
+ * function correctly on !x86. When mapping things through TTM, we use
+ * the same rules to determine the caching type.
+ *
+ * The caching rules, starting from DG1:
+ *
+ * - If the object can be placed in device local-memory, then the
+ * pages should be allocated and mapped as write-combined only.
+ *
+ * - Everything else is always allocated and mapped as write-back,
+ * with the guarantee that everything is also coherent with the
+ * GPU.
+ *
+ * Internal users of lmem are already expected to get this right, so no
+ * fudging needed there.
+ */
+ if (i915_gem_object_placement_possible(obj, INTEL_MEMORY_LOCAL)) {
+ if (type != I915_MAP_WC && !obj->mm.n_placements) {
+ ptr = ERR_PTR(-ENODEV);
+ goto err_unpin;
+ }
+
+ type = I915_MAP_WC;
+ } else if (IS_DGFX(to_i915(obj->base.dev))) {
+ type = I915_MAP_WB;
+ }
+
ptr = page_unpack_bits(obj->mm.mapping, &has_type);
if (ptr && has_type != type) {
if (pinned) {
- err = -EBUSY;
+ ptr = ERR_PTR(-EBUSY);
goto err_unpin;
}
@@ -360,24 +561,41 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
}
if (!ptr) {
- ptr = i915_gem_object_map(obj, type);
- if (!ptr) {
- err = -ENOMEM;
+ err = i915_gem_object_wait_moving_fence(obj, true);
+ if (err) {
+ ptr = ERR_PTR(err);
goto err_unpin;
}
+ if (GEM_WARN_ON(type == I915_MAP_WC && !pat_enabled()))
+ ptr = ERR_PTR(-ENODEV);
+ else if (i915_gem_object_has_struct_page(obj))
+ ptr = i915_gem_object_map_page(obj, type);
+ else
+ ptr = i915_gem_object_map_pfn(obj, type);
+ if (IS_ERR(ptr))
+ goto err_unpin;
+
obj->mm.mapping = page_pack_bits(ptr, type);
}
-out_unlock:
- mutex_unlock(&obj->mm.lock);
return ptr;
err_unpin:
atomic_dec(&obj->mm.pages_pin_count);
-err_unlock:
- ptr = ERR_PTR(err);
- goto out_unlock;
+ return ptr;
+}
+
+void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
+ enum i915_map_type type)
+{
+ void *ret;
+
+ i915_gem_object_lock(obj, NULL);
+ ret = i915_gem_object_pin_map(obj, type);
+ i915_gem_object_unlock(obj);
+
+ return ret;
}
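
i915_gem_object_pin_map() now requires the object lock to be held (assert_object_held), while the *_unlocked wrapper takes and drops it internally; note also that on discrete parts the requested type may be coerced: lmem-capable objects are always mapped I915_MAP_WC and everything else I915_MAP_WB. A minimal sketch of filling an object through a CPU map, using only helpers from this file:

static int fill_object(struct drm_i915_gem_object *obj,
		       const void *src, size_t len)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memcpy(vaddr, src, len);

	/* Flush write-combine buffers (or CPU caches for a WB map). */
	__i915_gem_object_flush_map(obj, 0, len);
	i915_gem_object_unpin_map(obj);

	return 0;
}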
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
@@ -408,18 +626,37 @@ void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
}
}
+void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
+{
+ GEM_BUG_ON(!obj->mm.mapping);
+
+ /*
+ * We allow removing the mapping from underneath pinned pages!
+ *
+ * Furthermore, since this is an unsafe operation reserved only
+ * for construction time manipulation, we ignore locking prudence.
+ */
+ unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));
+
+ i915_gem_object_unpin_map(obj);
+}
+
struct scatterlist *
-i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
- unsigned int n,
- unsigned int *offset)
+__i915_gem_object_page_iter_get_sg(struct drm_i915_gem_object *obj,
+ struct i915_gem_object_page_iter *iter,
+ pgoff_t n,
+ unsigned int *offset)
+
{
- struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
- struct scatterlist *sg;
+ const bool dma = iter == &obj->mm.get_dma_page ||
+ iter == &obj->ttm.get_io_page;
unsigned int idx, count;
+ struct scatterlist *sg;
might_sleep();
GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ if (!i915_gem_object_has_pinned_pages(obj))
+ assert_object_held(obj);
/* As we iterate forward through the sg, we record each entry in a
* radixtree for quick repeated (backwards) lookups. If we have seen
@@ -442,7 +679,7 @@ i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
sg = iter->sg_pos;
idx = iter->sg_idx;
- count = __sg_page_count(sg);
+ count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
while (idx + count <= n) {
void *entry;
@@ -470,7 +707,7 @@ i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
idx += count;
sg = ____sg_next(sg);
- count = __sg_page_count(sg);
+ count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
}
scan:
@@ -488,7 +725,7 @@ scan:
while (idx + count <= n) {
idx += count;
sg = ____sg_next(sg);
- count = __sg_page_count(sg);
+ count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
}
*offset = n - idx;
@@ -522,7 +759,7 @@ lookup:
}
struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
+__i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n)
{
struct scatterlist *sg;
unsigned int offset;
@@ -530,13 +767,12 @@ i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
sg = i915_gem_object_get_sg(obj, n, &offset);
- return nth_page(sg_page(sg), offset);
+ return sg_page(sg) + offset;
}
/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
- unsigned int n)
+__i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, pgoff_t n)
{
struct page *page;
@@ -548,14 +784,13 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
}
dma_addr_t
-i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
- unsigned long n,
- unsigned int *len)
+__i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
+ pgoff_t n, unsigned int *len)
{
struct scatterlist *sg;
unsigned int offset;
- sg = i915_gem_object_get_sg(obj, n, &offset);
+ sg = i915_gem_object_get_sg_dma(obj, n, &offset);
if (len)
*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);
@@ -564,8 +799,7 @@ i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
}
dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
- unsigned long n)
+__i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, pgoff_t n)
{
return i915_gem_object_get_dma_address_len(obj, n, NULL);
}
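
The lookup helpers now take a pgoff_t and come in CPU-page and DMA flavours, each caching its scan position in its own iterator (get_page vs. get_dma_page). A minimal sketch of walking an object's backing store with the non-underscore wrappers (assumed to remain thin wrappers around the __ versions above; the pages must already be pinned by the caller):

	pgoff_t n;

	for (n = 0; n < obj->base.size >> PAGE_SHIFT; n++) {
		struct page *page = i915_gem_object_get_page(obj, n);
		dma_addr_t dma = i915_gem_object_get_dma_address(obj, n);

		/* e.g. build a PTE from 'dma', or access 'page' via kmap_local_page() */
		pr_debug("page %lu: pfn %lx, dma %pad\n",
			 n, page_to_pfn(page), &dma);
	}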