Diffstat (limited to 'drivers/gpu/drm/i915/gem')
-rw-r--r-- | drivers/gpu/drm/i915/gem/i915_gem_domain.c               |   1
-rw-r--r-- | drivers/gpu/drm/i915/gem/i915_gem_mman.c                 |   1
-rw-r--r-- | drivers/gpu/drm/i915/gem/i915_gem_object.h               |   6
-rw-r--r-- | drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h   |   2
-rw-r--r-- | drivers/gpu/drm/i915/gem/i915_gem_pages.c                | 142
-rw-r--r-- | drivers/gpu/drm/i915/gem/i915_gem_shmem.c                |   3
-rw-r--r-- | drivers/gpu/drm/i915/gem/i915_gem_wait.c                 |   7
-rw-r--r-- | drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c |   1
8 files changed, 152 insertions, 11 deletions
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 7a0cc51923b3..ef3b14ae2e0d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -3,7 +3,6 @@
  * Copyright © 2014-2016 Intel Corporation
  */
 
-#include "display/intel_display.h"
 #include "gt/intel_gt.h"
 
 #include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index f6d37dff320d..75f5b0e871ef 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -5,7 +5,6 @@
 
 #include <linux/anon_inodes.h>
 #include <linux/mman.h>
-#include <linux/pfn_t.h>
 #include <linux/sizes.h>
 
 #include <drm/drm_cache.h>
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index c34f41605b46..565f8fa330db 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -16,7 +16,9 @@
 #include "i915_gem_ww.h"
 #include "i915_vma_types.h"
 
+struct drm_scanout_buffer;
 enum intel_region_id;
+struct intel_framebuffer;
 
 #define obj_to_i915(obj__) to_i915((obj__)->base.dev)
 
@@ -691,6 +693,10 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 int i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
+struct intel_framebuffer *i915_gem_object_alloc_framebuffer(void);
+int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb);
+void i915_gem_object_panic_finish(struct intel_framebuffer *fb);
+
 /**
  * i915_gem_object_pin_map - return a contiguous mapping of the entire object
  * @obj: the object to map into kernel address space
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
index 9fbf14867a2a..b6dc3d1b9bb1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
@@ -77,7 +77,7 @@ i915_gem_object_get_frontbuffer(const struct drm_i915_gem_object *obj)
  * Set object's frontbuffer pointer. If frontbuffer is already set for the
  * object keep it and return it's pointer to the caller. Please note that RCU
  * mechanism is used to handle e.g. ongoing removal of frontbuffer pointer. This
- * function is protected by i915->display.fb_tracking.lock
+ * function is protected by i915->display->fb_tracking.lock
  *
  * Return: pointer to frontbuffer which was set.
  */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 7f83f8bdc8fb..c16a57160b26 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -4,8 +4,11 @@
  */
 
 #include <drm/drm_cache.h>
+#include <drm/drm_panic.h>
 #include <linux/vmalloc.h>
 
+#include "display/intel_fb.h"
+#include "display/intel_display_types.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_tlb.h"
 
@@ -354,6 +357,145 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
 	return vaddr ?: ERR_PTR(-ENOMEM);
 }
 
+struct i915_panic_data {
+	struct page **pages;
+	int page;
+	void *vaddr;
+};
+
+struct i915_framebuffer {
+	struct intel_framebuffer base;
+	struct i915_panic_data panic;
+};
+
+static inline struct i915_panic_data *to_i915_panic_data(struct intel_framebuffer *fb)
+{
+	return &container_of_const(fb, struct i915_framebuffer, base)->panic;
+}
+
+static void i915_panic_kunmap(struct i915_panic_data *panic)
+{
+	if (panic->vaddr) {
+		drm_clflush_virt_range(panic->vaddr, PAGE_SIZE);
+		kunmap_local(panic->vaddr);
+		panic->vaddr = NULL;
+	}
+}
+
+static struct page **i915_gem_object_panic_pages(struct drm_i915_gem_object *obj)
+{
+	unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
+	struct page *page;
+	struct page **pages;
+	struct sgt_iter iter;
+
+	/* For a 3840x2160 32 bits Framebuffer, this should require ~64K */
+	pages = kmalloc_array(n_pages, sizeof(*pages), GFP_ATOMIC);
+	if (!pages)
+		return NULL;
+
+	i = 0;
+	for_each_sgt_page(page, iter, obj->mm.pages)
+		pages[i++] = page;
+	return pages;
+}
+
+static void i915_gem_object_panic_map_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
+						unsigned int y, u32 color)
+{
+	struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
+	unsigned int offset = fb->panic_tiling(sb->width, x, y);
+
+	iosys_map_wr(&sb->map[0], offset, u32, color);
+}
+
+/*
+ * The scanout buffer pages are not mapped, so for each pixel,
+ * use kmap_local_page_try_from_panic() to map the page, and write the pixel.
+ * Try to keep the map from the previous pixel, to avoid too much map/unmap.
+ */
+static void i915_gem_object_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
+						 unsigned int y, u32 color)
+{
+	unsigned int new_page;
+	unsigned int offset;
+	struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
+	struct i915_panic_data *panic = to_i915_panic_data(fb);
+
+	if (fb->panic_tiling)
+		offset = fb->panic_tiling(sb->width, x, y);
+	else
+		offset = y * sb->pitch[0] + x * sb->format->cpp[0];
+
+	new_page = offset >> PAGE_SHIFT;
+	offset = offset % PAGE_SIZE;
+	if (new_page != panic->page) {
+		i915_panic_kunmap(panic);
+		panic->page = new_page;
+		panic->vaddr =
+			kmap_local_page_try_from_panic(panic->pages[panic->page]);
+	}
+	if (panic->vaddr) {
+		u32 *pix = panic->vaddr + offset;
+		*pix = color;
+	}
+}
+
+struct intel_framebuffer *i915_gem_object_alloc_framebuffer(void)
+{
+	struct i915_framebuffer *i915_fb;
+
+	i915_fb = kzalloc(sizeof(*i915_fb), GFP_KERNEL);
+	if (i915_fb)
+		return &i915_fb->base;
+	return NULL;
+}
+
+/*
+ * Setup the gem framebuffer for drm_panic access.
+ * Use current vaddr if it exists, or setup a list of pages.
+ * pfn is not supported yet.
+ */
+int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb)
+{
+	enum i915_map_type has_type;
+	struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
+	struct i915_panic_data *panic = to_i915_panic_data(fb);
+	struct drm_i915_gem_object *obj = to_intel_bo(intel_fb_bo(&fb->base));
+	void *ptr;
+
+	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
+	if (ptr) {
+		if (i915_gem_object_has_iomem(obj))
+			iosys_map_set_vaddr_iomem(&sb->map[0], (void __iomem *)ptr);
+		else
+			iosys_map_set_vaddr(&sb->map[0], ptr);
+
+		if (fb->panic_tiling)
+			sb->set_pixel = i915_gem_object_panic_map_set_pixel;
+		return 0;
+	}
+	if (i915_gem_object_has_struct_page(obj)) {
+		panic->pages = i915_gem_object_panic_pages(obj);
+		if (!panic->pages)
+			return -ENOMEM;
+		panic->page = -1;
+		sb->set_pixel = i915_gem_object_panic_page_set_pixel;
+		return 0;
+	}
+	return -EOPNOTSUPP;
+}
+
+void i915_gem_object_panic_finish(struct intel_framebuffer *fb)
+{
+	struct i915_panic_data *panic = to_i915_panic_data(fb);
+
+	i915_panic_kunmap(panic);
+	panic->page = -1;
+	kfree(panic->pages);
+	panic->pages = NULL;
+}
+
 /* get, pin, and map the pages of the object into kernel space */
 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 			      enum i915_map_type type)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 9cbb0f68a5bb..e3d188455f67 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -303,7 +303,6 @@ void __shmem_writeback(size_t size, struct address_space *mapping)
 		.nr_to_write = SWAP_CLUSTER_MAX,
 		.range_start = 0,
 		.range_end = LLONG_MAX,
-		.for_reclaim = 1,
 	};
 	struct folio *folio = NULL;
 	int error = 0;
@@ -318,7 +317,7 @@ void __shmem_writeback(size_t size, struct address_space *mapping)
 		if (folio_mapped(folio))
 			folio_redirty_for_writepage(&wbc, folio);
 		else
-			error = shmem_writeout(folio, &wbc);
+			error = shmem_writeout(folio, NULL, NULL);
 	}
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 7127e90c1a8f..991666fd9f85 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -106,11 +106,6 @@ static void fence_set_priority(struct dma_fence *fence,
 	rcu_read_unlock();
 }
 
-static inline bool __dma_fence_is_chain(const struct dma_fence *fence)
-{
-	return fence->ops == &dma_fence_chain_ops;
-}
-
 void i915_gem_fence_wait_priority(struct dma_fence *fence,
 				  const struct i915_sched_attr *attr)
 {
@@ -126,7 +121,7 @@ void i915_gem_fence_wait_priority(struct dma_fence *fence,
 		for (i = 0; i < array->num_fences; i++)
 			fence_set_priority(array->fences[i], attr);
 
-	} else if (__dma_fence_is_chain(fence)) {
+	} else if (dma_fence_is_chain(fence)) {
 		struct dma_fence *iter;
 
 		/* The chain is ordered; if we boost the last, we boost all */
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index bac15196b4d2..86d9d2fcb6a6 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -5,6 +5,7 @@
 
 #include "i915_selftest.h"
 
+#include "display/intel_display_core.h"
 #include "gt/intel_context.h"
 #include "gt/intel_engine_regs.h"
 #include "gt/intel_engine_user.h"
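
Context for the i915_gem_pages.c hunk above, not part of the patch: a minimal sketch of how a display-side drm_panic .get_scanout_buffer() hook could hand a GEM-backed framebuffer to the new helpers. The function name example_get_scanout_buffer and the exact field wiring are illustrative assumptions; only i915_gem_object_panic_setup()/i915_gem_object_panic_finish() and the drm_scanout_buffer fields used here come from the diff.

/*
 * Sketch only (not from this patch): hypothetical .get_scanout_buffer()
 * implementation, assuming the framebuffer object is already pinned and
 * its vmap or backing pages are reachable at panic time.
 */
#include <drm/drm_panic.h>

#include "display/intel_display_types.h"
#include "gem/i915_gem_object.h"

static int example_get_scanout_buffer(struct intel_framebuffer *fb,
				      struct drm_scanout_buffer *sb)
{
	/* Describe the framebuffer geometry to drm_panic. */
	sb->format = fb->base.format;
	sb->width = fb->base.width;
	sb->height = fb->base.height;
	sb->pitch[0] = fb->base.pitches[0];

	/* The GEM helpers recover fb (and its i915_panic_data) from here. */
	sb->private = fb;

	/*
	 * Picks the vmap-based path if obj->mm.mapping already exists,
	 * otherwise the per-page kmap_local_page_try_from_panic() path.
	 */
	return i915_gem_object_panic_setup(sb);
}

Once the panic screen has been drawn, the matching teardown would be i915_gem_object_panic_finish(fb), which flushes and unmaps any page still kmapped and frees the page array.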