Diffstat (limited to 'drivers/gpu/drm/drm_cache.c')
-rw-r--r--	drivers/gpu/drm/drm_cache.c	217
1 file changed, 197 insertions, 20 deletions
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 3bd76e918b5d..ea1d2d5d2c66 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -27,12 +27,18 @@
 /*
  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
  */
-
+#include <linux/cc_platform.h>
 #include <linux/export.h>
 #include <linux/highmem.h>
+#include <linux/ioport.h>
+#include <linux/iosys-map.h>
+#include <xen/xen.h>
 
 #include <drm/drm_cache.h>
 
+/* A small bounce buffer that fits on the stack. */
+#define MEMCPY_BOUNCE_SIZE 128
+
 #if defined(CONFIG_X86)
 #include <asm/smp.h>
 
@@ -62,10 +68,10 @@ static void drm_cache_flush_clflush(struct page *pages[],
 {
 	unsigned long i;
 
-	mb();
+	mb(); /*Full memory barrier used before so that CLFLUSH is ordered*/
 	for (i = 0; i < num_pages; i++)
 		drm_clflush_page(*pages++);
-	mb();
+	mb(); /*Also used after CLFLUSH so that all cache is flushed*/
 }
 #endif
 
@@ -87,11 +93,11 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 		return;
 	}
 
-	if (wbinvd_on_all_cpus())
-		pr_err("Timed out waiting for cache flush\n");
+	wbinvd_on_all_cpus();
 
 #elif defined(__powerpc__)
 	unsigned long i;
+
 	for (i = 0; i < num_pages; i++) {
 		struct page *page = pages[i];
 		void *page_virtual;
@@ -105,8 +111,7 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 		kunmap_atomic(page_virtual);
 	}
 #else
-	pr_err("Architecture has no drm_cache.c support\n");
-	WARN_ON_ONCE(1);
+	WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
 #endif
 }
 EXPORT_SYMBOL(drm_clflush_pages);
@@ -125,19 +130,17 @@ drm_clflush_sg(struct sg_table *st)
 	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		struct sg_page_iter sg_iter;
 
-		mb();
-		for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+		mb(); /*CLFLUSH is ordered only by using memory barriers*/
+		for_each_sgtable_page(st, &sg_iter, 0)
 			drm_clflush_page(sg_page_iter_page(&sg_iter));
-		mb();
+		mb(); /*Make sure that all cache line entry is flushed*/
 
 		return;
 	}
 
-	if (wbinvd_on_all_cpus())
-		pr_err("Timed out waiting for cache flush\n");
+	wbinvd_on_all_cpus();
 #else
-	pr_err("Architecture has no drm_cache.c support\n");
-	WARN_ON_ONCE(1);
+	WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
 #endif
 }
 EXPORT_SYMBOL(drm_clflush_sg);
@@ -157,20 +160,194 @@ drm_clflush_virt_range(void *addr, unsigned long length)
 	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		const int size = boot_cpu_data.x86_clflush_size;
 		void *end = addr + length;
+
 		addr = (void *)(((unsigned long)addr) & -size);
-		mb();
+		mb(); /*CLFLUSH is only ordered with a full memory barrier*/
 		for (; addr < end; addr += size)
 			clflushopt(addr);
 		clflushopt(end - 1); /* force serialisation */
-		mb();
+		mb(); /*Ensure that every data cache line entry is flushed*/
 		return;
 	}
 
-	if (wbinvd_on_all_cpus())
-		pr_err("Timed out waiting for cache flush\n");
+	wbinvd_on_all_cpus();
 #else
-	pr_err("Architecture has no drm_cache.c support\n");
-	WARN_ON_ONCE(1);
+	WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
 #endif
 }
 EXPORT_SYMBOL(drm_clflush_virt_range);
+
+bool drm_need_swiotlb(int dma_bits)
+{
+	struct resource *tmp;
+	resource_size_t max_iomem = 0;
+
+	/*
+	 * Xen paravirtual hosts require swiotlb regardless of requested dma
+	 * transfer size.
+	 *
+	 * NOTE: Really, what it requires is use of the dma_alloc_coherent
+	 *       allocator used in ttm_dma_populate() instead of
+	 *       ttm_populate_and_map_pages(), which bounce buffers so much in
+	 *       Xen it leads to swiotlb buffer exhaustion.
+	 */
+	if (xen_pv_domain())
+		return true;
+
+	/*
+	 * Enforce dma_alloc_coherent when memory encryption is active as well
+	 * for the same reasons as for Xen paravirtual hosts.
+	 */
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
+		return true;
+
+	for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling)
+		max_iomem = max(max_iomem, tmp->end);
+
+	return max_iomem > ((u64)1 << dma_bits);
+}
+EXPORT_SYMBOL(drm_need_swiotlb);
+
+static void memcpy_fallback(struct iosys_map *dst,
+			    const struct iosys_map *src,
+			    unsigned long len)
+{
+	if (!dst->is_iomem && !src->is_iomem) {
+		memcpy(dst->vaddr, src->vaddr, len);
+	} else if (!src->is_iomem) {
+		iosys_map_memcpy_to(dst, 0, src->vaddr, len);
+	} else if (!dst->is_iomem) {
+		memcpy_fromio(dst->vaddr, src->vaddr_iomem, len);
+	} else {
+		/*
+		 * Bounce size is not performance tuned, but using a
+		 * bounce buffer like this is significantly faster than
+		 * resorting to ioreadxx() + iowritexx().
+		 */
+		char bounce[MEMCPY_BOUNCE_SIZE];
+		void __iomem *_src = src->vaddr_iomem;
+		void __iomem *_dst = dst->vaddr_iomem;
+
+		while (len >= MEMCPY_BOUNCE_SIZE) {
+			memcpy_fromio(bounce, _src, MEMCPY_BOUNCE_SIZE);
+			memcpy_toio(_dst, bounce, MEMCPY_BOUNCE_SIZE);
+			_src += MEMCPY_BOUNCE_SIZE;
+			_dst += MEMCPY_BOUNCE_SIZE;
+			len -= MEMCPY_BOUNCE_SIZE;
+		}
+		if (len) {
+			memcpy_fromio(bounce, _src, MEMCPY_BOUNCE_SIZE);
+			memcpy_toio(_dst, bounce, MEMCPY_BOUNCE_SIZE);
+		}
+	}
+}
+
+#ifdef CONFIG_X86
+
+static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
+
+static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
+{
+	kernel_fpu_begin();
+
+	while (len >= 4) {
+		asm("movntdqa (%0), %%xmm0\n"
+		    "movntdqa 16(%0), %%xmm1\n"
+		    "movntdqa 32(%0), %%xmm2\n"
+		    "movntdqa 48(%0), %%xmm3\n"
+		    "movaps %%xmm0, (%1)\n"
+		    "movaps %%xmm1, 16(%1)\n"
+		    "movaps %%xmm2, 32(%1)\n"
+		    "movaps %%xmm3, 48(%1)\n"
+		    :: "r" (src), "r" (dst) : "memory");
+		src += 64;
+		dst += 64;
+		len -= 4;
+	}
+	while (len--) {
+		asm("movntdqa (%0), %%xmm0\n"
+		    "movaps %%xmm0, (%1)\n"
+		    :: "r" (src), "r" (dst) : "memory");
+		src += 16;
+		dst += 16;
+	}
+
+	kernel_fpu_end();
+}
+
+/*
+ * __drm_memcpy_from_wc copies @len bytes from @src to @dst using
+ * non-temporal instructions where available. Note that all arguments
+ * (@src, @dst) must be aligned to 16 bytes and @len must be a multiple
+ * of 16.
+ */
+static void __drm_memcpy_from_wc(void *dst, const void *src, unsigned long len)
+{
+	if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15))
+		memcpy(dst, src, len);
+	else if (likely(len))
+		__memcpy_ntdqa(dst, src, len >> 4);
+}
+
+/**
+ * drm_memcpy_from_wc - Perform the fastest available memcpy from a source
+ * that may be WC.
+ * @dst: The destination pointer
+ * @src: The source pointer
+ * @len: The size of the area o transfer in bytes
+ *
+ * Tries an arch optimized memcpy for prefetching reading out of a WC region,
+ * and if no such beast is available, falls back to a normal memcpy.
+ */
+void drm_memcpy_from_wc(struct iosys_map *dst,
+			const struct iosys_map *src,
+			unsigned long len)
+{
+	if (WARN_ON(in_interrupt())) {
+		memcpy_fallback(dst, src, len);
+		return;
+	}
+
+	if (static_branch_likely(&has_movntdqa)) {
+		__drm_memcpy_from_wc(dst->is_iomem ?
+				     (void __force *)dst->vaddr_iomem :
+				     dst->vaddr,
+				     src->is_iomem ?
+				     (void const __force *)src->vaddr_iomem :
+				     src->vaddr,
+				     len);
+		return;
+	}
+
+	memcpy_fallback(dst, src, len);
+}
+EXPORT_SYMBOL(drm_memcpy_from_wc);
+
+/*
+ * drm_memcpy_init_early - One time initialization of the WC memcpy code
+ */
+void drm_memcpy_init_early(void)
+{
+	/*
+	 * Some hypervisors (e.g. KVM) don't support VEX-prefix instructions
+	 * emulation. So don't enable movntdqa in hypervisor guest.
+	 */
+	if (static_cpu_has(X86_FEATURE_XMM4_1) &&
	    !boot_cpu_has(X86_FEATURE_HYPERVISOR))
+		static_branch_enable(&has_movntdqa);
+}
+#else
+void drm_memcpy_from_wc(struct iosys_map *dst,
+			const struct iosys_map *src,
+			unsigned long len)
+{
+	WARN_ON(in_interrupt());
+
+	memcpy_fallback(dst, src, len);
+}
+EXPORT_SYMBOL(drm_memcpy_from_wc);
+
+void drm_memcpy_init_early(void)
+{
+}
+#endif /* CONFIG_X86 */
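
The drm_need_swiotlb() helper added above answers whether a device with a given DMA addressing width needs bounce buffering (or dma_alloc_coherent) because I/O memory exists above its reach, or because Xen PV / memory encryption is active. A minimal caller sketch follows; the 40-bit limit and the function name are illustrative assumptions, not part of this patch:

	#include <drm/drm_cache.h>

	/* Illustrative only: a TTM-based driver whose engine addresses 40 bits
	 * of DMA asks at init time whether swiotlb / coherent allocations are
	 * required for the platform it is running on.
	 */
	static bool example_driver_needs_swiotlb(void)
	{
		return drm_need_swiotlb(40);
	}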
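The exported drm_memcpy_from_wc() takes iosys_map descriptors, so a single call handles system-memory and I/O-memory endpoints. A minimal usage sketch under assumptions not in this patch (the helper name example_read_back_vram() and the premise that the source is a write-combined VRAM mapping are hypothetical):

	#include <linux/iosys-map.h>
	#include <drm/drm_cache.h>

	/* Illustrative only: copy a buffer out of write-combined VRAM into a
	 * system-memory shadow copy via the helper added by this patch.
	 */
	static void example_read_back_vram(void __iomem *vram, void *shadow,
					   unsigned long len)
	{
		struct iosys_map src, dst;

		iosys_map_set_vaddr_iomem(&src, vram);	/* I/O memory source */
		iosys_map_set_vaddr(&dst, shadow);	/* system memory destination */

		/* Uses MOVNTDQA on x86 where enabled, memcpy_fallback() otherwise. */
		drm_memcpy_from_wc(&dst, &src, len);
	}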
