Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c')
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 455
 1 file changed, 283 insertions(+), 172 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index a57abc1a25fb..d2237ce9da70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -25,12 +25,18 @@
  * Alex Deucher
  * Jerome Glisse
  */
-#include <drm/drmP.h>
+
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
 #include <drm/amdgpu_drm.h>
 #ifdef CONFIG_X86
 #include <asm/set_memory.h>
 #endif
 #include "amdgpu.h"
+#include "amdgpu_reset.h"
+#include <drm/drm_drv.h>
+#include <drm/ttm/ttm_tt.h>
 
 /*
  * GART
@@ -55,144 +61,213 @@
 /*
  * Common GART table functions.
  */
+
 /**
- * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
+ * amdgpu_gart_dummy_page_init - init dummy page used by the driver
  *
  * @adev: amdgpu_device pointer
  *
- * Allocate system memory for GART page table
- * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
- * gart table to be in system memory.
- * Returns 0 for success, -ENOMEM for failure.
+ * Allocate the dummy page used by the driver (all asics).
+ * This dummy page is used by the driver as a filler for gart entries
+ * when pages are taken out of the GART.
+ * Returns 0 on success, -ENOMEM on failure.
  */
-int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
+static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
 {
-	void *ptr;
+	struct page *dummy_page = ttm_glob.dummy_read_page;
 
-	ptr = pci_alloc_consistent(adev->pdev, adev->gart.table_size,
-				   &adev->gart.table_addr);
-	if (ptr == NULL) {
+	if (adev->dummy_page_addr)
+		return 0;
+	adev->dummy_page_addr = dma_map_page_attrs(&adev->pdev->dev, dummy_page, 0,
+						   PAGE_SIZE, DMA_BIDIRECTIONAL,
+						   DMA_ATTR_SKIP_CPU_SYNC);
+	if (dma_mapping_error(&adev->pdev->dev, adev->dummy_page_addr)) {
+		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
+		adev->dummy_page_addr = 0;
 		return -ENOMEM;
 	}
-#ifdef CONFIG_X86
-	if (0) {
-		set_memory_uc((unsigned long)ptr,
-			      adev->gart.table_size >> PAGE_SHIFT);
-	}
-#endif
-	adev->gart.ptr = ptr;
-	memset((void *)adev->gart.ptr, 0, adev->gart.table_size);
 	return 0;
 }
 
 /**
- * amdgpu_gart_table_ram_free - free system ram for gart page table
+ * amdgpu_gart_dummy_page_fini - free dummy page used by the driver
  *
  * @adev: amdgpu_device pointer
  *
- * Free system memory for GART page table
- * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
- * gart table to be in system memory.
+ * Frees the dummy page used by the driver (all asics).
  */
-void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
+void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
 {
-	if (adev->gart.ptr == NULL) {
+	if (!adev->dummy_page_addr)
 		return;
-	}
-#ifdef CONFIG_X86
-	if (0) {
-		set_memory_wb((unsigned long)adev->gart.ptr,
-			      adev->gart.table_size >> PAGE_SHIFT);
-	}
-#endif
-	pci_free_consistent(adev->pdev, adev->gart.table_size,
-			    (void *)adev->gart.ptr,
-			    adev->gart.table_addr);
-	adev->gart.ptr = NULL;
-	adev->gart.table_addr = 0;
+	dma_unmap_page_attrs(&adev->pdev->dev, adev->dummy_page_addr, PAGE_SIZE,
+			     DMA_BIDIRECTIONAL,
+			     DMA_ATTR_SKIP_CPU_SYNC);
+	adev->dummy_page_addr = 0;
 }
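Background for the two helpers above: every GART entry whose real backing page is removed gets re-pointed at this single shared, DMA-mapped page, so a late GPU access reads harmless memory instead of faulting. A minimal sketch of the idea — make_pte() and the flat table layout are hypothetical stand-ins for illustration, not the driver's API:

    #include <linux/types.h>

    /* Hypothetical PTE encoder, for illustration only; the driver's real
     * encoding lives behind amdgpu_gmc_set_pte_pde().
     */
    static u64 make_pte(dma_addr_t addr, u64 flags)
    {
    	return (u64)addr | flags;
    }

    /* Point a range of GART entries at one shared dummy page. */
    static void fill_with_dummy(u64 *gart, unsigned int first,
    			    unsigned int count, dma_addr_t dummy_addr)
    {
    	unsigned int i;

    	/* flags stay 0: from VEGA10 on, no bits set means "invalid" */
    	for (i = 0; i < count; i++)
    		gart[first + i] = make_pte(dummy_addr, 0);
    }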
 
 /**
- * amdgpu_gart_table_vram_alloc - allocate vram for gart page table
+ * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
  *
  * @adev: amdgpu_device pointer
  *
- * Allocate video memory for GART page table
- * (pcie r4xx, r5xx+). These asics require the
- * gart table to be in video memory.
+ * Allocate system memory for GART page table for ASICs that don't have
+ * dedicated VRAM.
  * Returns 0 for success, error for failure.
  */
-int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
+int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
 {
-	int r;
+	unsigned int order = get_order(adev->gart.table_size);
+	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
+	struct amdgpu_bo *bo = NULL;
+	struct sg_table *sg = NULL;
+	struct amdgpu_bo_param bp;
+	dma_addr_t dma_addr;
+	struct page *p;
+	unsigned long x;
+	int ret;
 
-	if (adev->gart.robj == NULL) {
-		r = amdgpu_bo_create(adev, adev->gart.table_size,
-				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
-				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-				     NULL, NULL, &adev->gart.robj);
-		if (r) {
-			return r;
-		}
+	if (adev->gart.bo != NULL)
+		return 0;
+
+	p = alloc_pages(gfp_flags, order);
+	if (!p)
+		return -ENOMEM;
+
+	/* assign pages to this device */
+	for (x = 0; x < (1UL << order); x++)
+		p[x].mapping = adev->mman.bdev.dev_mapping;
+
+	/* If the hardware does not support UTCL2 snooping of the CPU caches
+	 * then set_memory_wc() could be used as a workaround to mark the pages
+	 * as write combine memory.
+	 */
+	dma_addr = dma_map_page(&adev->pdev->dev, p, 0, adev->gart.table_size,
+				DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(&adev->pdev->dev, dma_addr)) {
+		dev_err(&adev->pdev->dev, "Failed to DMA MAP the GART BO page\n");
+		__free_pages(p, order);
+		p = NULL;
+		return -EFAULT;
+	}
+
+	dev_info(adev->dev, "%s dma_addr:%pad\n", __func__, &dma_addr);
+	/* Create SG table */
+	sg = kmalloc(sizeof(*sg), GFP_KERNEL);
+	if (!sg) {
+		ret = -ENOMEM;
+		goto error;
+	}
+	ret = sg_alloc_table(sg, 1, GFP_KERNEL);
+	if (ret)
+		goto error;
+
+	sg_dma_address(sg->sgl) = dma_addr;
+	sg->sgl->length = adev->gart.table_size;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+	sg->sgl->dma_length = adev->gart.table_size;
+#endif
+	/* Create SG BO */
+	memset(&bp, 0, sizeof(bp));
+	bp.size = adev->gart.table_size;
+	bp.byte_align = PAGE_SIZE;
+	bp.domain = AMDGPU_GEM_DOMAIN_CPU;
+	bp.type = ttm_bo_type_sg;
+	bp.resv = NULL;
+	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+	bp.flags = 0;
+	ret = amdgpu_bo_create(adev, &bp, &bo);
+	if (ret)
+		goto error;
+
+	bo->tbo.sg = sg;
+	bo->tbo.ttm->sg = sg;
+	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
+	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
+
+	ret = amdgpu_bo_reserve(bo, true);
+	if (ret) {
+		dev_err(adev->dev, "(%d) failed to reserve bo for GART system bo\n", ret);
+		goto error;
 	}
+
+	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+	WARN(ret, "Pinning the GART table failed");
+	if (ret)
+		goto error_resv;
+
+	adev->gart.bo = bo;
+	adev->gart.ptr = page_to_virt(p);
+	/* Make GART table accessible in VMID0 */
+	ret = amdgpu_ttm_alloc_gart(&adev->gart.bo->tbo);
+	if (ret)
+		amdgpu_gart_table_ram_free(adev);
+	amdgpu_bo_unreserve(bo);
+
 	return 0;
+
+error_resv:
+	amdgpu_bo_unreserve(bo);
+error:
+	amdgpu_bo_unref(&bo);
+	if (sg) {
+		sg_free_table(sg);
+		kfree(sg);
+	}
+	__free_pages(p, order);
+	return ret;
 }
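The "Create SG table" step above wraps one already-mapped DMA address in a single-entry sg_table so TTM can treat the CPU-resident GART table like any other ttm_bo_type_sg buffer. A stripped-down sketch of just that wrapping, mirroring the patch with error handling reduced to the minimum:

    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    /* Minimal sketch: build a one-entry sg_table around a pre-mapped
     * DMA region. No struct page is attached; only the DMA address and
     * length are filled in, which is all a device-visible buffer needs.
     */
    static struct sg_table *wrap_dma_region(dma_addr_t dma_addr, unsigned int size)
    {
    	struct sg_table *sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);

    	if (!sgt)
    		return NULL;
    	if (sg_alloc_table(sgt, 1, GFP_KERNEL)) {
    		kfree(sgt);
    		return NULL;
    	}
    	sg_dma_address(sgt->sgl) = dma_addr;
    	sgt->sgl->length = size;
    #ifdef CONFIG_NEED_SG_DMA_LENGTH
    	sgt->sgl->dma_length = size;
    #endif
    	return sgt;
    }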
 
 /**
- * amdgpu_gart_table_vram_pin - pin gart page table in vram
+ * amdgpu_gart_table_ram_free - free gart page table system ram
  *
  * @adev: amdgpu_device pointer
  *
- * Pin the GART page table in vram so it will not be moved
- * by the memory manager (pcie r4xx, r5xx+). These asics require the
- * gart table to be in video memory.
- * Returns 0 for success, error for failure.
+ * Free the system memory used for the GART page table on ASICs that
+ * don't have dedicated VRAM.
  */
-int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
+void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
 {
-	uint64_t gpu_addr;
-	int r;
+	unsigned int order = get_order(adev->gart.table_size);
+	struct sg_table *sg = adev->gart.bo->tbo.sg;
+	struct page *p;
+	unsigned long x;
+	int ret;
 
-	r = amdgpu_bo_reserve(adev->gart.robj, false);
-	if (unlikely(r != 0))
-		return r;
-	r = amdgpu_bo_pin(adev->gart.robj,
-			  AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
-	if (r) {
-		amdgpu_bo_unreserve(adev->gart.robj);
-		return r;
+	ret = amdgpu_bo_reserve(adev->gart.bo, false);
+	if (!ret) {
+		amdgpu_bo_unpin(adev->gart.bo);
+		amdgpu_bo_unreserve(adev->gart.bo);
 	}
-	r = amdgpu_bo_kmap(adev->gart.robj, &adev->gart.ptr);
-	if (r)
-		amdgpu_bo_unpin(adev->gart.robj);
-	amdgpu_bo_unreserve(adev->gart.robj);
-	adev->gart.table_addr = gpu_addr;
-	return r;
+	amdgpu_bo_unref(&adev->gart.bo);
+	sg_free_table(sg);
+	kfree(sg);
+	p = virt_to_page(adev->gart.ptr);
+	for (x = 0; x < (1UL << order); x++)
+		p[x].mapping = NULL;
+	__free_pages(p, order);
+
+	adev->gart.ptr = NULL;
 }
 
 /**
- * amdgpu_gart_table_vram_unpin - unpin gart page table in vram
+ * amdgpu_gart_table_vram_alloc - allocate vram for gart page table
  *
  * @adev: amdgpu_device pointer
  *
- * Unpin the GART page table in vram (pcie r4xx, r5xx+).
- * These asics require the gart table to be in video memory.
+ * Allocate video memory for GART page table
+ * (pcie r4xx, r5xx+). These asics require the
+ * gart table to be in video memory.
+ * Returns 0 for success, error for failure.
  */
-void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
+int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
 {
-	int r;
+	if (adev->gart.bo != NULL)
+		return 0;
 
-	if (adev->gart.robj == NULL) {
-		return;
-	}
-	r = amdgpu_bo_reserve(adev->gart.robj, true);
-	if (likely(r == 0)) {
-		amdgpu_bo_kunmap(adev->gart.robj);
-		amdgpu_bo_unpin(adev->gart.robj);
-		amdgpu_bo_unreserve(adev->gart.robj);
-		adev->gart.ptr = NULL;
-	}
+	return amdgpu_bo_create_kernel(adev, adev->gart.table_size, PAGE_SIZE,
+				       AMDGPU_GEM_DOMAIN_VRAM, &adev->gart.bo,
+				       NULL, (void *)&adev->gart.ptr);
 }
 
 /**
@@ -206,10 +281,7 @@ void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
  */
 void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
 {
-	if (adev->gart.robj == NULL) {
-		return;
-	}
-	amdgpu_bo_unref(&adev->gart.robj);
+	amdgpu_bo_free_kernel(&adev->gart.bo, NULL, (void *)&adev->gart.ptr);
 }
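Why the separate vram_pin/vram_unpin helpers disappear above: amdgpu_bo_create_kernel(), as called by the new amdgpu_gart_table_vram_alloc(), bundles the old create/reserve/pin/kmap sequence into one call, and amdgpu_bo_free_kernel() undoes all of it. A usage sketch with the same arguments as the patch (the wrapper function is illustrative only):

    /* Sketch: one call replaces the old create + reserve + pin + kmap
     * chain; passing NULL for gpu_addr skips reporting the GPU address.
     * amdgpu_bo_free_kernel(&bo, NULL, &cpu_ptr) is the inverse.
     */
    static int example_alloc_table(struct amdgpu_device *adev, unsigned long size,
    			       struct amdgpu_bo **bo, void **cpu_ptr)
    {
    	return amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
    				       AMDGPU_GEM_DOMAIN_VRAM, bo,
    				       NULL, cpu_ptr);
    }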
 
 /*
@@ -226,87 +298,156 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
  * replaces them with the dummy page (all asics).
  * Returns 0 for success, -EINVAL for failure.
  */
-int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 			int pages)
 {
 	unsigned t;
-	unsigned p;
 	int i, j;
 	u64 page_base;
+	/* Starting from VEGA10, system bit must be 0 to mean invalid. */
 	uint64_t flags = 0;
+	int idx;
 
-	if (!adev->gart.ready) {
-		WARN(1, "trying to unbind memory from uninitialized GART !\n");
-		return -EINVAL;
-	}
+	if (!adev->gart.ptr)
+		return;
+
+	if (!drm_dev_enter(adev_to_drm(adev), &idx))
+		return;
 
 	t = offset / AMDGPU_GPU_PAGE_SIZE;
-	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
-	for (i = 0; i < pages; i++, p++) {
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-		adev->gart.pages[p] = NULL;
-#endif
-		page_base = adev->dummy_page.addr;
+	for (i = 0; i < pages; i++) {
+		page_base = adev->dummy_page_addr;
 		if (!adev->gart.ptr)
 			continue;
 
-		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
-			amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
-						t, page_base, flags);
+		for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
+			amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
+					       t, page_base, flags);
 			page_base += AMDGPU_GPU_PAGE_SIZE;
 		}
 	}
-	mb();
-	amdgpu_gart_flush_gpu_tlb(adev, 0);
-	return 0;
+	amdgpu_gart_invalidate_tlb(adev);
+
+	drm_dev_exit(idx);
 }
 
 /**
- * amdgpu_gart_bind - bind pages into the gart page table
+ * amdgpu_gart_map - map dma_addresses into GART entries
  *
  * @adev: amdgpu_device pointer
  * @offset: offset into the GPU's gart aperture
  * @pages: number of pages to bind
- * @pagelist: pages to bind
 * @dma_addr: DMA addresses of pages
+ * @flags: page table entry flags
+ * @dst: CPU address of the gart table
  *
- * Binds the requested pages to the gart page table
- * (all asics).
+ * Map the dma_addresses into GART entries (all asics).
  * Returns 0 for success, -EINVAL for failure.
  */
-int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
-		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
-		     uint64_t flags)
+void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
+		     int pages, dma_addr_t *dma_addr, uint64_t flags,
+		     void *dst)
 {
-	unsigned t;
-	unsigned p;
 	uint64_t page_base;
-	int i, j;
+	unsigned i, j, t;
+	int idx;
 
-	if (!adev->gart.ready) {
-		WARN(1, "trying to bind memory to uninitialized GART !\n");
-		return -EINVAL;
-	}
+	if (!drm_dev_enter(adev_to_drm(adev), &idx))
+		return;
 
 	t = offset / AMDGPU_GPU_PAGE_SIZE;
-	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
-	for (i = 0; i < pages; i++, p++) {
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-		adev->gart.pages[p] = pagelist[i];
-#endif
-		if (adev->gart.ptr) {
-			page_base = dma_addr[i];
-			for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
-				amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, t, page_base, flags);
-				page_base += AMDGPU_GPU_PAGE_SIZE;
-			}
+	for (i = 0; i < pages; i++) {
+		page_base = dma_addr[i];
+		for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
+			amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
+			page_base += AMDGPU_GPU_PAGE_SIZE;
 		}
 	}
+	drm_dev_exit(idx);
+}
+
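The nested loops in amdgpu_gart_unbind() and amdgpu_gart_map() above exist because one CPU page can span several GPU pages, so each CPU page fills AMDGPU_GPU_PAGES_IN_CPU_PAGE consecutive GART entries. An illustrative sketch with assumed sizes (a hypothetical 64 KiB PAGE_SIZE kernel; flag encoding omitted — amdgpu_gmc_set_pte_pde() handles that in the driver):

    #include <linux/types.h>

    #define EXAMPLE_CPU_PAGE_SIZE	65536	/* assumed: 64 KiB kernel pages */
    #define EXAMPLE_GPU_PAGE_SIZE	4096
    #define EXAMPLE_GPU_PAGES_IN_CPU_PAGE \
    	(EXAMPLE_CPU_PAGE_SIZE / EXAMPLE_GPU_PAGE_SIZE)	/* = 16 */

    /* One CPU page becomes 16 consecutive GART entries, each advancing
     * the DMA address by one GPU page — the inner loop above in miniature.
     */
    static void example_map_one_cpu_page(u64 *gart, u64 first_entry,
    				     dma_addr_t dma_addr)
    {
    	unsigned int j;

    	for (j = 0; j < EXAMPLE_GPU_PAGES_IN_CPU_PAGE; j++)
    		gart[first_entry + j] = (u64)dma_addr + j * EXAMPLE_GPU_PAGE_SIZE;
    }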
+/**
+ * amdgpu_gart_map_vram_range - map VRAM pages into the GART page table
+ *
+ * @adev: amdgpu_device pointer
+ * @pa: physical address of the first page to be mapped
+ * @start_page: first page to map in the GART aperture
+ * @num_pages: number of pages to be mapped
+ * @flags: page table entry flags
+ * @dst: CPU address of the GART table
+ *
+ * Binds a BO that is allocated in VRAM to the GART page table
+ * (all ASICs).
+ *
+ * Useful when a kernel BO is located in VRAM but
+ * needs to be accessed from the GART address space.
+ */
+void amdgpu_gart_map_vram_range(struct amdgpu_device *adev, uint64_t pa,
+				uint64_t start_page, uint64_t num_pages,
+				uint64_t flags, void *dst)
+{
+	u32 i, idx;
+
+	/* The SYSTEM flag indicates the pages aren't in VRAM. */
+	WARN_ON_ONCE(flags & AMDGPU_PTE_SYSTEM);
+
+	if (!drm_dev_enter(adev_to_drm(adev), &idx))
+		return;
+
+	for (i = 0; i < num_pages; ++i) {
+		amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
+				       start_page + i,
+				       pa + AMDGPU_GPU_PAGE_SIZE * i, flags);
+	}
+
+	drm_dev_exit(idx);
+}
+
+/**
+ * amdgpu_gart_bind - bind pages into the gart page table
+ *
+ * @adev: amdgpu_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to bind
+ * @dma_addr: DMA addresses of pages
+ * @flags: page table entry flags
+ *
+ * Binds the requested pages to the gart page table
+ * (all asics).
+ * Returns 0 for success, -EINVAL for failure.
+ */
+void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
+		      int pages, dma_addr_t *dma_addr,
+		      uint64_t flags)
+{
+	if (!adev->gart.ptr)
+		return;
+
+	amdgpu_gart_map(adev, offset, pages, dma_addr, flags, adev->gart.ptr);
+}
+
+/**
+ * amdgpu_gart_invalidate_tlb - invalidate gart TLB
+ *
+ * @adev: amdgpu device driver pointer
+ *
+ * Invalidate gart TLB, which can be used as a way to flush gart changes
+ *
+ */
+void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev)
+{
+	int i;
+
+	if (!adev->gart.ptr)
+		return;
+
 	mb();
-	amdgpu_gart_flush_gpu_tlb(adev, 0);
-	return 0;
+	if (down_read_trylock(&adev->reset_domain->sem)) {
+		amdgpu_device_flush_hdp(adev, NULL);
+		up_read(&adev->reset_domain->sem);
+	}
+	for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
+		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
 }
 
 /**
@@ -321,7 +462,7 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
 {
 	int r;
 
-	if (adev->dummy_page.page)
+	if (adev->dummy_page_addr)
 		return 0;
 
 	/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
@@ -329,44 +470,14 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
 		DRM_ERROR("Page size is smaller than GPU page size!\n");
 		return -EINVAL;
 	}
-	r = amdgpu_dummy_page_init(adev);
+	r = amdgpu_gart_dummy_page_init(adev);
 	if (r)
 		return r;
 	/* Compute table size */
-	adev->gart.num_cpu_pages = adev->mc.gtt_size / PAGE_SIZE;
-	adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE;
+	adev->gart.num_cpu_pages = adev->gmc.gart_size / PAGE_SIZE;
+	adev->gart.num_gpu_pages = adev->gmc.gart_size / AMDGPU_GPU_PAGE_SIZE;
 	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
 		 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
 
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-	/* Allocate pages table */
-	adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
-	if (adev->gart.pages == NULL) {
-		amdgpu_gart_fini(adev);
-		return -ENOMEM;
-	}
-#endif
-
 	return 0;
 }
-
-/**
- * amdgpu_gart_fini - tear down the driver info for managing the gart
- *
- * @adev: amdgpu_device pointer
- *
- * Tear down the gart driver info and free the dummy page (all asics).
- */
-void amdgpu_gart_fini(struct amdgpu_device *adev)
-{
-	if (adev->gart.ready) {
-		/* unbind pages */
-		amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
-	}
-	adev->gart.ready = false;
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-	vfree(adev->gart.pages);
-	adev->gart.pages = NULL;
-#endif
-	amdgpu_dummy_page_fini(adev);
-}
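A note on the drm_dev_enter()/drm_dev_exit() pairs introduced throughout this patch: they open an SRCU read section that fails once the device has been unplugged, so GART writes and TLB flushes quietly become no-ops after hot removal instead of touching dead hardware. The general pattern, sketched:

    #include <drm/drm_drv.h>

    /* Minimal sketch of the hotplug guard used throughout the patch. */
    static void write_hw_if_present(struct drm_device *drm)
    {
    	int idx;

    	if (!drm_dev_enter(drm, &idx))
    		return;	/* device gone: skip the hardware access */

    	/* ... GART table writes / MMIO would go here ... */

    	drm_dev_exit(idx);
    }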
