Diffstat (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_migrate.c')
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 676
1 file changed, 418 insertions(+), 258 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index dab290a4d19d..af53e796ea1b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -20,36 +20,40 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  */
-
 #include <linux/types.h>
-#include <linux/hmm.h>
 #include <linux/dma-direction.h>
 #include <linux/dma-mapping.h>
+#include <linux/migrate.h>
 #include "amdgpu_sync.h"
 #include "amdgpu_object.h"
 #include "amdgpu_vm.h"
-#include "amdgpu_mn.h"
 #include "amdgpu_res_cursor.h"
 #include "kfd_priv.h"
 #include "kfd_svm.h"
 #include "kfd_migrate.h"
+#include "kfd_smi_events.h"
+
+#ifdef dev_fmt
+#undef dev_fmt
+#endif
+#define dev_fmt(fmt) "kfd_migrate: " fmt
 
-static uint64_t
-svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
+static u64
+svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, u64 addr)
 {
         return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
 }
 
 static int
-svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
-                     dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
+svm_migrate_gart_map(struct amdgpu_ring *ring, u64 npages,
+                     dma_addr_t *addr, u64 *gart_addr, u64 flags)
 {
         struct amdgpu_device *adev = ring->adev;
         struct amdgpu_job *job;
         unsigned int num_dw, num_bytes;
         struct dma_fence *fence;
-        uint64_t src_addr, dst_addr;
-        uint64_t pte_flags;
+        u64 src_addr, dst_addr;
+        u64 pte_flags;
         void *cpu_addr;
         int r;
 
@@ -59,8 +63,12 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
         num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
         num_bytes = npages * 8;
 
-        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
-                                     AMDGPU_IB_POOL_DELAYED, &job);
+        r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
+                                     AMDGPU_FENCE_OWNER_UNDEFINED,
+                                     num_dw * 4 + num_bytes,
+                                     AMDGPU_IB_POOL_DELAYED,
+                                     &job,
+                                     AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP);
         if (r)
                 return r;
 
@@ -69,7 +77,7 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
 
         dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
         amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
-                                dst_addr, num_bytes, false);
+                                dst_addr, num_bytes, 0);
 
         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
         WARN_ON(job->ibs[0].length_dw > num_dw);
@@ -82,30 +90,19 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
 
         cpu_addr = &job->ibs[0].ptr[num_dw];
 
-        r = amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
-        if (r)
-                goto error_free;
-
-        r = amdgpu_job_submit(job, &adev->mman.entity,
-                              AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-        if (r)
-                goto error_free;
-
+        amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
+        fence = amdgpu_job_submit(job);
         dma_fence_put(fence);
 
         return r;
-
-error_free:
-        amdgpu_job_free(job);
-        return r;
 }
 
 /**
  * svm_migrate_copy_memory_gart - sdma copy data between ram and vram
  *
  * @adev: amdgpu device the sdma ring running
- * @src: source page address array
- * @dst: destination page address array
+ * @sys: system DMA pointer to be copied
+ * @vram: vram destination DMA pointer
  * @npages: number of pages to copy
  * @direction: enum MIGRATION_COPY_DIR
  * @mfence: output, sdma fence to signal after sdma is done
@@ -125,15 +122,15 @@ error_free:
 static int
 svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
-                             uint64_t *vram, uint64_t npages,
+                             u64 *vram, u64 npages,
                              enum MIGRATION_COPY_DIR direction,
                              struct dma_fence **mfence)
 {
-        const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
+        const u64 GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
         struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
-        uint64_t gart_s, gart_d;
+        u64 gart_s, gart_d;
         struct dma_fence *next;
-        uint64_t size;
+        u64 size;
         int r;
 
         mutex_lock(&adev->mman.gtt_window_lock);
@@ -151,14 +148,14 @@ svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
                         gart_d = svm_migrate_direct_mapping_addr(adev, *vram);
                 }
                 if (r) {
-                        pr_debug("failed %d to create gart mapping\n", r);
+                        dev_err(adev->dev, "fail %d create gart mapping\n", r);
                         goto out_unlock;
                 }
 
                 r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
-                                       NULL, &next, false, true, false);
+                                       NULL, &next, false, true, 0);
                 if (r) {
-                        pr_debug("failed %d to copy memory\n", r);
+                        dev_err(adev->dev, "fail %d to copy memory\n", r);
                         goto out_unlock;
                 }
 
@@ -209,7 +206,7 @@ svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
 unsigned long
 svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
 {
-        return (addr + adev->kfd.dev->pgmap.range.start) >> PAGE_SHIFT;
+        return (addr + adev->kfd.pgmap.range.start) >> PAGE_SHIFT;
 }
 
 static void
@@ -220,8 +217,7 @@ svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
         page = pfn_to_page(pfn);
         svm_range_bo_ref(prange->svm_bo);
         page->zone_device_data = prange->svm_bo;
-        get_page(page);
-        lock_page(page);
+        zone_device_page_init(page, 0);
 }
 
 static void
@@ -240,7 +236,7 @@ svm_migrate_addr(struct amdgpu_device *adev, struct page *page)
         unsigned long addr;
 
         addr = page_to_pfn(page) << PAGE_SHIFT;
-        return (addr - adev->kfd.dev->pgmap.range.start);
+        return (addr - adev->kfd.pgmap.range.start);
 }
 
 static struct page *
@@ -264,48 +260,60 @@ static void svm_migrate_put_sys_page(unsigned long addr)
         put_page(page);
 }
 
+static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
+{
+        unsigned long mpages = 0;
+        unsigned long i;
+
+        for (i = 0; i < migrate->npages; i++) {
+                if (migrate->dst[i] & MIGRATE_PFN_VALID &&
+                    migrate->src[i] & MIGRATE_PFN_MIGRATE)
+                        mpages++;
+        }
+        return mpages;
+}
+
 static int
-svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
+svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
                          struct migrate_vma *migrate, struct dma_fence **mfence,
-                         dma_addr_t *scratch)
+                         dma_addr_t *scratch, u64 ttm_res_offset)
 {
-        uint64_t npages = migrate->cpages;
+        u64 npages = migrate->npages;
+        struct amdgpu_device *adev = node->adev;
         struct device *dev = adev->dev;
         struct amdgpu_res_cursor cursor;
+        u64 mpages = 0;
         dma_addr_t *src;
-        uint64_t *dst;
-        uint64_t i, j;
+        u64 *dst;
+        u64 i, j;
         int r;
 
-        pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
-                 prange->last);
+        pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
+                 prange->last, ttm_res_offset);
 
         src = scratch;
-        dst = (uint64_t *)(scratch + npages);
+        dst = (u64 *)(scratch + npages);
 
-        r = svm_range_vram_node_new(adev, prange, true);
-        if (r) {
-                pr_debug("failed %d get 0x%llx pages from vram\n", r, npages);
-                goto out;
-        }
-
-        amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
+        amdgpu_res_first(prange->ttm_res, ttm_res_offset,
                          npages << PAGE_SHIFT, &cursor);
-        for (i = j = 0; i < npages; i++) {
+        for (i = j = 0; (i < npages) && (mpages < migrate->cpages); i++) {
                 struct page *spage;
 
-                spage = migrate_pfn_to_page(migrate->src[i]);
-                if (spage && !is_zone_device_page(spage)) {
+                if (migrate->src[i] & MIGRATE_PFN_MIGRATE) {
                         dst[i] = cursor.start + (j << PAGE_SHIFT);
                         migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
                         svm_migrate_get_vram_page(prange, migrate->dst[i]);
                         migrate->dst[i] = migrate_pfn(migrate->dst[i]);
-                        migrate->dst[i] |= MIGRATE_PFN_LOCKED;
+                        mpages++;
+                }
+                spage = migrate_pfn_to_page(migrate->src[i]);
+                if (spage && !is_zone_device_page(spage)) {
                         src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
-                                              DMA_TO_DEVICE);
+                                              DMA_BIDIRECTIONAL);
                         r = dma_mapping_error(dev, src[i]);
                         if (r) {
-                                pr_debug("failed %d dma_map_page\n", r);
+                                dev_err(dev, "%s: fail %d dma_map_page\n",
                                        __func__, r);
                                 goto out_free_vram_pages;
                         }
                 } else {
@@ -317,7 +325,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
                                                 mfence);
                                 if (r)
                                         goto out_free_vram_pages;
-                                amdgpu_res_next(&cursor, j << PAGE_SHIFT);
+                                amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT);
                                 j = 0;
                         } else {
                                 amdgpu_res_next(&cursor, PAGE_SIZE);
@@ -325,8 +333,8 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
                         continue;
                 }
 
-                pr_debug("dma mapping src to 0x%llx, page_to_pfn 0x%lx\n",
-                         src[i] >> PAGE_SHIFT, page_to_pfn(spage));
+                pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
+                                     src[i] >> PAGE_SHIFT, page_to_pfn(spage));
 
                 if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
                         r = svm_migrate_copy_memory_gart(adev, src + i - j,
@@ -336,7 +344,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
                         if (r)
                                 goto out_free_vram_pages;
                         amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
-                        j= 0;
+                        j = 0;
                 } else {
                         j++;
                 }
@@ -348,9 +356,12 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 out_free_vram_pages:
         if (r) {
                 pr_debug("failed %d to copy memory to vram\n", r);
-                while (i--) {
+                for (i = 0; i < npages && mpages; i++) {
+                        if (!dst[i])
+                                continue;
                         svm_migrate_put_vram_page(adev, dst[i]);
                         migrate->dst[i] = 0;
+                        mpages--;
                 }
         }
 
@@ -368,21 +379,24 @@ out_free_vram_pages:
                         migrate->dst[i + 3] = 0;
                 }
 #endif
-out:
+
         return r;
 }
 
-static int
-svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
-                        struct vm_area_struct *vma, uint64_t start,
-                        uint64_t end)
+static long
+svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
+                        struct vm_area_struct *vma, u64 start,
+                        u64 end, uint32_t trigger, u64 ttm_res_offset)
 {
-        uint64_t npages = (end - start) >> PAGE_SHIFT;
+        struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
+        u64 npages = (end - start) >> PAGE_SHIFT;
+        struct amdgpu_device *adev = node->adev;
         struct kfd_process_device *pdd;
         struct dma_fence *mfence = NULL;
-        struct migrate_vma migrate;
+        struct migrate_vma migrate = { 0 };
+        unsigned long cpages = 0;
+        unsigned long mpages = 0;
         dma_addr_t *scratch;
-        size_t size;
         void *buf;
         int r = -ENOMEM;
 
@@ -393,9 +407,9 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
         migrate.flags = MIGRATE_VMA_SELECT_SYSTEM;
         migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
 
-        size = 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t);
-        size *= npages;
-        buf = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
+        buf = kvcalloc(npages,
+                       2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t),
+                       GFP_KERNEL);
         if (!buf)
                 goto out;
 
@@ -403,38 +417,55 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
         migrate.dst = migrate.src + npages;
         scratch = (dma_addr_t *)(migrate.dst + npages);
 
+        kfd_smi_event_migration_start(node, p->lead_thread->pid,
+                                      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
+                                      0, node->id, prange->prefetch_loc,
+                                      prange->preferred_loc, trigger);
+
         r = migrate_vma_setup(&migrate);
         if (r) {
-                pr_debug("failed %d prepare migrate svms 0x%p [0x%lx 0x%lx]\n",
-                         r, prange->svms, prange->start, prange->last);
+                dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
+                        __func__, r, prange->start, prange->last);
                 goto out_free;
         }
 
-        if (migrate.cpages != npages) {
-                pr_debug("Partial migration. 0x%lx/0x%llx pages can be migrated\n",
-                         migrate.cpages,
-                         npages);
-        }
-
-        if (migrate.cpages) {
-                r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence,
-                                             scratch);
-                migrate_vma_pages(&migrate);
-                svm_migrate_copy_done(adev, mfence);
-                migrate_vma_finalize(&migrate);
+        cpages = migrate.cpages;
+        if (!cpages) {
+                pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
+                         prange->start, prange->last);
+                goto out_free;
         }
+        if (cpages != npages)
+                pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
+                         cpages, npages);
+        else
+                pr_debug("0x%lx pages collected\n", cpages);
 
-        svm_range_dma_unmap(adev->dev, scratch, 0, npages);
-        svm_range_free_dma_mappings(prange);
+        r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
+        migrate_vma_pages(&migrate);
+
+        svm_migrate_copy_done(adev, mfence);
+        migrate_vma_finalize(&migrate);
+
+        mpages = svm_migrate_successful_pages(&migrate);
+        pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
+                 mpages, cpages, migrate.npages);
+
+        svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
 
 out_free:
         kvfree(buf);
+        kfd_smi_event_migration_end(node, p->lead_thread->pid,
+                                    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
+                                    0, node->id, trigger, r);
 out:
-        if (!r) {
-                pdd = svm_range_get_pdd_by_adev(prange, adev);
+        if (!r && mpages) {
+                pdd = svm_range_get_pdd_by_node(prange, node);
                 if (pdd)
-                        WRITE_ONCE(pdd->page_in, pdd->page_in + migrate.cpages);
-        }
+                        WRITE_ONCE(pdd->page_in, pdd->page_in + mpages);
+
+                return mpages;
+        }
 
         return r;
 }
@@ -442,7 +473,10 @@ out:
  * svm_migrate_ram_to_vram - migrate svm range from system to device
  * @prange: range structure
  * @best_loc: the device to migrate to
+ * @start_mgr: start page to migrate
+ * @last_mgr: last page to migrate
  * @mm: the process mm structure
+ * @trigger: reason of migration
  *
  * Context: Process context, caller hold mmap read lock, svms lock, prange lock
  *
@@ -451,85 +485,118 @@ out:
  */
 static int
 svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
-                        struct mm_struct *mm)
+                        unsigned long start_mgr, unsigned long last_mgr,
+                        struct mm_struct *mm, uint32_t trigger)
 {
         unsigned long addr, start, end;
         struct vm_area_struct *vma;
-        struct amdgpu_device *adev;
-        int r = 0;
-
-        if (prange->actual_loc == best_loc) {
-                pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
-                         prange->svms, prange->start, prange->last, best_loc);
-                return 0;
+        u64 ttm_res_offset;
+        struct kfd_node *node;
+        unsigned long mpages = 0;
+        long r = 0;
+
+        if (start_mgr < prange->start || last_mgr > prange->last) {
+                pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
+                         start_mgr, last_mgr, prange->start, prange->last);
+                return -EFAULT;
         }
 
-        adev = svm_range_get_adev_by_id(prange, best_loc);
-        if (!adev) {
-                pr_debug("failed to get device by id 0x%x\n", best_loc);
+        node = svm_range_get_node_by_id(prange, best_loc);
+        if (!node) {
+                pr_debug("failed to get kfd node by id 0x%x\n", best_loc);
                 return -ENODEV;
         }
 
-        pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms,
-                 prange->start, prange->last, best_loc);
+        pr_debug("svms 0x%p [0x%lx 0x%lx] in [0x%lx 0x%lx] to gpu 0x%x\n",
+                 prange->svms, start_mgr, last_mgr, prange->start, prange->last,
+                 best_loc);
 
-        /* FIXME: workaround for page locking bug with invalid pages */
-        svm_range_prefault(prange, mm, SVM_ADEV_PGMAP_OWNER(adev));
+        start = start_mgr << PAGE_SHIFT;
+        end = (last_mgr + 1) << PAGE_SHIFT;
+
+        r = amdgpu_amdkfd_reserve_mem_limit(node->adev,
+                                            prange->npages * PAGE_SIZE,
+                                            KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
+                                            node->xcp ? node->xcp->id : 0);
+        if (r) {
+                dev_dbg(node->adev->dev, "failed to reserve VRAM, r: %ld\n", r);
+                return -ENOSPC;
+        }
 
-        start = prange->start << PAGE_SHIFT;
-        end = (prange->last + 1) << PAGE_SHIFT;
+        r = svm_range_vram_node_new(node, prange, true);
+        if (r) {
+                dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
+                goto out;
+        }
+        ttm_res_offset = (start_mgr - prange->start + prange->offset) << PAGE_SHIFT;
 
         for (addr = start; addr < end;) {
                 unsigned long next;
 
-                vma = find_vma(mm, addr);
-                if (!vma || addr < vma->vm_start)
+                vma = vma_lookup(mm, addr);
+                if (!vma)
                         break;
 
                 next = min(vma->vm_end, end);
-                r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next);
-                if (r) {
-                        pr_debug("failed to migrate\n");
+                r = svm_migrate_vma_to_vram(node, prange, vma, addr, next, trigger, ttm_res_offset);
+                if (r < 0) {
+                        pr_debug("failed %ld to migrate\n", r);
                         break;
+                } else {
+                        mpages += r;
                 }
+                ttm_res_offset += next - addr;
                 addr = next;
         }
 
-        if (!r)
+        if (mpages) {
                 prange->actual_loc = best_loc;
+                prange->vram_pages += mpages;
+        } else if (!prange->actual_loc) {
+                /* if no page migrated and all pages from prange are at
+                 * sys ram drop svm_bo got from svm_range_vram_node_new
+                 */
+                svm_range_vram_node_free(prange);
+        }
 
-        return r;
+out:
+        amdgpu_amdkfd_unreserve_mem_limit(node->adev,
+                                          prange->npages * PAGE_SIZE,
+                                          KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
+                                          node->xcp ? node->xcp->id : 0);
+        return r < 0 ? r : 0;
 }
 
-static void svm_migrate_page_free(struct page *page)
+static void svm_migrate_folio_free(struct folio *folio)
 {
+        struct page *page = &folio->page;
         struct svm_range_bo *svm_bo = page->zone_device_data;
 
         if (svm_bo) {
-                pr_debug("svm_bo ref left: %d\n", kref_read(&svm_bo->kref));
-                svm_range_bo_unref(svm_bo);
+                pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
+                svm_range_bo_unref_async(svm_bo);
         }
 }
 
 static int
 svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
                         struct migrate_vma *migrate, struct dma_fence **mfence,
-                        dma_addr_t *scratch, uint64_t npages)
+                        dma_addr_t *scratch, u64 npages)
 {
         struct device *dev = adev->dev;
-        uint64_t *src;
+        u64 *src;
         dma_addr_t *dst;
         struct page *dpage;
-        uint64_t i = 0, j;
-        uint64_t addr;
+        u64 i = 0, j;
+        u64 addr;
         int r = 0;
 
         pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
                  prange->last);
 
-        addr = prange->start << PAGE_SHIFT;
+        addr = migrate->start;
 
-        src = (uint64_t *)(scratch + npages);
+        src = (u64 *)(scratch + npages);
         dst = scratch;
 
         for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
@@ -551,7 +618,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
                         continue;
                 }
                 src[i] = svm_migrate_addr(adev, spage);
-                if (i > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
+                if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
                         r = svm_migrate_copy_memory_gart(adev, dst + i - j,
                                                          src + i - j, j,
                                                          FROM_VRAM_TO_RAM,
@@ -569,18 +636,17 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
                         goto out_oom;
                 }
 
-                dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+                dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
                 r = dma_mapping_error(dev, dst[i]);
                 if (r) {
-                        pr_debug("failed %d dma_map_page\n", r);
+                        dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r);
                         goto out_oom;
                 }
 
-                pr_debug("dma mapping dst to 0x%llx, page_to_pfn 0x%lx\n",
-                         dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));
+                pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n",
+                                     dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));
 
                 migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
-                migrate->dst[i] |= MIGRATE_PFN_LOCKED;
                 j++;
         }
 
@@ -599,16 +665,37 @@ out_oom:
         return r;
 }
 
-static int
-svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
-                       struct vm_area_struct *vma, uint64_t start, uint64_t end)
+/**
+ * svm_migrate_vma_to_ram - migrate range inside one vma from device to system
+ *
+ * @prange: svm range structure
+ * @vma: vm_area_struct that range [start, end] belongs to
+ * @start: range start virtual address in pages
+ * @end: range end virtual address in pages
+ * @node: kfd node device to migrate from
+ * @trigger: reason of migration
+ * @fault_page: is from vmf->page, svm_migrate_to_ram(), this is CPU page fault callback
+ *
+ * Context: Process context, caller hold mmap read lock, prange->migrate_mutex
+ *
+ * Return:
+ *   negative values - indicate error
+ *   positive values or zero - number of pages got migrated
+ */
+static long
+svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
+                       struct vm_area_struct *vma, u64 start, u64 end,
+                       uint32_t trigger, struct page *fault_page)
 {
-        uint64_t npages = (end - start) >> PAGE_SHIFT;
+        struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
+        u64 npages = (end - start) >> PAGE_SHIFT;
+        unsigned long cpages = 0;
+        unsigned long mpages = 0;
+        struct amdgpu_device *adev = node->adev;
         struct kfd_process_device *pdd;
         struct dma_fence *mfence = NULL;
-        struct migrate_vma migrate;
+        struct migrate_vma migrate = { 0 };
         dma_addr_t *scratch;
-        size_t size;
         void *buf;
         int r = -ENOMEM;
 
@@ -616,131 +703,189 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
         migrate.vma = vma;
         migrate.start = start;
         migrate.end = end;
-        migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
         migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
+        if (adev->gmc.xgmi.connected_to_cpu)
+                migrate.flags = MIGRATE_VMA_SELECT_DEVICE_COHERENT;
+        else
+                migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
 
-        size = 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t);
-        size *= npages;
-        buf = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
+        buf = kvcalloc(npages,
+                       2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t),
+                       GFP_KERNEL);
         if (!buf)
                 goto out;
 
         migrate.src = buf;
         migrate.dst = migrate.src + npages;
+        migrate.fault_page = fault_page;
         scratch = (dma_addr_t *)(migrate.dst + npages);
 
+        kfd_smi_event_migration_start(node, p->lead_thread->pid,
+                                      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
+                                      node->id, 0, prange->prefetch_loc,
+                                      prange->preferred_loc, trigger);
+
         r = migrate_vma_setup(&migrate);
         if (r) {
-                pr_debug("failed %d prepare migrate svms 0x%p [0x%lx 0x%lx]\n",
-                         r, prange->svms, prange->start, prange->last);
+                dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
                        __func__, r, prange->start, prange->last);
                 goto out_free;
         }
 
-        pr_debug("cpages %ld\n", migrate.cpages);
-
-        if (migrate.cpages) {
-                r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
-                                            scratch, npages);
-                migrate_vma_pages(&migrate);
-                svm_migrate_copy_done(adev, mfence);
-                migrate_vma_finalize(&migrate);
-        } else {
+        cpages = migrate.cpages;
+        if (!cpages) {
                 pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
                          prange->start, prange->last);
+                goto out_free;
         }
+        if (cpages != npages)
+                pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
+                         cpages, npages);
+        else
+                pr_debug("0x%lx pages collected\n", cpages);
+
+        r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
+                                    scratch, npages);
+        migrate_vma_pages(&migrate);
 
-        svm_range_dma_unmap(adev->dev, scratch, 0, npages);
+        mpages = svm_migrate_successful_pages(&migrate);
+        pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
+                 mpages, cpages, migrate.npages);
+
+        svm_migrate_copy_done(adev, mfence);
+        migrate_vma_finalize(&migrate);
+
+        svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
 
 out_free:
         kvfree(buf);
+        kfd_smi_event_migration_end(node, p->lead_thread->pid,
+                                    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
+                                    node->id, 0, trigger, r);
 out:
-        if (!r) {
-                pdd = svm_range_get_pdd_by_adev(prange, adev);
+        if (!r && mpages) {
+                pdd = svm_range_get_pdd_by_node(prange, node);
                 if (pdd)
-                        WRITE_ONCE(pdd->page_out,
-                                   pdd->page_out + migrate.cpages);
+                        WRITE_ONCE(pdd->page_out, pdd->page_out + mpages);
         }
-        return r;
+
+        return r ? r : mpages;
 }
 
 /**
  * svm_migrate_vram_to_ram - migrate svm range from device to system
  * @prange: range structure
  * @mm: process mm, use current->mm if NULL
+ * @start_mgr: start page need be migrated to sys ram
+ * @last_mgr: last page need be migrated to sys ram
+ * @trigger: reason of migration
+ * @fault_page: is from vmf->page, svm_migrate_to_ram(), this is CPU page fault callback
  *
- * Context: Process context, caller hold mmap read lock, svms lock, prange lock
+ * Context: Process context, caller hold mmap read lock, prange->migrate_mutex
 *
 * Return:
 * 0 - OK, otherwise error code
 */
-int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
+int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
+                            unsigned long start_mgr, unsigned long last_mgr,
+                            uint32_t trigger, struct page *fault_page)
 {
-        struct amdgpu_device *adev;
+        struct kfd_node *node;
         struct vm_area_struct *vma;
         unsigned long addr;
         unsigned long start;
         unsigned long end;
-        int r = 0;
+        unsigned long mpages = 0;
+        long r = 0;
 
+        /* this prange has no vram pages to migrate to sys ram */
         if (!prange->actual_loc) {
                 pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
                          prange->start, prange->last);
                 return 0;
         }
 
-        adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
-        if (!adev) {
-                pr_debug("failed to get device by id 0x%x\n",
-                         prange->actual_loc);
-                return -ENODEV;
+        if (start_mgr < prange->start || last_mgr > prange->last) {
+                pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
+                         start_mgr, last_mgr, prange->start, prange->last);
+                return -EFAULT;
         }
 
+        node = svm_range_get_node_by_id(prange, prange->actual_loc);
+        if (!node) {
+                pr_debug("failed to get kfd node by id 0x%x\n", prange->actual_loc);
+                return -ENODEV;
+        }
         pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
-                 prange->svms, prange, prange->start, prange->last,
+                 prange->svms, prange, start_mgr, last_mgr,
                  prange->actual_loc);
 
-        start = prange->start << PAGE_SHIFT;
-        end = (prange->last + 1) << PAGE_SHIFT;
+        start = start_mgr << PAGE_SHIFT;
+        end = (last_mgr + 1) << PAGE_SHIFT;
 
         for (addr = start; addr < end;) {
                 unsigned long next;
 
-                vma = find_vma(mm, addr);
-                if (!vma || addr < vma->vm_start)
+                vma = vma_lookup(mm, addr);
+                if (!vma) {
+                        pr_debug("failed to find vma for prange %p\n", prange);
+                        r = -EFAULT;
                         break;
+                }
 
                 next = min(vma->vm_end, end);
-                r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next);
-                if (r) {
-                        pr_debug("failed %d to migrate\n", r);
+                r = svm_migrate_vma_to_ram(node, prange, vma, addr, next, trigger,
+                                           fault_page);
+                if (r < 0) {
+                        pr_debug("failed %ld to migrate prange %p\n", r, prange);
                         break;
+                } else {
+                        mpages += r;
                 }
                 addr = next;
         }
 
-        if (!r) {
-                svm_range_vram_node_free(prange);
-                prange->actual_loc = 0;
+        if (r >= 0) {
+                WARN_ONCE(prange->vram_pages < mpages,
+                          "Recorded vram pages(0x%llx) should not be less than migration pages(0x%lx).",
                          prange->vram_pages, mpages);
+                prange->vram_pages -= mpages;
+
+                /* prange does not have vram page set its actual_loc to system
+                 * and drop its svm_bo ref
+                 */
+                if (prange->vram_pages == 0 && prange->ttm_res) {
+                        prange->actual_loc = 0;
+                        svm_range_vram_node_free(prange);
+                }
         }
-        return r;
+
+        return r < 0 ? r : 0;
 }
 
 /**
  * svm_migrate_vram_to_vram - migrate svm range from device to device
  * @prange: range structure
  * @best_loc: the device to migrate to
+ * @start: start page need be migrated to sys ram
+ * @last: last page need be migrated to sys ram
  * @mm: process mm, use current->mm if NULL
+ * @trigger: reason of migration
  *
 * Context: Process context, caller hold mmap read lock, svms lock, prange lock
 *
+ * migrate all vram pages in prange to sys ram, then migrate
+ * [start, last] pages from sys ram to gpu node best_loc.
+ *
 * Return:
 * 0 - OK, otherwise error code
 */
 static int
 svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
-                         struct mm_struct *mm)
+                         unsigned long start, unsigned long last,
+                         struct mm_struct *mm, uint32_t trigger)
 {
-        int r;
+        int r, retries = 3;
 
         /*
          * TODO: for both devices with PCIe large bar or on same xgmi hive, skip
@@ -749,21 +894,31 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
 
         pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);
 
-        r = svm_migrate_vram_to_ram(prange, mm);
-        if (r)
-                return r;
+        do {
+                r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
+                                            trigger, NULL);
+                if (r)
+                        return r;
+        } while (prange->actual_loc && --retries);
 
-        return svm_migrate_ram_to_vram(prange, best_loc, mm);
+        if (prange->actual_loc)
+                return -EDEADLK;
+
+        return svm_migrate_ram_to_vram(prange, best_loc, start, last, mm, trigger);
 }
 
 int
 svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
-                    struct mm_struct *mm)
+                    unsigned long start, unsigned long last,
+                    struct mm_struct *mm, uint32_t trigger)
 {
-        if (!prange->actual_loc)
-                return svm_migrate_ram_to_vram(prange, best_loc, mm);
+        if (!prange->actual_loc || prange->actual_loc == best_loc)
+                return svm_migrate_ram_to_vram(prange, best_loc, start, last,
+                                               mm, trigger);
+        else
-                return svm_migrate_vram_to_vram(prange, best_loc, mm);
+                return svm_migrate_vram_to_vram(prange, best_loc, start, last,
+                                                mm, trigger);
 }
 
@@ -779,100 +934,103 @@ svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
 */
 static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
 {
+        unsigned long start, last, size;
         unsigned long addr = vmf->address;
-        struct vm_area_struct *vma;
-        enum svm_work_list_ops op;
-        struct svm_range *parent;
+        struct svm_range_bo *svm_bo;
         struct svm_range *prange;
         struct kfd_process *p;
         struct mm_struct *mm;
         int r = 0;
 
-        vma = vmf->vma;
-        mm = vma->vm_mm;
+        svm_bo = vmf->page->zone_device_data;
+        if (!svm_bo) {
+                pr_debug("failed get device page at addr 0x%lx\n", addr);
+                return VM_FAULT_SIGBUS;
+        }
+        if (!mmget_not_zero(svm_bo->eviction_fence->mm)) {
+                pr_debug("addr 0x%lx of process mm is destroyed\n", addr);
+                return VM_FAULT_SIGBUS;
+        }
 
-        p = kfd_lookup_process_by_mm(vma->vm_mm);
+        mm = svm_bo->eviction_fence->mm;
+        if (mm != vmf->vma->vm_mm)
+                pr_debug("addr 0x%lx is COW mapping in child process\n", addr);
+
+        p = kfd_lookup_process_by_mm(mm);
         if (!p) {
                 pr_debug("failed find process at fault address 0x%lx\n", addr);
-                return VM_FAULT_SIGBUS;
+                r = VM_FAULT_SIGBUS;
+                goto out_mmput;
         }
-        addr >>= PAGE_SHIFT;
+        if (READ_ONCE(p->svms.faulting_task) == current) {
+                pr_debug("skipping ram migration\n");
+                r = 0;
+                goto out_unref_process;
+        }
+
         pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);
+        addr >>= PAGE_SHIFT;
         mutex_lock(&p->svms.lock);
 
-        prange = svm_range_from_addr(&p->svms, addr, &parent);
+        prange = svm_range_from_addr(&p->svms, addr, NULL);
         if (!prange) {
-                pr_debug("cannot find svm range at 0x%lx\n", addr);
+                pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr);
                 r = -EFAULT;
-                goto out;
+                goto out_unlock_svms;
         }
 
-        mutex_lock(&parent->migrate_mutex);
-        if (prange != parent)
-                mutex_lock_nested(&prange->migrate_mutex, 1);
+        mutex_lock(&prange->migrate_mutex);
 
         if (!prange->actual_loc)
                 goto out_unlock_prange;
 
-        svm_range_lock(parent);
-        if (prange != parent)
-                mutex_lock_nested(&prange->lock, 1);
-        r = svm_range_split_by_granularity(p, mm, addr, parent, prange);
-        if (prange != parent)
-                mutex_unlock(&prange->lock);
-        svm_range_unlock(parent);
-        if (r) {
-                pr_debug("failed %d to split range by granularity\n", r);
-                goto out_unlock_prange;
-        }
+        /* Align migration range start and size to granularity size */
+        size = 1UL << prange->granularity;
+        start = max(ALIGN_DOWN(addr, size), prange->start);
+        last = min(ALIGN(addr + 1, size) - 1, prange->last);
 
-        r = svm_migrate_vram_to_ram(prange, mm);
+        r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, start, last,
+                                    KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, vmf->page);
         if (r)
-                pr_debug("failed %d migrate 0x%p [0x%lx 0x%lx] to ram\n", r,
-                         prange, prange->start, prange->last);
-
-        /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
-        if (p->xnack_enabled && parent == prange)
-                op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP;
-        else
-                op = SVM_OP_UPDATE_RANGE_NOTIFIER;
-        svm_range_add_list_work(&p->svms, parent, mm, op);
-        schedule_deferred_list_work(&p->svms);
+                pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
                         r, prange->svms, prange, start, last);
 
 out_unlock_prange:
-        if (prange != parent)
-                mutex_unlock(&prange->migrate_mutex);
-        mutex_unlock(&parent->migrate_mutex);
-out:
+        mutex_unlock(&prange->migrate_mutex);
+out_unlock_svms:
         mutex_unlock(&p->svms.lock);
-        kfd_unref_process(p);
-
+out_unref_process:
         pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);
-
+        kfd_unref_process(p);
+out_mmput:
+        mmput(mm);
         return r ? VM_FAULT_SIGBUS : 0;
 }
 
 static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
-        .page_free              = svm_migrate_page_free,
+        .folio_free             = svm_migrate_folio_free,
         .migrate_to_ram         = svm_migrate_to_ram,
 };
 
 /* Each VRAM page uses sizeof(struct page) on system memory */
 #define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page))
 
-int svm_migrate_init(struct amdgpu_device *adev)
+int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
 {
-        struct kfd_dev *kfddev = adev->kfd.dev;
+        struct amdgpu_kfd_dev *kfddev = &adev->kfd;
         struct dev_pagemap *pgmap;
-        struct resource *res;
+        struct resource *res = NULL;
         unsigned long size;
         void *r;
 
-        /* Page migration works on Vega10 or newer */
-        if (kfddev->device_info->asic_family < CHIP_VEGA10)
+        /* Page migration works on gfx9 or newer */
+        if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 1))
                 return -EINVAL;
 
+        if (adev->apu_prefer_gtt)
+                return 0;
+
         pgmap = &kfddev->pgmap;
         memset(pgmap, 0, sizeof(*pgmap));
 
@@ -880,22 +1038,33 @@ int svm_migrate_init(struct amdgpu_device *adev)
         * should remove reserved size
         */
         size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);
-        res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
-        if (IS_ERR(res))
-                return -ENOMEM;
+        if (adev->gmc.xgmi.connected_to_cpu) {
+                pgmap->range.start = adev->gmc.aper_base;
+                pgmap->range.end = adev->gmc.aper_base + adev->gmc.aper_size - 1;
+                pgmap->type = MEMORY_DEVICE_COHERENT;
+        } else {
+                res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
+                if (IS_ERR(res))
+                        return PTR_ERR(res);
+                pgmap->range.start = res->start;
+                pgmap->range.end = res->end;
+                pgmap->type = MEMORY_DEVICE_PRIVATE;
+        }
 
-        pgmap->type = MEMORY_DEVICE_PRIVATE;
         pgmap->nr_range = 1;
-        pgmap->range.start = res->start;
-        pgmap->range.end = res->end;
         pgmap->ops = &svm_migrate_pgmap_ops;
         pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
-        pgmap->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
+        pgmap->flags = 0;
 
+        /* Device manager releases device-specific resources, memory region and
+         * pgmap when driver disconnects from device.
+         */
         r = devm_memremap_pages(adev->dev, pgmap);
         if (IS_ERR(r)) {
                 pr_err("failed to register HMM device memory\n");
-                devm_release_mem_region(adev->dev, res->start,
-                                        res->end - res->start + 1);
+                if (pgmap->type == MEMORY_DEVICE_PRIVATE)
+                        devm_release_mem_region(adev->dev, res->start, resource_size(res));
+                /* Disable SVM support capability */
+                pgmap->type = 0;
                 return PTR_ERR(r);
         }
 
@@ -908,12 +1077,3 @@ int svm_migrate_init(struct amdgpu_device *adev)
 
         return 0;
 }
-
-void svm_migrate_fini(struct amdgpu_device *adev)
-{
-        struct dev_pagemap *pgmap = &adev->kfd.dev->pgmap;
-
-        devm_memunmap_pages(adev->dev, pgmap);
-        devm_release_mem_region(adev->dev, pgmap->range.start,
-                                pgmap->range.end - pgmap->range.start + 1);
-}
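
The central pattern throughout this patch is the three-phase migrate_vma API: migrate_vma_setup() collects and unmaps the source pages, the driver allocates destination pages and copies the data, then migrate_vma_pages() and migrate_vma_finalize() install the new pages and release the old ones. Below is a minimal, self-contained sketch of that flow for the RAM-to-device direction; my_alloc_device_page() and my_copy_to_device() are hypothetical stand-ins for the driver-specific pieces (here, svm_migrate_copy_to_vram() and the SDMA copy), not real kernel APIs.

    #include <linux/migrate.h>
    #include <linux/mm.h>

    static long my_migrate_range_to_device(struct vm_area_struct *vma,
                                           unsigned long start, unsigned long end,
                                           void *pgmap_owner)
    {
            unsigned long npages = (end - start) >> PAGE_SHIFT;
            struct migrate_vma migrate = {
                    .vma         = vma,
                    .start       = start,
                    .end         = end,
                    .flags       = MIGRATE_VMA_SELECT_SYSTEM,
                    .pgmap_owner = pgmap_owner,
            };
            unsigned long i, mpages = 0;
            int r;

            /* One src and one dst pfn slot per page, as kfd_migrate.c does. */
            migrate.src = kvcalloc(npages, 2 * sizeof(*migrate.src), GFP_KERNEL);
            if (!migrate.src)
                    return -ENOMEM;
            migrate.dst = migrate.src + npages;

            /* Phase 1: isolate and unmap the source pages. */
            r = migrate_vma_setup(&migrate);
            if (r)
                    goto out;

            /* Phase 2: allocate device pages, copy only migratable entries. */
            for (i = 0; i < npages; i++) {
                    struct page *dpage;

                    if (!(migrate.src[i] & MIGRATE_PFN_MIGRATE))
                            continue;
                    dpage = my_alloc_device_page();         /* hypothetical */
                    if (!dpage)
                            continue;  /* dst[i] stays 0: page stays in RAM */
                    my_copy_to_device(dpage,                /* hypothetical */
                                      migrate_pfn_to_page(migrate.src[i]));
                    migrate.dst[i] = migrate_pfn(page_to_pfn(dpage));
            }

            /* Phase 3: install the new pages, then drop the old ones. */
            migrate_vma_pages(&migrate);
            migrate_vma_finalize(&migrate);

            /* Same success accounting as svm_migrate_successful_pages(). */
            for (i = 0; i < npages; i++)
                    if (migrate.dst[i] & MIGRATE_PFN_VALID &&
                        migrate.src[i] & MIGRATE_PFN_MIGRATE)
                            mpages++;
    out:
            kvfree(migrate.src);
            return r ? r : mpages;
    }

Returning the migrated page count rather than 0 on success is exactly the signature change this patch makes to svm_migrate_vma_to_vram() and svm_migrate_vma_to_ram(), so callers can account partial migrations instead of assuming all-or-nothing.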
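For context on the kgd2kfd_init_zone_device() changes at the end of the diff: unaddressable VRAM is registered as MEMORY_DEVICE_PRIVATE behind a borrowed physical address range, while CPU-coherent (XGMI-attached) VRAM registers its real aperture as MEMORY_DEVICE_COHERENT. A condensed sketch of that decision, using the real devm_* APIs but hypothetical function and parameter names:

    #include <linux/memremap.h>
    #include <linux/ioport.h>

    static int my_register_vram_pages(struct device *dev,
                                      struct dev_pagemap *pgmap,
                                      const struct dev_pagemap_ops *ops,
                                      u64 vram_size, bool coherent,
                                      u64 aper_base, u64 aper_size)
    {
            void *p;

            if (coherent) {
                    /* CPU-addressable VRAM: register the real aperture. */
                    pgmap->type = MEMORY_DEVICE_COHERENT;
                    pgmap->range.start = aper_base;
                    pgmap->range.end = aper_base + aper_size - 1;
            } else {
                    /* Unaddressable VRAM: borrow a free physical range. */
                    struct resource *res;

                    res = devm_request_free_mem_region(dev, &iomem_resource,
                                                       vram_size);
                    if (IS_ERR(res))
                            return PTR_ERR(res);
                    pgmap->type = MEMORY_DEVICE_PRIVATE;
                    pgmap->range.start = res->start;
                    pgmap->range.end = res->end;
            }
            pgmap->nr_range = 1;
            pgmap->ops = ops;
            pgmap->owner = dev;  /* must match migrate_vma.pgmap_owner */

            /* Allocates one struct page per VRAM page; devm frees on detach. */
            p = devm_memremap_pages(dev, pgmap);
            return IS_ERR(p) ? PTR_ERR(p) : 0;
    }

The owner cookie is what lets migrate_vma_setup() select only this driver's device pages; kfd_migrate.c uses SVM_ADEV_PGMAP_OWNER(adev) for both sides of that handshake.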
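The rewritten svm_migrate_to_ram() fault handler no longer splits the range on a CPU fault; it migrates a granularity-aligned window around the faulting page, clamped to the prange. A worked example with assumed values (the default granularity is 9, i.e. 512 pages):

    /* Fault at page addr = 0x1234 inside prange [0x1000, 0x1fff]: */
    size  = 1UL << 9;                                     /* 0x200 pages  */
    start = max(ALIGN_DOWN(0x1234, 0x200), 0x1000UL);     /* = 0x1200     */
    last  = min(ALIGN(0x1234 + 1, 0x200) - 1, 0x1fffUL);  /* = 0x13ff     */

So a single CPU fault pulls back at most one 2MB-aligned chunk of pages rather than the whole range, and the new per-prange vram_pages counter tracks how many pages are left in VRAM.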
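Finally, svm_migrate_copy_memory_gart() works around the fact that system pages are not in the GPU's address space: each chunk of system memory is temporarily mapped into a small reserved GART window so SDMA can reach it, and the copy proceeds one window at a time under gtt_window_lock. The shape of that loop, with my_map_gart_window() and my_sdma_copy() as hypothetical stand-ins for svm_migrate_gart_map() and amdgpu_copy_buffer(), and VRAM simplified to one contiguous GPU address (the window size of 512 pages is an assumption matching AMDGPU_GTT_MAX_TRANSFER_SIZE):

    static int my_copy_ram_to_vram(dma_addr_t *sys, u64 vram_gpu_addr,
                                   u64 npages)
    {
            const u64 window = 512;  /* pages per GART window, assumed */

            while (npages) {
                    u64 size = min(window, npages);
                    u64 gart_s, gart_d;
                    int r;

                    /* Map this chunk of system pages into the GART window. */
                    gart_s = my_map_gart_window(sys, size);  /* hypothetical */
                    gart_d = vram_gpu_addr;  /* VRAM is already GPU-visible */

                    r = my_sdma_copy(gart_s, gart_d, size * PAGE_SIZE);
                    if (r)
                            return r;

                    sys += size;
                    vram_gpu_addr += size * PAGE_SIZE;
                    npages -= size;
            }
            return 0;
    }

Only the side that lives in system memory needs the GART mapping; the VRAM side goes through the direct mapping returned by svm_migrate_direct_mapping_addr(), which is why the real function maps just one of the two buffers per iteration depending on the copy direction.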
