Diffstat (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_migrate.c')
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c  339
1 file changed, 182 insertions(+), 157 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 709ac885ca6d..af53e796ea1b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -21,7 +21,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/types.h>
-#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
@@ -39,22 +38,22 @@
#endif
#define dev_fmt(fmt) "kfd_migrate: " fmt
-static uint64_t
-svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
+static u64
+svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, u64 addr)
{
return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
}
static int
-svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
- dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
+svm_migrate_gart_map(struct amdgpu_ring *ring, u64 npages,
+ dma_addr_t *addr, u64 *gart_addr, u64 flags)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
unsigned int num_dw, num_bytes;
struct dma_fence *fence;
- uint64_t src_addr, dst_addr;
- uint64_t pte_flags;
+ u64 src_addr, dst_addr;
+ u64 pte_flags;
void *cpu_addr;
int r;
@@ -68,7 +67,8 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4 + num_bytes,
AMDGPU_IB_POOL_DELAYED,
- &job);
+ &job,
+ AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP);
if (r)
return r;
@@ -77,7 +77,7 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
- dst_addr, num_bytes, false);
+ dst_addr, num_bytes, 0);
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
@@ -122,15 +122,15 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
static int
svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
- uint64_t *vram, uint64_t npages,
+ u64 *vram, u64 npages,
enum MIGRATION_COPY_DIR direction,
struct dma_fence **mfence)
{
- const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
+ const u64 GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
- uint64_t gart_s, gart_d;
+ u64 gart_s, gart_d;
struct dma_fence *next;
- uint64_t size;
+ u64 size;
int r;
mutex_lock(&adev->mman.gtt_window_lock);
@@ -153,7 +153,7 @@ svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
}
r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
- NULL, &next, false, true, false);
+ NULL, &next, false, true, 0);
if (r) {
dev_err(adev->dev, "fail %d to copy memory\n", r);
goto out_unlock;
@@ -217,7 +217,7 @@ svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
page = pfn_to_page(pfn);
svm_range_bo_ref(prange->svm_bo);
page->zone_device_data = prange->svm_bo;
- zone_device_page_init(page);
+ zone_device_page_init(page, 0);
}
static void
@@ -262,64 +262,54 @@ static void svm_migrate_put_sys_page(unsigned long addr)
static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
{
- unsigned long cpages = 0;
+ unsigned long mpages = 0;
unsigned long i;
for (i = 0; i < migrate->npages; i++) {
- if (migrate->src[i] & MIGRATE_PFN_VALID &&
+ if (migrate->dst[i] & MIGRATE_PFN_VALID &&
migrate->src[i] & MIGRATE_PFN_MIGRATE)
- cpages++;
- }
- return cpages;
-}
-
-static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
-{
- unsigned long upages = 0;
- unsigned long i;
-
- for (i = 0; i < migrate->npages; i++) {
- if (migrate->src[i] & MIGRATE_PFN_VALID &&
- !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
- upages++;
+ mpages++;
}
- return upages;
+ return mpages;
}
static int
svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
- dma_addr_t *scratch, uint64_t ttm_res_offset)
+ dma_addr_t *scratch, u64 ttm_res_offset)
{
- uint64_t npages = migrate->cpages;
+ u64 npages = migrate->npages;
struct amdgpu_device *adev = node->adev;
struct device *dev = adev->dev;
struct amdgpu_res_cursor cursor;
+ u64 mpages = 0;
dma_addr_t *src;
- uint64_t *dst;
- uint64_t i, j;
+ u64 *dst;
+ u64 i, j;
int r;
pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
prange->last, ttm_res_offset);
src = scratch;
- dst = (uint64_t *)(scratch + npages);
+ dst = (u64 *)(scratch + npages);
amdgpu_res_first(prange->ttm_res, ttm_res_offset,
npages << PAGE_SHIFT, &cursor);
- for (i = j = 0; i < npages; i++) {
+ for (i = j = 0; (i < npages) && (mpages < migrate->cpages); i++) {
struct page *spage;
- dst[i] = cursor.start + (j << PAGE_SHIFT);
- migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
- svm_migrate_get_vram_page(prange, migrate->dst[i]);
- migrate->dst[i] = migrate_pfn(migrate->dst[i]);
-
+ if (migrate->src[i] & MIGRATE_PFN_MIGRATE) {
+ dst[i] = cursor.start + (j << PAGE_SHIFT);
+ migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
+ svm_migrate_get_vram_page(prange, migrate->dst[i]);
+ migrate->dst[i] = migrate_pfn(migrate->dst[i]);
+ mpages++;
+ }
spage = migrate_pfn_to_page(migrate->src[i]);
if (spage && !is_zone_device_page(spage)) {
src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
- DMA_TO_DEVICE);
+ DMA_BIDIRECTIONAL);
r = dma_mapping_error(dev, src[i]);
if (r) {
dev_err(dev, "%s: fail %d dma_map_page\n",
@@ -366,9 +356,12 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
out_free_vram_pages:
if (r) {
pr_debug("failed %d to copy memory to vram\n", r);
- while (i--) {
+ for (i = 0; i < npages && mpages; i++) {
+ if (!dst[i])
+ continue;
svm_migrate_put_vram_page(adev, dst[i]);
migrate->dst[i] = 0;
+ mpages--;
}
}
@@ -392,16 +385,17 @@ out_free_vram_pages:
static long
svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
- struct vm_area_struct *vma, uint64_t start,
- uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
+ struct vm_area_struct *vma, u64 start,
+ u64 end, uint32_t trigger, u64 ttm_res_offset)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
- uint64_t npages = (end - start) >> PAGE_SHIFT;
+ u64 npages = (end - start) >> PAGE_SHIFT;
struct amdgpu_device *adev = node->adev;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
struct migrate_vma migrate = { 0 };
unsigned long cpages = 0;
+ unsigned long mpages = 0;
dma_addr_t *scratch;
void *buf;
int r = -ENOMEM;
@@ -414,7 +408,7 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
buf = kvcalloc(npages,
- 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
+ 2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t),
GFP_KERNEL);
if (!buf)
goto out;
@@ -442,36 +436,35 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
goto out_free;
}
if (cpages != npages)
- pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
+ pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
cpages, npages);
else
- pr_debug("0x%lx pages migrated\n", cpages);
+ pr_debug("0x%lx pages collected\n", cpages);
r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
migrate_vma_pages(&migrate);
- pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
- svm_migrate_successful_pages(&migrate), cpages, migrate.npages);
-
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
- kfd_smi_event_migration_end(node, p->lead_thread->pid,
- start >> PAGE_SHIFT, end >> PAGE_SHIFT,
- 0, node->id, trigger);
+ mpages = svm_migrate_successful_pages(&migrate);
+ pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
+ mpages, cpages, migrate.npages);
- svm_range_dma_unmap(adev->dev, scratch, 0, npages);
- svm_range_free_dma_mappings(prange);
+ svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
out_free:
kvfree(buf);
+ kfd_smi_event_migration_end(node, p->lead_thread->pid,
+ start >> PAGE_SHIFT, end >> PAGE_SHIFT,
+ 0, node->id, trigger, r);
out:
- if (!r && cpages) {
+ if (!r && mpages) {
pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
- WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);
+ WRITE_ONCE(pdd->page_in, pdd->page_in + mpages);
- return cpages;
+ return mpages;
}
return r;
}
@@ -480,6 +473,8 @@ out:
* svm_migrate_ram_to_vram - migrate svm range from system to device
* @prange: range structure
* @best_loc: the device to migrate to
+ * @start_mgr: start page to migrate
+ * @last_mgr: last page to migrate
* @mm: the process mm structure
* @trigger: reason of migration
*
@@ -490,19 +485,20 @@ out:
*/
static int
svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
+ unsigned long start_mgr, unsigned long last_mgr,
struct mm_struct *mm, uint32_t trigger)
{
unsigned long addr, start, end;
struct vm_area_struct *vma;
- uint64_t ttm_res_offset;
+ u64 ttm_res_offset;
struct kfd_node *node;
- unsigned long cpages = 0;
+ unsigned long mpages = 0;
long r = 0;
- if (prange->actual_loc == best_loc) {
- pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
- prange->svms, prange->start, prange->last, best_loc);
- return 0;
+ if (start_mgr < prange->start || last_mgr > prange->last) {
+ pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
+ start_mgr, last_mgr, prange->start, prange->last);
+ return -EFAULT;
}
node = svm_range_get_node_by_id(prange, best_loc);
@@ -511,18 +507,28 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
return -ENODEV;
}
- pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms,
- prange->start, prange->last, best_loc);
+ pr_debug("svms 0x%p [0x%lx 0x%lx] in [0x%lx 0x%lx] to gpu 0x%x\n",
+ prange->svms, start_mgr, last_mgr, prange->start, prange->last,
+ best_loc);
- start = prange->start << PAGE_SHIFT;
- end = (prange->last + 1) << PAGE_SHIFT;
+ start = start_mgr << PAGE_SHIFT;
+ end = (last_mgr + 1) << PAGE_SHIFT;
+
+ r = amdgpu_amdkfd_reserve_mem_limit(node->adev,
+ prange->npages * PAGE_SIZE,
+ KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
+ node->xcp ? node->xcp->id : 0);
+ if (r) {
+ dev_dbg(node->adev->dev, "failed to reserve VRAM, r: %ld\n", r);
+ return -ENOSPC;
+ }
r = svm_range_vram_node_new(node, prange, true);
if (r) {
dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
- return r;
+ goto out;
}
- ttm_res_offset = prange->offset << PAGE_SHIFT;
+ ttm_res_offset = (start_mgr - prange->start + prange->offset) << PAGE_SHIFT;
for (addr = start; addr < end;) {
unsigned long next;
@@ -537,22 +543,33 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
pr_debug("failed %ld to migrate\n", r);
break;
} else {
- cpages += r;
+ mpages += r;
}
ttm_res_offset += next - addr;
addr = next;
}
- if (cpages)
+ if (mpages) {
prange->actual_loc = best_loc;
- else
+ prange->vram_pages += mpages;
+ } else if (!prange->actual_loc) {
+ /* if no pages migrated and all pages of prange are in
+ * sys ram, drop the svm_bo obtained from svm_range_vram_node_new
+ */
svm_range_vram_node_free(prange);
+ }
+out:
+ amdgpu_amdkfd_unreserve_mem_limit(node->adev,
+ prange->npages * PAGE_SIZE,
+ KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
+ node->xcp ? node->xcp->id : 0);
return r < 0 ? r : 0;
}
-static void svm_migrate_page_free(struct page *page)
+static void svm_migrate_folio_free(struct folio *folio)
{
+ struct page *page = &folio->page;
struct svm_range_bo *svm_bo = page->zone_device_data;
if (svm_bo) {
@@ -564,22 +581,22 @@ static void svm_migrate_page_free(struct page *page)
static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
- dma_addr_t *scratch, uint64_t npages)
+ dma_addr_t *scratch, u64 npages)
{
struct device *dev = adev->dev;
- uint64_t *src;
+ u64 *src;
dma_addr_t *dst;
struct page *dpage;
- uint64_t i = 0, j;
- uint64_t addr;
+ u64 i = 0, j;
+ u64 addr;
int r = 0;
pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
prange->last);
- addr = prange->start << PAGE_SHIFT;
+ addr = migrate->start;
- src = (uint64_t *)(scratch + npages);
+ src = (u64 *)(scratch + npages);
dst = scratch;
for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
@@ -619,7 +636,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
goto out_oom;
}
- dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
r = dma_mapping_error(dev, dst[i]);
if (r) {
dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r);
@@ -662,19 +679,18 @@ out_oom:
* Context: Process context, caller hold mmap read lock, prange->migrate_mutex
*
* Return:
- * 0 - success with all pages migrated
* negative values - indicate error
- * positive values - partial migration, number of pages not migrated
+ * positive values or zero - number of pages got migrated
*/
static long
svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
- struct vm_area_struct *vma, uint64_t start, uint64_t end,
+ struct vm_area_struct *vma, u64 start, u64 end,
uint32_t trigger, struct page *fault_page)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
- uint64_t npages = (end - start) >> PAGE_SHIFT;
- unsigned long upages = npages;
+ u64 npages = (end - start) >> PAGE_SHIFT;
unsigned long cpages = 0;
+ unsigned long mpages = 0;
struct amdgpu_device *adev = node->adev;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
@@ -694,7 +710,7 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
buf = kvcalloc(npages,
- 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
+ 2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t),
GFP_KERNEL);
if (!buf)
goto out;
@@ -720,47 +736,48 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
if (!cpages) {
pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
prange->start, prange->last);
- upages = svm_migrate_unsuccessful_pages(&migrate);
goto out_free;
}
if (cpages != npages)
- pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
+ pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
cpages, npages);
else
- pr_debug("0x%lx pages migrated\n", cpages);
+ pr_debug("0x%lx pages collected\n", cpages);
r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
scratch, npages);
migrate_vma_pages(&migrate);
- upages = svm_migrate_unsuccessful_pages(&migrate);
- pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
- upages, cpages, migrate.npages);
+ mpages = svm_migrate_successful_pages(&migrate);
+ pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
+ mpages, cpages, migrate.npages);
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
- kfd_smi_event_migration_end(node, p->lead_thread->pid,
- start >> PAGE_SHIFT, end >> PAGE_SHIFT,
- node->id, 0, trigger);
-
- svm_range_dma_unmap(adev->dev, scratch, 0, npages);
+ svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
out_free:
kvfree(buf);
+ kfd_smi_event_migration_end(node, p->lead_thread->pid,
+ start >> PAGE_SHIFT, end >> PAGE_SHIFT,
+ node->id, 0, trigger, r);
out:
- if (!r && cpages) {
+ if (!r && mpages) {
pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
- WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
+ WRITE_ONCE(pdd->page_out, pdd->page_out + mpages);
}
- return r ? r : upages;
+
+ return r ? r : mpages;
}
/**
* svm_migrate_vram_to_ram - migrate svm range from device to system
* @prange: range structure
* @mm: process mm, use current->mm if NULL
+ * @start_mgr: first page that needs to be migrated to sys ram
+ * @last_mgr: last page that needs to be migrated to sys ram
* @trigger: reason of migration
* @fault_page: is from vmf->page, svm_migrate_to_ram(), this is CPU page fault callback
*
@@ -770,6 +787,7 @@ out:
* 0 - OK, otherwise error code
*/
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
+ unsigned long start_mgr, unsigned long last_mgr,
uint32_t trigger, struct page *fault_page)
{
struct kfd_node *node;
@@ -777,26 +795,33 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
unsigned long addr;
unsigned long start;
unsigned long end;
- unsigned long upages = 0;
+ unsigned long mpages = 0;
long r = 0;
+ /* this prange has no vram pages to migrate to sys ram */
if (!prange->actual_loc) {
pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
prange->start, prange->last);
return 0;
}
+ if (start_mgr < prange->start || last_mgr > prange->last) {
+ pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
+ start_mgr, last_mgr, prange->start, prange->last);
+ return -EFAULT;
+ }
+
node = svm_range_get_node_by_id(prange, prange->actual_loc);
if (!node) {
pr_debug("failed to get kfd node by id 0x%x\n", prange->actual_loc);
return -ENODEV;
}
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
- prange->svms, prange, prange->start, prange->last,
+ prange->svms, prange, start_mgr, last_mgr,
prange->actual_loc);
- start = prange->start << PAGE_SHIFT;
- end = (prange->last + 1) << PAGE_SHIFT;
+ start = start_mgr << PAGE_SHIFT;
+ end = (last_mgr + 1) << PAGE_SHIFT;
for (addr = start; addr < end;) {
unsigned long next;
@@ -815,14 +840,24 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
pr_debug("failed %ld to migrate prange %p\n", r, prange);
break;
} else {
- upages += r;
+ mpages += r;
}
addr = next;
}
- if (r >= 0 && !upages) {
- svm_range_vram_node_free(prange);
- prange->actual_loc = 0;
+ if (r >= 0) {
+ WARN_ONCE(prange->vram_pages < mpages,
+ "Recorded vram pages(0x%llx) should not be less than migration pages(0x%lx).",
+ prange->vram_pages, mpages);
+ prange->vram_pages -= mpages;
+
+ /* prange has no vram pages left, set its actual_loc to system
+ * and drop its svm_bo ref
+ */
+ if (prange->vram_pages == 0 && prange->ttm_res) {
+ prange->actual_loc = 0;
+ svm_range_vram_node_free(prange);
+ }
}
return r < 0 ? r : 0;
@@ -832,17 +867,23 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
* svm_migrate_vram_to_vram - migrate svm range from device to device
* @prange: range structure
* @best_loc: the device to migrate to
+ * @start: start page that needs to be migrated to sys ram
+ * @last: last page that needs to be migrated to sys ram
* @mm: process mm, use current->mm if NULL
* @trigger: reason of migration
*
* Context: Process context, caller hold mmap read lock, svms lock, prange lock
*
+ * migrate all vram pages in prange to sys ram, then migrate
+ * [start, last] pages from sys ram to gpu node best_loc.
+ *
* Return:
* 0 - OK, otherwise error code
*/
static int
svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
- struct mm_struct *mm, uint32_t trigger)
+ unsigned long start, unsigned long last,
+ struct mm_struct *mm, uint32_t trigger)
{
int r, retries = 3;
@@ -854,7 +895,8 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);
do {
- r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL);
+ r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
+ trigger, NULL);
if (r)
return r;
} while (prange->actual_loc && --retries);
@@ -862,17 +904,21 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
if (prange->actual_loc)
return -EDEADLK;
- return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
+ return svm_migrate_ram_to_vram(prange, best_loc, start, last, mm, trigger);
}
int
svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
+ unsigned long start, unsigned long last,
struct mm_struct *mm, uint32_t trigger)
{
- if (!prange->actual_loc)
- return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
+ if (!prange->actual_loc || prange->actual_loc == best_loc)
+ return svm_migrate_ram_to_vram(prange, best_loc, start, last,
+ mm, trigger);
+
else
- return svm_migrate_vram_to_vram(prange, best_loc, mm, trigger);
+ return svm_migrate_vram_to_vram(prange, best_loc, start, last,
+ mm, trigger);
}
@@ -888,10 +934,9 @@ svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
*/
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
+ unsigned long start, last, size;
unsigned long addr = vmf->address;
struct svm_range_bo *svm_bo;
- enum svm_work_list_ops op;
- struct svm_range *parent;
struct svm_range *prange;
struct kfd_process *p;
struct mm_struct *mm;
@@ -928,51 +973,31 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
mutex_lock(&p->svms.lock);
- prange = svm_range_from_addr(&p->svms, addr, &parent);
+ prange = svm_range_from_addr(&p->svms, addr, NULL);
if (!prange) {
pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr);
r = -EFAULT;
goto out_unlock_svms;
}
- mutex_lock(&parent->migrate_mutex);
- if (prange != parent)
- mutex_lock_nested(&prange->migrate_mutex, 1);
+ mutex_lock(&prange->migrate_mutex);
if (!prange->actual_loc)
goto out_unlock_prange;
- svm_range_lock(parent);
- if (prange != parent)
- mutex_lock_nested(&prange->lock, 1);
- r = svm_range_split_by_granularity(p, mm, addr, parent, prange);
- if (prange != parent)
- mutex_unlock(&prange->lock);
- svm_range_unlock(parent);
- if (r) {
- pr_debug("failed %d to split range by granularity\n", r);
- goto out_unlock_prange;
- }
+ /* Align migration range start and size to granularity size */
+ size = 1UL << prange->granularity;
+ start = max(ALIGN_DOWN(addr, size), prange->start);
+ last = min(ALIGN(addr + 1, size) - 1, prange->last);
- r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
- vmf->page);
+ r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, start, last,
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, vmf->page);
if (r)
pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
- r, prange->svms, prange, prange->start, prange->last);
-
- /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
- if (p->xnack_enabled && parent == prange)
- op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP;
- else
- op = SVM_OP_UPDATE_RANGE_NOTIFIER;
- svm_range_add_list_work(&p->svms, parent, mm, op);
- schedule_deferred_list_work(&p->svms);
+ r, prange->svms, prange, start, last);
out_unlock_prange:
- if (prange != parent)
- mutex_unlock(&prange->migrate_mutex);
- mutex_unlock(&parent->migrate_mutex);
+ mutex_unlock(&prange->migrate_mutex);
out_unlock_svms:
mutex_unlock(&p->svms.lock);
out_unref_process:
@@ -984,7 +1009,7 @@ out_mmput:
}
static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
- .page_free = svm_migrate_page_free,
+ .folio_free = svm_migrate_folio_free,
.migrate_to_ram = svm_migrate_to_ram,
};
@@ -1000,10 +1025,10 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
void *r;
/* Page migration works on gfx9 or newer */
- if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 1))
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 1))
return -EINVAL;
- if (adev->gmc.is_app_apu)
+ if (adev->apu_prefer_gtt)
return 0;
pgmap = &kfddev->pgmap;
@@ -1020,7 +1045,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
} else {
res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
if (IS_ERR(res))
- return -ENOMEM;
+ return PTR_ERR(res);
pgmap->range.start = res->start;
pgmap->range.end = res->end;
pgmap->type = MEMORY_DEVICE_PRIVATE;
@@ -1036,10 +1061,10 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
r = devm_memremap_pages(adev->dev, pgmap);
if (IS_ERR(r)) {
pr_err("failed to register HMM device memory\n");
- /* Disable SVM support capability */
- pgmap->type = 0;
if (pgmap->type == MEMORY_DEVICE_PRIVATE)
devm_release_mem_region(adev->dev, res->start, resource_size(res));
+ /* Disable SVM support capability */
+ pgmap->type = 0;
return PTR_ERR(r);
}