author	Linus Torvalds <torvalds@linux-foundation.org>	2020-08-05 13:28:50 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-08-05 13:28:50 -0700
commit	fffe3ae0ee84e25d2befe2ae59bc32aa2b6bc77b (patch)
tree	80db9b520298091787d70772530f51b90afb2709 /mm/migrate.c
parent	8f7be6291529011a58856bf178f52ed5751c68ac (diff)
parent	7d17e83abec1be3355260b3e4812044c65c32907 (diff)
Merge tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull hmm updates from Jason Gunthorpe:
 "Ralph has been working on nouveau's use of hmm_range_fault() and
  migrate_vma() which resulted in this small series. It adds reporting
  of the page table order from hmm_range_fault() and some optimization
  of migrate_vma():

   - Report the size of the page table mapping out of hmm_range_fault().
     This makes it easier to establish a large/huge/etc mapping in the
     device's page table.

   - Allow devices to ignore the invalidations during migration in cases
     where the migration is not going to change pages. For instance,
     migrating pages to a device does not require the device to
     invalidate pages already in the device.

   - Update nouveau and hmm_tests to use the above"

* tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  mm/hmm/test: use the new migration invalidation
  nouveau/svm: use the new migration invalidation
  mm/notifier: add migration invalidation type
  mm/migrate: add a flags parameter to migrate_vma
  nouveau: fix storing invalid ptes
  nouveau/hmm: support mapping large sysmem pages
  nouveau: fix mapping 2MB sysmem pages
  nouveau/hmm: fault one page at a time
  mm/hmm: add tests for hmm_pfn_to_map_order()
  mm/hmm: provide the page mapping order in hmm_range_fault()
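To make the reworked selection interface concrete, here is a minimal sketch of a driver migrating ordinary system memory toward device memory with the migrate_vma() API after this merge. The function name demo_migrate_to_device, the NDEMO_PAGES limit, and the device_owner token are illustrative stand-ins, not code from this series; the struct migrate_vma fields and the MIGRATE_VMA_SELECT_* flags are the ones this series introduces.

#include <linux/migrate.h>

/* at most this many pages per call in this sketch (arbitrary) */
#define NDEMO_PAGES 64

static int demo_migrate_to_device(struct vm_area_struct *vma,
                                  unsigned long start, unsigned long end,
                                  void *device_owner)
{
        unsigned long src[NDEMO_PAGES] = { 0 };
        unsigned long dst[NDEMO_PAGES] = { 0 };
        struct migrate_vma args = {
                .vma            = vma,
                .start          = start,
                .end            = end,          /* <= NDEMO_PAGES pages */
                .src            = src,
                .dst            = dst,
                /* token matching this driver's pgmap->owner */
                .pgmap_owner    = device_owner,
                /* only ordinary system pages are migration candidates */
                .flags          = MIGRATE_VMA_SELECT_SYSTEM,
        };
        int ret;

        ret = migrate_vma_setup(&args);
        if (ret || !args.cpages)
                return ret;

        /* the driver would allocate device pages and fill args.dst here */

        migrate_vma_pages(&args);
        migrate_vma_finalize(&args);
        return 0;
}

Before this series the selection was keyed only off the old src_owner field; the flags field now states explicitly whether system pages, device private pages, or both should be collected, as the diff below shows.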
Diffstat (limited to 'mm/migrate.c')
 mm/migrate.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 40cd7016ae6f..4fcc465736ff 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2276,7 +2276,9 @@ again:
                                 goto next;
 
                         page = device_private_entry_to_page(entry);
-                        if (page->pgmap->owner != migrate->src_owner)
+                        if (!(migrate->flags &
+                              MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
+                            page->pgmap->owner != migrate->pgmap_owner)
                                 goto next;
 
                         mpfn = migrate_pfn(page_to_pfn(page)) |
@@ -2284,7 +2286,7 @@
                         if (is_write_device_private_entry(entry))
                                 mpfn |= MIGRATE_PFN_WRITE;
                 } else {
-                        if (migrate->src_owner)
+                        if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
                                 goto next;
                         pfn = pte_pfn(pte);
                         if (is_zero_pfn(pfn)) {
@@ -2379,8 +2381,14 @@ static void migrate_vma_collect(struct migrate_vma *migrate)
 {
         struct mmu_notifier_range range;
 
-        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL,
+        /*
+         * Note that the pgmap_owner is passed to the mmu notifier callback so
+         * that the registered device driver can skip invalidating device
+         * private page mappings that won't be migrated.
+         */
+        mmu_notifier_range_init(&range, MMU_NOTIFY_MIGRATE, 0, migrate->vma,
                         migrate->vma->vm_mm, migrate->start, migrate->end);
+        range.migrate_pgmap_owner = migrate->pgmap_owner;
         mmu_notifier_invalidate_range_start(&range);
 
         walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
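On the consumer side, the new MMU_NOTIFY_MIGRATE event together with range->migrate_pgmap_owner lets a driver recognize invalidations raised by its own migrate_vma() calls and skip them, which is what the updated nouveau and hmm_test code in this series does. Below is a rough sketch of such a callback, loosely modeled on the dmirror test driver; struct demo_device, its owner field, and the elided teardown are placeholders, while the range->event and range->migrate_pgmap_owner checks are the real interface added here.

#include <linux/mmu_notifier.h>

struct demo_device {
        struct mmu_interval_notifier notifier;
        void *owner;    /* same token the driver passes as pgmap_owner */
};

static bool demo_interval_invalidate(struct mmu_interval_notifier *mni,
                                     const struct mmu_notifier_range *range,
                                     unsigned long cur_seq)
{
        struct demo_device *ddev =
                container_of(mni, struct demo_device, notifier);

        /*
         * Ignore invalidation callbacks for device private pages since
         * the invalidation is handled as part of the migration process.
         */
        if (range->event == MMU_NOTIFY_MIGRATE &&
            range->migrate_pgmap_owner == ddev->owner)
                return true;

        mmu_interval_set_seq(mni, cur_seq);
        /* ... tear down device mappings for range->start .. range->end ... */
        return true;
}

A migration that only moves pages into device memory thus no longer forces the device to throw away mappings it already holds; invalidations from any other source still take the normal teardown path.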