Diffstat (limited to 'include/linux/migrate.h')
-rw-r--r--	include/linux/migrate.h	246
1 file changed, 159 insertions(+), 87 deletions(-)
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index a405d3dc0f61..26ca00c325d9 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -1,126 +1,198 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _LINUX_MIGRATE_H
 #define _LINUX_MIGRATE_H
 
 #include <linux/mm.h>
 #include <linux/mempolicy.h>
 #include <linux/migrate_mode.h>
-
-typedef struct page *new_page_t(struct page *, unsigned long private, int **);
-
-/*
- * Return values from addresss_space_operations.migratepage():
- * - negative errno on page migration failure;
- * - zero on page migration success;
+#include <linux/hugetlb.h>
+
+typedef struct folio *new_folio_t(struct folio *folio, unsigned long private);
+typedef void free_folio_t(struct folio *folio, unsigned long private);
+
+struct migration_target_control;
+
+/**
+ * struct movable_operations - Driver page migration
+ * @isolate_page:
+ * The VM calls this function to prepare the page to be moved. The page
+ * is locked and the driver should not unlock it. The driver should
+ * return ``true`` if the page is movable and ``false`` if it is not
+ * currently movable. After this function returns, the VM uses the
+ * page->lru field, so the driver must preserve any information which
+ * is usually stored here.
+ *
+ * @migrate_page:
+ * After isolation, the VM calls this function with the isolated
+ * @src page. The driver should copy the contents of the
+ * @src page to the @dst page and set up the fields of @dst page.
+ * Both pages are locked.
+ * If page migration is successful, the driver should return 0.
+ * If the driver cannot migrate the page at the moment, it can return
+ * -EAGAIN. The VM interprets this as a temporary migration failure and
+ * will retry it later. Any other error value is a permanent migration
+ * failure and migration will not be retried.
+ * The driver shouldn't touch the @src->lru field while in the
+ * migrate_page() function. It may write to @dst->lru.
  *
- * The balloon page migration introduces this special case where a 'distinct'
- * return code is used to flag a successful page migration to unmap_and_move().
- * This approach is necessary because page migration can race against balloon
- * deflation procedure, and for such case we could introduce a nasty page leak
- * if a successfully migrated balloon page gets released concurrently with
- * migration's unmap_and_move() wrap-up steps.
+ * @putback_page:
+ * If migration fails on the isolated page, the VM informs the driver
+ * that the page is no longer a candidate for migration by calling
+ * this function. The driver should put the isolated page back into
+ * its own data structure.
  */
-#define MIGRATEPAGE_SUCCESS		0
-#define MIGRATEPAGE_BALLOON_SUCCESS	1 /* special ret code for balloon page
-					   * sucessful migration case.
-					   */
-enum migrate_reason {
-	MR_COMPACTION,
-	MR_MEMORY_FAILURE,
-	MR_MEMORY_HOTPLUG,
-	MR_SYSCALL,		/* also applies to cpusets */
-	MR_MEMPOLICY_MBIND,
-	MR_NUMA_MISPLACED,
-	MR_CMA
+struct movable_operations {
+	bool (*isolate_page)(struct page *, isolate_mode_t);
+	int (*migrate_page)(struct page *dst, struct page *src,
+			enum migrate_mode);
+	void (*putback_page)(struct page *);
 };
 
+/* Defined in mm/debug.c: */
+extern const char *migrate_reason_names[MR_TYPES];
+
 #ifdef CONFIG_MIGRATION
-extern void putback_lru_pages(struct list_head *l);
-extern void putback_movable_pages(struct list_head *l);
-extern int migrate_page(struct address_space *,
-			struct page *, struct page *, enum migrate_mode);
-extern int migrate_pages(struct list_head *l, new_page_t x,
-		unsigned long private, enum migrate_mode mode, int reason);
-extern int migrate_huge_page(struct page *, new_page_t x,
-		unsigned long private, enum migrate_mode mode);
-
-extern int fail_migrate_page(struct address_space *,
-			struct page *, struct page *);
-
-extern int migrate_prep(void);
-extern int migrate_prep_local(void);
-extern int migrate_vmas(struct mm_struct *mm,
-		const nodemask_t *from, const nodemask_t *to,
-		unsigned long flags);
-extern void migrate_page_copy(struct page *newpage, struct page *page);
-extern int migrate_huge_page_move_mapping(struct address_space *mapping,
-				  struct page *newpage, struct page *page);
+void putback_movable_pages(struct list_head *l);
+int migrate_folio(struct address_space *mapping, struct folio *dst,
+		struct folio *src, enum migrate_mode mode);
+int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
+		  unsigned long private, enum migrate_mode mode, int reason,
+		  unsigned int *ret_succeeded);
+struct folio *alloc_migration_target(struct folio *src, unsigned long private);
+bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode);
+bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
+
+int migrate_huge_page_move_mapping(struct address_space *mapping,
+		struct folio *dst, struct folio *src);
+void migration_entry_wait_on_locked(softleaf_t entry, spinlock_t *ptl)
+		__releases(ptl);
+void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
+int folio_migrate_mapping(struct address_space *mapping,
+		struct folio *newfolio, struct folio *folio, int extra_count);
+int set_movable_ops(const struct movable_operations *ops, enum pagetype type);
+
 #else
-static inline void putback_lru_pages(struct list_head *l) {}
 static inline void putback_movable_pages(struct list_head *l) {}
-static inline int migrate_pages(struct list_head *l, new_page_t x,
-		unsigned long private, enum migrate_mode mode, int reason)
+static inline int migrate_pages(struct list_head *l, new_folio_t new,
+		free_folio_t free, unsigned long private,
+		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
 	{ return -ENOSYS; }
-static inline int migrate_huge_page(struct page *page, new_page_t x,
-		unsigned long private, enum migrate_mode mode)
-	{ return -ENOSYS; }
-
-static inline int migrate_prep(void) { return -ENOSYS; }
-static inline int migrate_prep_local(void) { return -ENOSYS; }
+static inline struct folio *alloc_migration_target(struct folio *src,
+		unsigned long private)
+	{ return NULL; }
+static inline bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
+	{ return false; }
+static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
+	{ return false; }
 
-static inline int migrate_vmas(struct mm_struct *mm,
-		const nodemask_t *from, const nodemask_t *to,
-		unsigned long flags)
+static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
+		struct folio *dst, struct folio *src)
 	{ return -ENOSYS; }
-
-static inline void migrate_page_copy(struct page *newpage,
-				     struct page *page) {}
-
-static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
-				  struct page *newpage, struct page *page)
+static inline int set_movable_ops(const struct movable_operations *ops, enum pagetype type)
 	{ return -ENOSYS; }
 
-/* Possible settings for the migrate_page() method in address_operations */
-#define migrate_page NULL
-#define fail_migrate_page NULL
-
 #endif /* CONFIG_MIGRATION */
 
 #ifdef CONFIG_NUMA_BALANCING
-extern int migrate_misplaced_page(struct page *page, int node);
-extern int migrate_misplaced_page(struct page *page, int node);
-extern bool migrate_ratelimited(int node);
+int migrate_misplaced_folio_prepare(struct folio *folio,
+		struct vm_area_struct *vma, int node);
+int migrate_misplaced_folio(struct folio *folio, int node);
 #else
-static inline int migrate_misplaced_page(struct page *page, int node)
+static inline int migrate_misplaced_folio_prepare(struct folio *folio,
+		struct vm_area_struct *vma, int node)
 {
 	return -EAGAIN; /* can't migrate now */
 }
-static inline bool migrate_ratelimited(int node)
+static inline int migrate_misplaced_folio(struct folio *folio, int node)
 {
-	return false;
+	return -EAGAIN; /* can't migrate now */
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
-#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
-extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
-			struct vm_area_struct *vma,
-			pmd_t *pmd, pmd_t entry,
-			unsigned long address,
-			struct page *page, int node);
-#else
-static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
-			struct vm_area_struct *vma,
-			pmd_t *pmd, pmd_t entry,
-			unsigned long address,
-			struct page *page, int node)
+#ifdef CONFIG_MIGRATION
+
+/*
+ * Watch out for PAE architecture, which has an unsigned long, and might not
+ * have enough bits to store all physical address and flags. So far we have
+ * enough room for all our flags.
+ */
+#define MIGRATE_PFN_VALID	(1UL << 0)
+#define MIGRATE_PFN_MIGRATE	(1UL << 1)
+#define MIGRATE_PFN_WRITE	(1UL << 3)
+#define MIGRATE_PFN_COMPOUND	(1UL << 4)
+#define MIGRATE_PFN_SHIFT	6
+
+static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
+{
+	if (!(mpfn & MIGRATE_PFN_VALID))
+		return NULL;
+	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
+}
+
+static inline unsigned long migrate_pfn(unsigned long pfn)
 {
-	return -EAGAIN;
+	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
 }
-#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE*/
+
+enum migrate_vma_direction {
+	MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
+	MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
+	MIGRATE_VMA_SELECT_DEVICE_COHERENT = 1 << 2,
+	MIGRATE_VMA_SELECT_COMPOUND = 1 << 3,
+};
+
+struct migrate_vma {
+	struct vm_area_struct	*vma;
+	/*
+	 * Both src and dst array must be big enough for
+	 * (end - start) >> PAGE_SHIFT entries.
+	 *
+	 * The src array must not be modified by the caller after
+	 * migrate_vma_setup(), and must not change the dst array after
+	 * migrate_vma_pages() returns.
+	 */
+	unsigned long		*dst;
+	unsigned long		*src;
+	unsigned long		cpages;
+	unsigned long		npages;
+	unsigned long		start;
+	unsigned long		end;
+
+	/*
+	 * Set to the owner value also stored in page_pgmap(page)->owner
+	 * for migrating out of device private memory. The flags also need to
+	 * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE.
+	 * The caller should always set this field when using mmu notifier
+	 * callbacks to avoid device MMU invalidations for device private
+	 * pages that are not being migrated.
+	 */
+	void			*pgmap_owner;
+	unsigned long		flags;
+
+	/*
+	 * Set to vmf->page if this is being called to migrate a page as part of
+	 * a migrate_to_ram() callback.
+	 */
+	struct page		*fault_page;
+};
+
+int migrate_vma_setup(struct migrate_vma *args);
+void migrate_vma_pages(struct migrate_vma *migrate);
+void migrate_vma_finalize(struct migrate_vma *migrate);
+int migrate_device_range(unsigned long *src_pfns, unsigned long start,
+			unsigned long npages);
+int migrate_device_pfns(unsigned long *src_pfns, unsigned long npages);
+void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
+			unsigned long npages);
+void migrate_device_finalize(unsigned long *src_pfns,
+			unsigned long *dst_pfns, unsigned long npages);
+
+#endif /* CONFIG_MIGRATION */
 
 #endif	/* _LINUX_MIGRATE_H */
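For orientation, below is a minimal sketch of how a driver might implement the movable_operations interface this patch introduces. Only the three callback signatures, their documented semantics, and set_movable_ops() come from the header above; the mydrv_* helpers and the driver's internal state handling are hypothetical.

#include <linux/migrate.h>
#include <linux/highmem.h>

static bool mydrv_isolate_page(struct page *page, isolate_mode_t mode)
{
	/*
	 * The VM holds the page lock. Report whether the page can be
	 * moved right now; after returning true the VM owns page->lru,
	 * so any driver state kept there must live elsewhere.
	 */
	return mydrv_page_is_idle(page);		/* hypothetical */
}

static int mydrv_migrate_page(struct page *dst, struct page *src,
			      enum migrate_mode mode)
{
	if (!mydrv_trylock_state(src))			/* hypothetical */
		return -EAGAIN;	/* temporary failure; the VM retries */

	copy_highpage(dst, src);	/* copy the page contents */
	mydrv_transfer_state(dst, src);			/* hypothetical */
	mydrv_unlock_state(dst);			/* hypothetical */
	return 0;	/* any other error ends migration for good */
}

static void mydrv_putback_page(struct page *page)
{
	/* Migration failed; take the isolated page back. */
	mydrv_requeue_page(page);			/* hypothetical */
}

static const struct movable_operations mydrv_movable_ops = {
	.isolate_page	= mydrv_isolate_page,
	.migrate_page	= mydrv_migrate_page,
	.putback_page	= mydrv_putback_page,
};

Registration would then go through set_movable_ops(&mydrv_movable_ops, type) for whichever enum pagetype value the driver owns; the concrete pagetype values are not visible in this diff.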

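The migrate_vma half of the header is easiest to read as a three-step protocol: migrate_vma_setup() collects and isolates candidate pages into args->src, the caller allocates destinations and fills args->dst, and migrate_vma_pages() plus migrate_vma_finalize() complete or roll back the move. A hedged sketch of migrating device-private pages back to system RAM, assuming a hypothetical mydrv_alloc_dst_page() allocator and a pgmap owner cookie the driver established elsewhere:

#include <linux/migrate.h>
#include <linux/slab.h>

static int mydrv_migrate_to_ram_range(struct vm_area_struct *vma,
				      unsigned long start, unsigned long end,
				      void *pgmap_owner)
{
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	struct migrate_vma args = {
		.vma		= vma,
		.start		= start,
		.end		= end,
		.pgmap_owner	= pgmap_owner,	/* matches page_pgmap(page)->owner */
		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
	};
	unsigned long i;
	int ret = -ENOMEM;

	args.src = kcalloc(npages, sizeof(*args.src), GFP_KERNEL);
	args.dst = kcalloc(npages, sizeof(*args.dst), GFP_KERNEL);
	if (!args.src || !args.dst)
		goto out;

	ret = migrate_vma_setup(&args);
	if (ret)
		goto out;

	/* Provide a destination for each entry the VM could isolate. */
	for (i = 0; i < args.npages; i++) {
		struct page *newpage;

		if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
			continue;	/* not migratable this time */
		newpage = mydrv_alloc_dst_page();	/* hypothetical */
		if (newpage)
			args.dst[i] = migrate_pfn(page_to_pfn(newpage));
	}

	/*
	 * The driver would copy the data here (e.g. by DMA) before the
	 * CPU page tables are switched over to the new pages.
	 */
	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
out:
	kfree(args.src);
	kfree(args.dst);
	return ret;
}

Note that the source page behind each isolated entry is recoverable with migrate_pfn_to_page(args.src[i]), and that, per the comment in struct migrate_vma, the caller must not touch args.src after migrate_vma_setup() or args.dst after migrate_vma_pages().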