Diffstat (limited to 'include/linux/mempolicy.h')
-rw-r--r--   include/linux/mempolicy.h | 162
1 file changed, 76 insertions(+), 86 deletions(-)
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 0d7df39a5885..0fe96f3ab3ef 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * NUMA memory policies for Linux.
  * Copyright 2003,2004 Andi Kleen SuSE Labs
@@ -5,17 +6,20 @@
 #ifndef _LINUX_MEMPOLICY_H
 #define _LINUX_MEMPOLICY_H 1
 
-
+#include <linux/sched.h>
 #include <linux/mmzone.h>
 #include <linux/slab.h>
 #include <linux/rbtree.h>
 #include <linux/spinlock.h>
+#include <linux/node.h>
 #include <linux/nodemask.h>
 #include <linux/pagemap.h>
 #include <uapi/linux/mempolicy.h>
 
 struct mm_struct;
 
+#define NO_INTERLEAVE_INDEX (-1UL)	/* use task il_prev for interleaving */
+
 #ifdef CONFIG_NUMA
 
 /*
@@ -26,10 +30,10 @@ struct mm_struct;
  * the process policy is used. Interrupts ignore the memory policy
  * of the current process.
  *
- * Locking policy for interlave:
+ * Locking policy for interleave:
  * In process context there is no locking because only the process accesses
  * its own state. All vma manipulation is somewhat protected by a down_read on
- * mmap_sem.
+ * mmap_lock.
  *
  * Freeing policy:
  * Mempolicy objects are reference counted. A mempolicy will be freed when
@@ -44,11 +48,9 @@ struct mempolicy {
 	atomic_t refcnt;
 	unsigned short mode;	/* See MPOL_* above */
 	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
-	union {
-		short		preferred_node;	/* preferred */
-		nodemask_t	nodes;		/* interleave/bind */
-		/* undefined for default */
-	} v;
+	nodemask_t nodes;	/* interleave/bind/preferred/etc */
+	int home_node;		/* Home node to use for MPOL_BIND and MPOL_PREFERRED_MANY */
+
 	union {
 		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
 		nodemask_t user_nodemask;	/* nodemask passed by user */
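
The key change in the hunk above is structural: the old union of preferred_node and nodes collapses into a single nodemask plus a home_node, so one layout now describes every policy mode, including MPOL_PREFERRED_MANY. A minimal userspace sketch of the mode this layout carries (not part of this patch; assumes libnuma's <numaif.h> and a v5.15+ kernel, built with -lnuma):

	/*
	 * Prefer allocation from nodes 0 and 1, falling back to the rest
	 * of the system under pressure. MPOL_PREFERRED_MANY may be absent
	 * from older headers, hence the fallback define.
	 */
	#include <stdio.h>
	#include <numaif.h>

	#ifndef MPOL_PREFERRED_MANY
	#define MPOL_PREFERRED_MANY 5	/* value from uapi/linux/mempolicy.h */
	#endif

	int main(void)
	{
		unsigned long nodemask = (1UL << 0) | (1UL << 1);

		if (set_mempolicy(MPOL_PREFERRED_MANY, &nodemask,
				  sizeof(nodemask) * 8) != 0)
			perror("set_mempolicy");
		return 0;
	}
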
@@ -90,9 +92,6 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
 	return pol;
 }
 
-#define vma_policy(vma) ((vma)->vm_policy)
-#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))
-
 static inline void mpol_get(struct mempolicy *pol)
 {
 	if (pol)
@@ -109,48 +108,44 @@ static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
 
 /*
  * Tree of shared policies for a shared memory region.
- * Maintain the policies in a pseudo mm that contains vmas. The vmas
- * carry the policy. As a special twist the pseudo mm is indexed in pages, not
- * bytes, so that we can work with shared memory segments bigger than
- * unsigned long.
  */
-
+struct shared_policy {
+	struct rb_root root;
+	rwlock_t lock;
+};
 struct sp_node {
 	struct rb_node nd;
-	unsigned long start, end;
+	pgoff_t start, end;
 	struct mempolicy *policy;
 };
 
-struct shared_policy {
-	struct rb_root root;
-	spinlock_t lock;
-};
-
+int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
-int mpol_set_shared_policy(struct shared_policy *info,
-				struct vm_area_struct *vma,
-				struct mempolicy *new);
-void mpol_free_shared_policy(struct shared_policy *p);
+int mpol_set_shared_policy(struct shared_policy *sp,
+			   struct vm_area_struct *vma, struct mempolicy *mpol);
+void mpol_free_shared_policy(struct shared_policy *sp);
 struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
-					    unsigned long idx);
+					    pgoff_t idx);
 
-struct mempolicy *get_vma_policy(struct task_struct *tsk,
-		struct vm_area_struct *vma, unsigned long addr);
+struct mempolicy *get_task_policy(struct task_struct *p);
+struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
+		unsigned long addr, pgoff_t *ilx);
+struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+		unsigned long addr, int order, pgoff_t *ilx);
+bool vma_policy_mof(struct vm_area_struct *vma);
 
 extern void numa_default_policy(void);
 extern void numa_policy_init(void);
-extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
-				enum mpol_rebind_step step);
+extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new);
 extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
-extern void mpol_fix_fork_child_flag(struct task_struct *p);
 
-extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
+extern int huge_node(struct vm_area_struct *vma,
 				unsigned long addr, gfp_t gfp_flags,
 				struct mempolicy **mpol, nodemask_t **nodemask);
 extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
-extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
+extern bool mempolicy_in_oom_domain(struct task_struct *tsk,
 				const nodemask_t *mask);
-extern unsigned slab_node(void);
+extern unsigned int mempolicy_slab_node(void);
 
 extern enum zone_type policy_zone;
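
Two API shifts above are worth noting: the shared-policy tree becomes page-indexed (pgoff_t) under an rwlock, and get_vma_policy() drops its task_struct argument in favor of the VMA plus an interleave-index out-parameter (*ilx). The per-VMA policies it resolves are installed from userspace with mbind(2); a hedged sketch, assuming libnuma's <numaif.h> and at least two online nodes:

	/*
	 * Interleave a 1 MiB anonymous mapping across nodes 0 and 1. Each
	 * fault on the range goes through get_vma_policy() in the kernel
	 * to find this policy before allocating the page.
	 */
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <numaif.h>

	int main(void)
	{
		size_t len = 1 << 20;
		unsigned long nodemask = (1UL << 0) | (1UL << 1);
		void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (buf == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		if (mbind(buf, len, MPOL_INTERLEAVE, &nodemask,
			  sizeof(nodemask) * 8, 0) != 0)
			perror("mbind");
		memset(buf, 0, len);	/* touch pages so they are allocated */
		munmap(buf, len);
		return 0;
	}
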
@@ -168,37 +163,40 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 extern int mpol_parse_str(char *str, struct mempolicy **mpol);
 #endif
 
-extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
+extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
 
 /* Check if a vma is migratable */
-static inline int vma_migratable(struct vm_area_struct *vma)
+extern bool vma_migratable(struct vm_area_struct *vma);
+
+int mpol_misplaced(struct folio *folio, struct vm_fault *vmf,
+		   unsigned long addr);
+extern void mpol_put_task_policy(struct task_struct *);
+
+static inline bool mpol_is_preferred_many(struct mempolicy *pol)
 {
-	if (vma->vm_flags & (VM_IO | VM_HUGETLB | VM_PFNMAP))
-		return 0;
-	/*
-	 * Migration allocates pages in the highest zone. If we cannot
-	 * do so then migration (at least from node to node) is not
-	 * possible.
-	 */
-	if (vma->vm_file &&
-		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
-								< policy_zone)
-		return 0;
-	return 1;
+	return (pol->mode == MPOL_PREFERRED_MANY);
 }
 
-extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
+extern bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone);
+
+extern int mempolicy_set_node_perf(unsigned int node,
+				   struct access_coordinate *coords);
 
 #else
 
 struct mempolicy {};
 
+static inline struct mempolicy *get_task_policy(struct task_struct *p)
+{
+	return NULL;
+}
+
 static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
 {
 	return true;
 }
 
-static inline void mpol_put(struct mempolicy *p)
+static inline void mpol_put(struct mempolicy *pol)
 {
 }
 
@@ -210,37 +208,35 @@ static inline void mpol_get(struct mempolicy *pol)
 {
 }
 
-static inline struct mempolicy *mpol_dup(struct mempolicy *old)
-{
-	return NULL;
-}
-
 struct shared_policy {};
 
-static inline int mpol_set_shared_policy(struct shared_policy *info,
-					struct vm_area_struct *vma,
-					struct mempolicy *new)
-{
-	return -EINVAL;
-}
-
 static inline void mpol_shared_policy_init(struct shared_policy *sp,
 						struct mempolicy *mpol)
 {
 }
 
-static inline void mpol_free_shared_policy(struct shared_policy *p)
+static inline void mpol_free_shared_policy(struct shared_policy *sp)
 {
 }
 
 static inline struct mempolicy *
-mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
+mpol_shared_policy_lookup(struct shared_policy *sp, pgoff_t idx)
 {
 	return NULL;
 }
 
-#define vma_policy(vma) NULL
-#define vma_set_policy(vma, pol) do {} while(0)
+static inline struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+				unsigned long addr, int order, pgoff_t *ilx)
+{
+	*ilx = 0;
+	return NULL;
+}
+
+static inline int
+vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
+{
+	return 0;
+}
 
 static inline void numa_policy_init(void)
 {
@@ -251,8 +247,7 @@ static inline void numa_default_policy(void)
 }
 
 static inline void mpol_rebind_task(struct task_struct *tsk,
-				const nodemask_t *new,
-				enum mpol_rebind_step step)
+				const nodemask_t *new)
 {
 }
 
@@ -260,17 +255,13 @@ static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
 {
 }
 
-static inline void mpol_fix_fork_child_flag(struct task_struct *p)
-{
-}
-
-static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
+static inline int huge_node(struct vm_area_struct *vma,
 				unsigned long addr, gfp_t gfp_flags,
 				struct mempolicy **mpol, nodemask_t **nodemask)
 {
 	*mpol = NULL;
 	*nodemask = NULL;
-	return node_zonelist(0, gfp_flags);
+	return 0;
 }
 
 static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
@@ -278,12 +269,6 @@ static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
 	return false;
 }
 
-static inline bool mempolicy_nodemask_intersects(struct task_struct *tsk,
-			const nodemask_t *mask)
-{
-	return false;
-}
-
 static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 				   const nodemask_t *to, int flags)
 {
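
The #else branch above follows the usual config-stub pattern: each NUMA entry point keeps a no-op static inline twin, so call sites compile unchanged with CONFIG_NUMA=n and the optimizer discards the dead calls. A generic illustration of the idiom (CONFIG_FOO and foo_node_of() are hypothetical, not part of this header):

	#ifdef CONFIG_FOO
	int foo_node_of(void *obj);		/* real version lives in foo.c */
	#else
	static inline int foo_node_of(void *obj)
	{
		return 0;			/* pretend everything is on node 0 */
	}
	#endif
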
@@ -301,15 +286,20 @@ static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
 }
 #endif
 
-static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
+static inline int mpol_misplaced(struct folio *folio,
+				 struct vm_fault *vmf,
+				 unsigned long address)
 {
-	return 0;
+	return -1; /* no node preference */
 }
 
-static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
-				 unsigned long address)
+static inline void mpol_put_task_policy(struct task_struct *task)
 {
-	return -1; /* no node preference */
+}
+
+static inline bool mpol_is_preferred_many(struct mempolicy *pol)
+{
+	return false;
 }
 
 #endif /* CONFIG_NUMA */
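
To close the loop from userspace, the task policy that set_mempolicy() installs (and that get_task_policy() returns in-kernel) can be read back with get_mempolicy(2); a hedged sketch, again assuming libnuma's <numaif.h> and at most 64 possible nodes:

	#include <stdio.h>
	#include <numaif.h>

	int main(void)
	{
		int mode;
		unsigned long nodemask = 0;

		if (get_mempolicy(&mode, &nodemask, sizeof(nodemask) * 8,
				  NULL, 0) != 0) {
			perror("get_mempolicy");
			return 1;
		}
		printf("mode=%d nodemask=0x%lx\n", mode, nodemask);
		return 0;
	}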
