Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/dmar.h            14
-rw-r--r--  include/linux/hugetlb.h        120
-rw-r--r--  include/linux/kasan-checks.h    43
-rw-r--r--  include/linux/kasan.h            7
-rw-r--r--  include/linux/memcontrol.h      24
-rw-r--r--  include/linux/mm.h              57
-rw-r--r--  include/linux/mm_types.h         2
-rw-r--r--  include/linux/oom.h              1
-rw-r--r--  include/linux/page-flags.h       6
-rw-r--r--  include/linux/page-isolation.h   2
-rw-r--r--  include/linux/page_ext.h         1
-rw-r--r--  include/linux/pagemap.h          6
-rw-r--r--  include/linux/pfn_t.h            7
-rw-r--r--  include/linux/slab.h            16
-rw-r--r--  include/linux/swap.h            18
-rw-r--r--  include/linux/vmalloc.h          2
-rw-r--r--  include/linux/vmpressure.h       2
17 files changed, 232 insertions(+), 96 deletions(-)
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 28813c6f44b6..a7cf3599d9a1 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -92,12 +92,14 @@ static inline bool dmar_rcu_check(void)
#define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check())
-#define for_each_dev_scope(a, c, p, d) \
- for ((p) = 0; ((d) = (p) < (c) ? dmar_rcu_dereference((a)[(p)].dev) : \
- NULL, (p) < (c)); (p)++)
-
-#define for_each_active_dev_scope(a, c, p, d) \
- for_each_dev_scope((a), (c), (p), (d)) if (!(d)) { continue; } else
+#define for_each_dev_scope(devs, cnt, i, tmp) \
+ for ((i) = 0; ((tmp) = (i) < (cnt) ? \
+ dmar_rcu_dereference((devs)[(i)].dev) : NULL, (i) < (cnt)); \
+ (i)++)
+
+#define for_each_active_dev_scope(devs, cnt, i, tmp) \
+ for_each_dev_scope((devs), (cnt), (i), (tmp)) \
+ if (!(tmp)) { continue; } else
extern int dmar_table_init(void);
extern int dmar_dev_scope_init(void);
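The renamed parameters make the iterators easier to read: devs/cnt are the device-scope array and its length, i the cursor, and tmp the per-iteration struct device *. A hedged usage sketch (the drhd unit and dev target are illustrative; callers must satisfy dmar_rcu_check(), e.g. by holding the RCU read lock):

    /* Sketch: does 'dev' appear in this DRHD's device scope? */
    struct device *tmp;
    bool found = false;
    int i;

    rcu_read_lock();
    for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, tmp) {
            if (tmp == dev) {       /* NULL slots are skipped automatically */
                    found = true;
                    break;
            }
    }
    rcu_read_unlock();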
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index edf476c8cfb9..edfca4278319 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -16,29 +16,11 @@ struct user_struct;
struct mmu_gather;
#ifndef is_hugepd
-/*
- * Some architectures requires a hugepage directory format that is
- * required to support multiple hugepage sizes. For example
- * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
- * introduced the same on powerpc. This allows for a more flexible hugepage
- * pagetable layout.
- */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
-static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
- unsigned pdshift, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- return 0;
-}
-#else
-extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
- unsigned pdshift, unsigned long end,
- int write, struct page **pages, int *nr);
#endif
-
#ifdef CONFIG_HUGETLB_PAGE
#include <linux/mempolicy.h>
@@ -608,22 +590,92 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
-#define alloc_huge_page(v, a, r) NULL
-#define alloc_huge_page_node(h, nid) NULL
-#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
-#define alloc_huge_page_vma(h, vma, address) NULL
-#define alloc_bootmem_huge_page(h) NULL
-#define hstate_file(f) NULL
-#define hstate_sizelog(s) NULL
-#define hstate_vma(v) NULL
-#define hstate_inode(i) NULL
-#define page_hstate(page) NULL
-#define huge_page_size(h) PAGE_SIZE
-#define huge_page_mask(h) PAGE_MASK
-#define vma_kernel_pagesize(v) PAGE_SIZE
-#define vma_mmu_pagesize(v) PAGE_SIZE
-#define huge_page_order(h) 0
-#define huge_page_shift(h) PAGE_SHIFT
+
+static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
+ unsigned long addr,
+ int avoid_reserve)
+{
+ return NULL;
+}
+
+static inline struct page *alloc_huge_page_node(struct hstate *h, int nid)
+{
+ return NULL;
+}
+
+static inline struct page *
+alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask)
+{
+ return NULL;
+}
+
+static inline struct page *alloc_huge_page_vma(struct hstate *h,
+ struct vm_area_struct *vma,
+ unsigned long address)
+{
+ return NULL;
+}
+
+static inline int __alloc_bootmem_huge_page(struct hstate *h)
+{
+ return 0;
+}
+
+static inline struct hstate *hstate_file(struct file *f)
+{
+ return NULL;
+}
+
+static inline struct hstate *hstate_sizelog(int page_size_log)
+{
+ return NULL;
+}
+
+static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
+{
+ return NULL;
+}
+
+static inline struct hstate *hstate_inode(struct inode *i)
+{
+ return NULL;
+}
+
+static inline struct hstate *page_hstate(struct page *page)
+{
+ return NULL;
+}
+
+static inline unsigned long huge_page_size(struct hstate *h)
+{
+ return PAGE_SIZE;
+}
+
+static inline unsigned long huge_page_mask(struct hstate *h)
+{
+ return PAGE_MASK;
+}
+
+static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
+{
+ return PAGE_SIZE;
+}
+
+static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
+{
+ return PAGE_SIZE;
+}
+
+static inline unsigned int huge_page_order(struct hstate *h)
+{
+ return 0;
+}
+
+static inline unsigned int huge_page_shift(struct hstate *h)
+{
+ return PAGE_SHIFT;
+}
+
static inline bool hstate_is_gigantic(struct hstate *h)
{
return false;
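Replacing the old "#define ... NULL" stubs with static inlines gives the !CONFIG_HUGETLB_PAGE configuration the same argument type checking as the real implementations, while still compiling away to constants. A hedged illustration (the caller is made up):

    /* With the macro stubs, hstate_file() accepted any argument; the
     * static inline stub now requires a struct file *.
     */
    static unsigned long file_huge_size(struct file *f)
    {
            struct hstate *h = hstate_file(f);  /* NULL in this config */

            return huge_page_size(h);           /* PAGE_SIZE in this config */
    }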
diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h
index a61dc075e2ce..ac6aba632f2d 100644
--- a/include/linux/kasan-checks.h
+++ b/include/linux/kasan-checks.h
@@ -2,14 +2,43 @@
#ifndef _LINUX_KASAN_CHECKS_H
#define _LINUX_KASAN_CHECKS_H
-#if defined(__SANITIZE_ADDRESS__) || defined(__KASAN_INTERNAL)
-void kasan_check_read(const volatile void *p, unsigned int size);
-void kasan_check_write(const volatile void *p, unsigned int size);
+#include <linux/types.h>
+
+/*
+ * __kasan_check_*: Always available when KASAN is enabled. This may be used
+ * even in compilation units that selectively disable KASAN, but must use KASAN
+ * to validate access to an address. Never use these in header files!
+ */
+#ifdef CONFIG_KASAN
+bool __kasan_check_read(const volatile void *p, unsigned int size);
+bool __kasan_check_write(const volatile void *p, unsigned int size);
+#else
+static inline bool __kasan_check_read(const volatile void *p, unsigned int size)
+{
+ return true;
+}
+static inline bool __kasan_check_write(const volatile void *p, unsigned int size)
+{
+ return true;
+}
+#endif
+
+/*
+ * kasan_check_*: Only available when the particular compilation unit has KASAN
+ * instrumentation enabled. May be used in header files.
+ */
+#ifdef __SANITIZE_ADDRESS__
+#define kasan_check_read __kasan_check_read
+#define kasan_check_write __kasan_check_write
#else
-static inline void kasan_check_read(const volatile void *p, unsigned int size)
-{ }
-static inline void kasan_check_write(const volatile void *p, unsigned int size)
-{ }
+static inline bool kasan_check_read(const volatile void *p, unsigned int size)
+{
+ return true;
+}
+static inline bool kasan_check_write(const volatile void *p, unsigned int size)
+{
+ return true;
+}
#endif
#endif
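As the new comments note, __kasan_check_* lets a file built with KASAN instrumentation disabled (e.g. KASAN_SANITIZE_foo.o := n in its Makefile) still validate an access explicitly. A hedged sketch (the helper is illustrative):

    /* Sketch: explicit validation in an uninstrumented unit. The check
     * returns true if the access is valid; on failure KASAN has already
     * reported, so the caller can simply bail out.
     */
    static void consume(const void *buf, unsigned int len)
    {
            if (!__kasan_check_read(buf, len))
                    return;
            /* ... safe to read buf[0..len) ... */
    }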
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index b40ea104dd36..cc8a03cc9674 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -76,8 +76,11 @@ void kasan_free_shadow(const struct vm_struct *vm);
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);
-size_t ksize(const void *);
-static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
+size_t __ksize(const void *);
+static inline void kasan_unpoison_slab(const void *ptr)
+{
+ kasan_unpoison_shadow(ptr, __ksize(ptr));
+}
size_t kasan_metadata_size(struct kmem_cache *cache);
bool kasan_save_enable_multi_shot(void);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 1dcb763bb610..44c41462be33 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -233,8 +233,9 @@ struct mem_cgroup {
/* OOM-Killer disable */
int oom_kill_disable;
- /* memory.events */
+ /* memory.events and memory.events.local */
struct cgroup_file events_file;
+ struct cgroup_file events_local_file;
/* handle for "memory.swap.events" */
struct cgroup_file swap_events_file;
@@ -281,6 +282,7 @@ struct mem_cgroup {
/* memory.events */
atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
+ atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];
unsigned long socket_pressure;
@@ -392,7 +394,6 @@ out:
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
-bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
@@ -747,6 +748,9 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
static inline void memcg_memory_event(struct mem_cgroup *memcg,
enum memcg_memory_event event)
{
+ atomic_long_inc(&memcg->memory_events_local[event]);
+ cgroup_file_notify(&memcg->events_local_file);
+
do {
atomic_long_inc(&memcg->memory_events[event]);
cgroup_file_notify(&memcg->events_file);
@@ -870,12 +874,6 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
return true;
}
-static inline bool task_in_mem_cgroup(struct task_struct *task,
- const struct mem_cgroup *memcg)
-{
- return true;
-}
-
static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
return NULL;
@@ -1273,6 +1271,8 @@ int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge(struct page *page, int order);
int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
struct mem_cgroup *memcg);
+void __memcg_kmem_uncharge_memcg(struct mem_cgroup *memcg,
+ unsigned int nr_pages);
extern struct static_key_false memcg_kmem_enabled_key;
extern struct workqueue_struct *memcg_kmem_cache_wq;
@@ -1314,6 +1314,14 @@ static inline int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp,
return __memcg_kmem_charge_memcg(page, gfp, order, memcg);
return 0;
}
+
+static inline void memcg_kmem_uncharge_memcg(struct page *page, int order,
+ struct mem_cgroup *memcg)
+{
+ if (memcg_kmem_enabled())
+ __memcg_kmem_uncharge_memcg(memcg, 1 << order);
+}
+
/*
* helper for accessing a memcg's index. It will be used as an index in the
* child cache array in kmem_cache, and also to derive its name. This function
diff --git a/include/linux/mm.h b/include/linux/mm.h
index dd0b5f4e1e45..f88f0eabcc5e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -633,6 +633,11 @@ static inline bool is_vmalloc_addr(const void *x)
return false;
#endif
}
+
+#ifndef is_ioremap_addr
+#define is_ioremap_addr(x) is_vmalloc_addr(x)
+#endif
+
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
@@ -2681,8 +2686,7 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
return 0;
}
-typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
- void *data);
+typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
unsigned long size, pte_fn_t fn, void *data);
@@ -2696,11 +2700,42 @@ static inline void kernel_poison_pages(struct page *page, int numpages,
int enable) { }
#endif
-extern bool _debug_pagealloc_enabled;
+#ifdef CONFIG_INIT_ON_ALLOC_DEFAULT_ON
+DECLARE_STATIC_KEY_TRUE(init_on_alloc);
+#else
+DECLARE_STATIC_KEY_FALSE(init_on_alloc);
+#endif
+static inline bool want_init_on_alloc(gfp_t flags)
+{
+ if (static_branch_unlikely(&init_on_alloc) &&
+ !page_poisoning_enabled())
+ return true;
+ return flags & __GFP_ZERO;
+}
+
+#ifdef CONFIG_INIT_ON_FREE_DEFAULT_ON
+DECLARE_STATIC_KEY_TRUE(init_on_free);
+#else
+DECLARE_STATIC_KEY_FALSE(init_on_free);
+#endif
+static inline bool want_init_on_free(void)
+{
+ return static_branch_unlikely(&init_on_free) &&
+ !page_poisoning_enabled();
+}
+
+#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT
+DECLARE_STATIC_KEY_TRUE(_debug_pagealloc_enabled);
+#else
+DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
+#endif
static inline bool debug_pagealloc_enabled(void)
{
- return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) && _debug_pagealloc_enabled;
+ if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
+ return false;
+
+ return static_branch_unlikely(&_debug_pagealloc_enabled);
}
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
@@ -2850,11 +2885,9 @@ extern long copy_huge_page_from_user(struct page *dst_page,
bool allow_pagefault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
-extern struct page_ext_operations debug_guardpage_ops;
-
#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
-extern bool _debug_guardpage_enabled;
+DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
static inline unsigned int debug_guardpage_minorder(void)
{
@@ -2863,21 +2896,15 @@ static inline unsigned int debug_guardpage_minorder(void)
static inline bool debug_guardpage_enabled(void)
{
- return _debug_guardpage_enabled;
+ return static_branch_unlikely(&_debug_guardpage_enabled);
}
static inline bool page_is_guard(struct page *page)
{
- struct page_ext *page_ext;
-
if (!debug_guardpage_enabled())
return false;
- page_ext = lookup_page_ext(page);
- if (unlikely(!page_ext))
- return false;
-
- return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
+ return PageGuard(page);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
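The new want_init_on_alloc()/want_init_on_free() predicates are static-key gated, so the default-off case costs a single patched branch; both back off when page poisoning is enabled, since poisoning already overwrites the memory. A hedged sketch of how an allocator consults them (raw_alloc() is a stand-in; the real wiring lives in the page and slab allocators):

    /* Sketch: zero-on-allocation policy, enabled by init_on_alloc=1. */
    static void *alloc_obj(size_t size, gfp_t flags)
    {
            void *p = raw_alloc(size, flags);   /* stand-in allocator */

            if (p && want_init_on_alloc(flags))
                    memset(p, 0, size);         /* also honors __GFP_ZERO */
            return p;
    }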
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 8ec38b11b361..1d1093474c1a 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -329,7 +329,9 @@ struct vm_area_struct {
struct file * vm_file; /* File we map to (can be NULL). */
void * vm_private_data; /* was vm_pte (shared mem) */
+#ifdef CONFIG_SWAP
atomic_long_t swap_readahead_info;
+#endif
#ifndef CONFIG_MMU
struct vm_region *vm_region; /* NOMMU mapping region */
#endif
diff --git a/include/linux/oom.h b/include/linux/oom.h
index d07992009265..c696c265f019 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -108,7 +108,6 @@ static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
bool __oom_reap_task_mm(struct mm_struct *mm);
extern unsigned long oom_badness(struct task_struct *p,
- struct mem_cgroup *memcg, const nodemask_t *nodemask,
unsigned long totalpages);
extern bool out_of_memory(struct oom_control *oc);
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 9f8712a4b1a5..b848517da64c 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -703,6 +703,7 @@ PAGEFLAG_FALSE(DoubleMap)
#define PG_offline 0x00000100
#define PG_kmemcg 0x00000200
#define PG_table 0x00000400
+#define PG_guard 0x00000800
#define PageType(page, flag) \
((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
@@ -754,6 +755,11 @@ PAGE_TYPE_OPS(Kmemcg, kmemcg)
*/
PAGE_TYPE_OPS(Table, table)
+/*
+ * Marks guardpages used with debug_pagealloc.
+ */
+PAGE_TYPE_OPS(Guard, guard)
+
extern bool is_free_buddy_page(struct page *page);
__PAGEFLAG(Isolated, isolated, PF_ANY);
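PAGE_TYPE_OPS(Guard, guard) generates the accessors that mm.h's page_is_guard() now relies on, roughly (simplified from the PAGE_TYPE_OPS definition earlier in this header):

    /* Sketch of the generated API (simplified). */
    static __always_inline int PageGuard(struct page *page)
    {
            return PageType(page, PG_guard);
    }
    /* plus __SetPageGuard() / __ClearPageGuard(), which flip PG_guard
     * in page->page_type under VM_BUG_ON sanity checks */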
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 280ae96dc4c3..1099c2fee20f 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -50,7 +50,7 @@ start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
* Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
* target range is [start_pfn, end_pfn)
*/
-int
+void
undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
unsigned migratetype);
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index f84f167ec04c..09592951725c 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -17,7 +17,6 @@ struct page_ext_operations {
#ifdef CONFIG_PAGE_EXTENSION
enum page_ext_flags {
- PAGE_EXT_DEBUG_GUARD,
PAGE_EXT_OWNER,
#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
PAGE_EXT_YOUNG,
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index fe0b29bf2df7..c7552459a15f 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -383,8 +383,7 @@ extern int read_cache_pages(struct address_space *mapping,
static inline struct page *read_mapping_page(struct address_space *mapping,
pgoff_t index, void *data)
{
- filler_t *filler = (filler_t *)mapping->a_ops->readpage;
- return read_cache_page(mapping, index, filler, data);
+ return read_cache_page(mapping, index, NULL, data);
}
/*
@@ -452,6 +451,9 @@ extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
unsigned int flags);
extern void unlock_page(struct page *page);
+/*
+ * Return true if the page was successfully locked
+ */
static inline int trylock_page(struct page *page)
{
page = compound_head(page);
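The new comment pins down trylock_page()'s contract: a non-zero return means the caller now holds the page lock. A hedged usage sketch:

    /* Sketch: opportunistic lock with a sleeping fallback. */
    if (!trylock_page(page))
            lock_page(page);        /* may sleep until the holder unlocks */
    /* ... the page is locked here ... */
    unlock_page(page);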
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
index 3c202a11a79e..01e8037023f7 100644
--- a/include/linux/pfn_t.h
+++ b/include/linux/pfn_t.h
@@ -66,13 +66,6 @@ static inline phys_addr_t pfn_t_to_phys(pfn_t pfn)
return PFN_PHYS(pfn_t_to_pfn(pfn));
}
-static inline void *pfn_t_to_virt(pfn_t pfn)
-{
- if (pfn_t_has_page(pfn) && !is_device_private_page(pfn_t_to_page(pfn)))
- return __va(pfn_t_to_phys(pfn));
- return NULL;
-}
-
static inline pfn_t page_to_pfn_t(struct page *page)
{
return pfn_to_pfn_t(page_to_pfn(page));
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 9449b19c5f10..56c9c7eed34e 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -16,6 +16,7 @@
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
+#include <linux/percpu-refcount.h>
/*
@@ -115,6 +116,10 @@
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
+
+/* Slab deactivation flag */
+#define SLAB_DEACTIVATED ((slab_flags_t __force)0x10000000U)
+
/*
* ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
*
@@ -151,8 +156,7 @@ void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
-void memcg_deactivate_kmem_caches(struct mem_cgroup *);
-void memcg_destroy_kmem_caches(struct mem_cgroup *);
+void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *);
/*
* Please use this macro to create slab caches. Simply specify the
@@ -184,6 +188,7 @@ void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
+size_t __ksize(const void *);
size_t ksize(const void *);
#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
@@ -641,11 +646,12 @@ struct memcg_cache_params {
struct mem_cgroup *memcg;
struct list_head children_node;
struct list_head kmem_caches_node;
+ struct percpu_ref refcnt;
- void (*deact_fn)(struct kmem_cache *);
+ void (*work_fn)(struct kmem_cache *);
union {
- struct rcu_head deact_rcu_head;
- struct work_struct deact_work;
+ struct rcu_head rcu_head;
+ struct work_struct work;
};
};
};
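Exporting __ksize() next to ksize() separates the raw size query from the KASAN-aware one: ksize() additionally unpoisons the whole allocation, so callers may legally use the slack beyond the requested size, while __ksize() only reports it. A hedged sketch:

    /* Sketch: using the slack that ksize() makes visible. */
    static void demo(void)
    {
            char *p = kmalloc(13, GFP_KERNEL);

            if (p) {
                    size_t sz = ksize(p);   /* full object size, e.g. 16 */

                    memset(p, 0, sz);       /* legal: ksize() unpoisoned it */
                    kfree(p);
            }
    }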
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 4bfb5c4ac108..de2c67a33b7e 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -148,7 +148,7 @@ struct zone;
* We always assume that blocks are of size PAGE_SIZE.
*/
struct swap_extent {
- struct list_head list;
+ struct rb_node rb_node;
pgoff_t start_page;
pgoff_t nr_pages;
sector_t start_block;
@@ -175,8 +175,9 @@ enum {
SWP_PAGE_DISCARD = (1 << 10), /* freed swap page-cluster discards */
SWP_STABLE_WRITES = (1 << 11), /* no overwrite PG_writeback pages */
SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */
+ SWP_VALID = (1 << 13), /* swap is valid to be operated on? */
/* add others here before... */
- SWP_SCANNING = (1 << 13), /* refcount in scan_swap_map */
+ SWP_SCANNING = (1 << 14), /* refcount in scan_swap_map */
};
#define SWAP_CLUSTER_MAX 32UL
@@ -247,8 +248,7 @@ struct swap_info_struct {
unsigned int cluster_next; /* likely index for next allocation */
unsigned int cluster_nr; /* countdown to next cluster search */
struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
- struct swap_extent *curr_swap_extent;
- struct swap_extent first_swap_extent;
+ struct rb_root swap_extent_root;/* root of the swap extent rbtree */
struct block_device *bdev; /* swap device or bdev of swap file */
struct file *swap_file; /* seldom referenced */
unsigned int old_block_size; /* seldom referenced */
@@ -460,7 +460,7 @@ extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
-extern int __swap_count(struct swap_info_struct *si, swp_entry_t entry);
+extern int __swap_count(swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
@@ -470,6 +470,12 @@ extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
+extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
+
+static inline void put_swap_device(struct swap_info_struct *si)
+{
+ rcu_read_unlock();
+}
#else /* CONFIG_SWAP */
@@ -576,7 +582,7 @@ static inline int page_swapcount(struct page *page)
return 0;
}
-static inline int __swap_count(struct swap_info_struct *si, swp_entry_t entry)
+static inline int __swap_count(swp_entry_t entry)
{
return 0;
}
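get_swap_device()/put_swap_device() form the new stabilization pair against a concurrent swapoff: on success the device is pinned under rcu_read_lock(), which put_swap_device() drops, as shown above. A hedged usage sketch:

    /* Sketch: operate on a swap entry without racing swapoff. */
    struct swap_info_struct *si;

    si = get_swap_device(entry);
    if (si) {
            /* si and the entry's swap cache are stable here */
            put_swap_device(si);
    }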
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 51e131245379..9b21d0047710 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -72,10 +72,12 @@ extern void vm_unmap_aliases(void);
#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
+extern unsigned long vmalloc_nr_pages(void);
#else
static inline void vmalloc_init(void)
{
}
+static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif
extern void *vmalloc(unsigned long size);
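vmalloc_nr_pages() exposes the number of pages currently backing vmalloc areas, with a zero stub for !CONFIG_MMU. A hedged sketch of the kind of reporting it enables (the in-tree consumer is /proc/meminfo's VmallocUsed):

    /* Sketch: pages -> kB for display. */
    unsigned long vmalloc_kb = vmalloc_nr_pages() << (PAGE_SHIFT - 10);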
diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h
index 61e6fddfb26f..6d28bc433c1c 100644
--- a/include/linux/vmpressure.h
+++ b/include/linux/vmpressure.h
@@ -17,7 +17,7 @@ struct vmpressure {
unsigned long tree_scanned;
unsigned long tree_reclaimed;
/* The lock is used to keep the scanned/reclaimed above in sync. */
- struct spinlock sr_lock;
+ spinlock_t sr_lock;
/* The list of vmpressure_event structs. */
struct list_head events;