Diffstat (limited to 'include/linux/memcontrol.h')
-rw-r--r--  include/linux/memcontrol.h | 322
1 file changed, 187 insertions(+), 135 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 5502aa8e138e..0651865a4564 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -52,6 +52,7 @@ enum memcg_memory_event {
MEMCG_SWAP_HIGH,
MEMCG_SWAP_MAX,
MEMCG_SWAP_FAIL,
+ MEMCG_SOCK_THROTTLED,
MEMCG_NR_MEMORY_EVENTS,
};
@@ -113,6 +114,12 @@ struct mem_cgroup_per_node {
CACHELINE_PADDING(_pad2_);
unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
struct mem_cgroup_reclaim_iter iter;
+
+#ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
+ /* slab stats for nmi context */
+ atomic_t slab_reclaimable;
+ atomic_t slab_unreclaimable;
+#endif
};
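
The two per-node atomics added above suggest that NMI context cannot use the normal per-CPU stat updaters, so slab deltas are parked in plain atomics and folded back later. A minimal sketch of that scheme, assuming a non-NMI flush path exists; the helper names are illustrative, not part of the patch:

/* NMI side: park the delta in an atomic instead of the per-CPU counters. */
static void slab_stat_nmi_safe(struct mem_cgroup_per_node *pn,
			       enum node_stat_item idx, int nr)
{
	if (idx == NR_SLAB_RECLAIMABLE_B)
		atomic_add(nr, &pn->slab_reclaimable);
	else
		atomic_add(nr, &pn->slab_unreclaimable);
}

/* Flush side: fold the parked deltas into the regular lruvec stats. */
static void slab_stat_nmi_flush(struct mem_cgroup_per_node *pn,
				struct lruvec *lruvec)
{
	int nr = atomic_xchg(&pn->slab_reclaimable, 0);

	if (nr)
		mod_lruvec_state(lruvec, NR_SLAB_RECLAIMABLE_B, nr);

	nr = atomic_xchg(&pn->slab_unreclaimable, 0);
	if (nr)
		mod_lruvec_state(lruvec, NR_SLAB_UNRECLAIMABLE_B, nr);
}
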
struct mem_cgroup_threshold {
@@ -236,13 +243,19 @@ struct mem_cgroup {
atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];
+#ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
+ /* MEMCG_KMEM for nmi context */
+ atomic_t kmem_stat;
+#endif
/*
* Hint of reclaim pressure for socket memory management. Note
* that this indicator should NOT be used in legacy cgroup mode
* where socket memory is accounted/charged separately.
*/
- unsigned long socket_pressure;
-
+ u64 socket_pressure;
+#if BITS_PER_LONG < 64
+ seqlock_t socket_pressure_seqlock;
+#endif
int kmemcg_id;
/*
* memcg->objcg is wiped out as a part of the objcg reparenting
@@ -329,17 +342,25 @@ enum page_memcg_data_flags {
__NR_MEMCG_DATA_FLAGS = (1UL << 2),
};
+#define __OBJEXTS_ALLOC_FAIL MEMCG_DATA_OBJEXTS
#define __FIRST_OBJEXT_FLAG __NR_MEMCG_DATA_FLAGS
#else /* CONFIG_MEMCG */
+#define __OBJEXTS_ALLOC_FAIL (1UL << 0)
#define __FIRST_OBJEXT_FLAG (1UL << 0)
#endif /* CONFIG_MEMCG */
enum objext_flags {
- /* slabobj_ext vector failed to allocate */
- OBJEXTS_ALLOC_FAIL = __FIRST_OBJEXT_FLAG,
+ /*
+ * Use bit 0 with zero other bits to signal that slabobj_ext vector
+ * failed to allocate. The same bit 0 with valid upper bits means
+ * MEMCG_DATA_OBJEXTS.
+ */
+ OBJEXTS_ALLOC_FAIL = __OBJEXTS_ALLOC_FAIL,
+ /* slabobj_ext vector allocated with kmalloc_nolock() */
+ OBJEXTS_NOSPIN_ALLOC = __FIRST_OBJEXT_FLAG,
/* the next bit after the last actual flag */
__NR_OBJEXTS_FLAGS = (__FIRST_OBJEXT_FLAG << 1),
};
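
With this encoding, an obj_exts word equal to OBJEXTS_ALLOC_FAIL (bit 0 set, no pointer bits) marks a failed slabobj_ext vector allocation, while the same low bits underneath a real pointer are ordinary flags. A hedged decoding sketch (the helper name is hypothetical, and the masking assumes the existing OBJEXTS_FLAGS_MASK still covers all flag bits):

/* Hypothetical decoder: NULL for the allocation-failure sentinel,
 * otherwise strip the flag bits to recover the vector pointer. */
static inline struct slabobj_ext *decode_obj_exts(unsigned long obj_exts)
{
	if (obj_exts == OBJEXTS_ALLOC_FAIL)
		return NULL;

	return (struct slabobj_ext *)(obj_exts & ~OBJEXTS_FLAGS_MASK);
}
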
@@ -438,9 +459,7 @@ static inline struct mem_cgroup *folio_memcg(struct folio *folio)
*/
static inline bool folio_memcg_charged(struct folio *folio)
{
- if (folio_memcg_kmem(folio))
- return __folio_objcg(folio) != NULL;
- return __folio_memcg(folio) != NULL;
+ return folio->memcg_data != 0;
}
/*
@@ -620,8 +639,6 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
page_counter_read(&memcg->memory);
}
-void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg);
-
int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
/**
@@ -646,14 +663,11 @@ static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
return __mem_cgroup_charge(folio, mm, gfp);
}
-int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
- long nr_pages);
+int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp);
int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
gfp_t gfp, swp_entry_t entry);
-void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
-
void __mem_cgroup_uncharge(struct folio *folio);
/**
@@ -677,7 +691,6 @@ static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
__mem_cgroup_uncharge_folios(folios);
}
-void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_replace_folio(struct folio *old, struct folio *new);
void mem_cgroup_migrate(struct folio *old, struct folio *new);
@@ -896,7 +909,13 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}
-void mem_cgroup_handle_over_high(gfp_t gfp_mask);
+void __mem_cgroup_handle_over_high(gfp_t gfp_mask);
+
+static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
+{
+ if (unlikely(current->memcg_nr_pages_over_high))
+ __mem_cgroup_handle_over_high(gfp_mask);
+}
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
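
The reclaim slow path moves out of line and the wrapper keeps only the cheap per-task check, so call sites need no changes. For context, a typical caller looks like this (hedged; the hook name below only illustrates the usual return-to-userspace shape):

/* Cheap when current->memcg_nr_pages_over_high is zero; otherwise the
 * out-of-line path reclaims the overage before returning to userspace. */
static void example_resume_hook(void)
{
	mem_cgroup_handle_over_high(GFP_KERNEL);
}
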
@@ -911,19 +930,9 @@ struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
-void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
- int val);
-
/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void mod_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx, int val)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __mod_memcg_state(memcg, idx, val);
- local_irq_restore(flags);
-}
+void mod_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx, int val);
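
The local_irq_save() wrappers disappear from the header; callers keep a single entry point that is now out of line and, presumably, still interrupt-safe internally. Usage is unchanged, for example (MEMCG_SOCK is picked purely for illustration):

/* One irq-safe call per update; negative deltas unaccount. */
static void example_account_sock(struct mem_cgroup *memcg, int nr_pages)
{
	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
}
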
static inline void mod_memcg_page_state(struct page *page,
enum memcg_stat_item idx, int val)
@@ -948,31 +957,10 @@ unsigned long lruvec_page_state_local(struct lruvec *lruvec,
void mem_cgroup_flush_stats(struct mem_cgroup *memcg);
void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg);
-void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
+void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
-static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
- int val)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __mod_lruvec_kmem_state(p, idx, val);
- local_irq_restore(flags);
-}
-
-void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
- unsigned long count);
-
-static inline void count_memcg_events(struct mem_cgroup *memcg,
- enum vm_event_item idx,
- unsigned long count)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __count_memcg_events(memcg, idx, count);
- local_irq_restore(flags);
-}
+void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
+ unsigned long count);
static inline void count_memcg_folio_events(struct folio *folio,
enum vm_event_item idx, unsigned long nr)
@@ -1004,29 +992,13 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
count_memcg_events_mm(mm, idx, 1);
}
+void __memcg_memory_event(struct mem_cgroup *memcg,
+ enum memcg_memory_event event, bool allow_spinning);
+
static inline void memcg_memory_event(struct mem_cgroup *memcg,
enum memcg_memory_event event)
{
- bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
- event == MEMCG_SWAP_FAIL;
-
- atomic_long_inc(&memcg->memory_events_local[event]);
- if (!swap_event)
- cgroup_file_notify(&memcg->events_local_file);
-
- do {
- atomic_long_inc(&memcg->memory_events[event]);
- if (swap_event)
- cgroup_file_notify(&memcg->swap_events_file);
- else
- cgroup_file_notify(&memcg->events_file);
-
- if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
- break;
- if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
- break;
- } while ((memcg = parent_mem_cgroup(memcg)) &&
- !mem_cgroup_is_root(memcg));
+ __memcg_memory_event(memcg, event, true);
}
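
The hierarchy walk and file notifications move into __memcg_memory_event(), and the inline wrapper preserves the old behaviour by passing allow_spinning=true. The flag presumably lets charge paths that cannot take the notification locks still bump the event counters; a hedged sketch of the two call shapes (the function below is hypothetical):

static void example_record_max_event(struct mem_cgroup *memcg, bool can_spin)
{
	if (can_spin)
		memcg_memory_event(memcg, MEMCG_MAX);		/* old behaviour */
	else
		__memcg_memory_event(memcg, MEMCG_MAX, false);	/* presumably skips notification */
}
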
static inline void memcg_memory_event_mm(struct mm_struct *mm,
@@ -1044,12 +1016,34 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
rcu_read_unlock();
}
-void split_page_memcg(struct page *head, int old_order, int new_order);
+void split_page_memcg(struct page *first, unsigned order);
+void folio_split_memcg_refs(struct folio *folio, unsigned old_order,
+ unsigned new_order);
+static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
+{
+ struct mem_cgroup *memcg;
+ u64 id;
+
+ if (mem_cgroup_disabled())
+ return 0;
+
+ rcu_read_lock();
+ memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ if (!memcg)
+ memcg = root_mem_cgroup;
+ id = cgroup_id(memcg->css.cgroup);
+ rcu_read_unlock();
+ return id;
+}
+
+extern int mem_cgroup_init(void);
#else /* CONFIG_MEMCG */
#define MEM_CGROUP_ID_SHIFT 0
+#define root_mem_cgroup (NULL)
+
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
return NULL;
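
cgroup_id_from_mm() gives callers a 64-bit cgroup ID for the mm's owner; it returns 0 when memcg is disabled, and the !CONFIG_MEMCG stub further down returns 0 as well, so consumers need no ifdefs. A hedged usage sketch (the struct and helper are hypothetical):

/* Hypothetical consumer: tag a record with the owning cgroup's ID. */
struct owner_tag {
	u64 cgrp_id;
};

static void tag_owner(struct owner_tag *tag, struct mm_struct *mm)
{
	tag->cgrp_id = cgroup_id_from_mm(mm);	/* 0 means "no cgroup" */
}
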
@@ -1135,21 +1129,15 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
return false;
}
-static inline void mem_cgroup_commit_charge(struct folio *folio,
- struct mem_cgroup *memcg)
-{
-}
-
static inline int mem_cgroup_charge(struct folio *folio,
struct mm_struct *mm, gfp_t gfp)
{
return 0;
}
-static inline int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg,
- gfp_t gfp, long nr_pages)
+static inline int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp)
{
- return 0;
+ return 0;
}
static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
@@ -1158,10 +1146,6 @@ static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
return 0;
}
-static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr)
-{
-}
-
static inline void mem_cgroup_uncharge(struct folio *folio)
{
}
@@ -1170,11 +1154,6 @@ static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
{
}
-static inline void mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
- unsigned int nr_pages)
-{
-}
-
static inline void mem_cgroup_replace_folio(struct folio *old,
struct folio *new)
{
@@ -1378,12 +1357,6 @@ static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}
-static inline void __mod_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx,
- int nr)
-{
-}
-
static inline void mod_memcg_state(struct mem_cgroup *memcg,
enum memcg_stat_item idx,
int nr)
@@ -1420,14 +1393,6 @@ static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
{
}
-static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
- int val)
-{
- struct page *page = virt_to_head_page(p);
-
- __mod_node_page_state(page_pgdat(page), idx, val);
-}
-
static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
int val)
{
@@ -1437,12 +1402,6 @@ static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
}
static inline void count_memcg_events(struct mem_cgroup *memcg,
- enum vm_event_item idx,
- unsigned long count)
-{
-}
-
-static inline void __count_memcg_events(struct mem_cgroup *memcg,
enum vm_event_item idx,
unsigned long count)
{
@@ -1463,9 +1422,21 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}
-static inline void split_page_memcg(struct page *head, int old_order, int new_order)
+static inline void split_page_memcg(struct page *first, unsigned order)
{
}
+
+static inline void folio_split_memcg_refs(struct folio *folio,
+ unsigned old_order, unsigned new_order)
+{
+}
+
+static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
+{
+ return 0;
+}
+
+static inline int mem_cgroup_init(void) { return 0; }
#endif /* CONFIG_MEMCG */
/*
@@ -1481,16 +1452,6 @@ struct slabobj_ext {
#endif
} __aligned(8);
-static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
-{
- __mod_lruvec_kmem_state(p, idx, 1);
-}
-
-static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
-{
- __mod_lruvec_kmem_state(p, idx, -1);
-}
-
static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
struct mem_cgroup *memcg;
@@ -1608,44 +1569,97 @@ static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
#endif /* CONFIG_CGROUP_WRITEBACK */
struct sock;
-bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
- gfp_t gfp_mask);
-void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
+
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
-static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
+void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk);
+bool mem_cgroup_sk_charge(const struct sock *sk, unsigned int nr_pages,
+ gfp_t gfp_mask);
+void mem_cgroup_sk_uncharge(const struct sock *sk, unsigned int nr_pages);
+
+#if BITS_PER_LONG < 64
+static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg)
{
-#ifdef CONFIG_MEMCG_V1
- if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
- return !!memcg->tcpmem_pressure;
-#endif /* CONFIG_MEMCG_V1 */
+ u64 val = get_jiffies_64() + HZ;
+ unsigned long flags;
+
+ write_seqlock_irqsave(&memcg->socket_pressure_seqlock, flags);
+ memcg->socket_pressure = val;
+ write_sequnlock_irqrestore(&memcg->socket_pressure_seqlock, flags);
+}
+
+static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg)
+{
+ unsigned int seq;
+ u64 val;
+
do {
- if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
- return true;
- } while ((memcg = parent_mem_cgroup(memcg)));
- return false;
+ seq = read_seqbegin(&memcg->socket_pressure_seqlock);
+ val = memcg->socket_pressure;
+ } while (read_seqretry(&memcg->socket_pressure_seqlock, seq));
+
+ return val;
+}
+#else
+static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg)
+{
+ WRITE_ONCE(memcg->socket_pressure, jiffies + HZ);
}
+static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg)
+{
+ return READ_ONCE(memcg->socket_pressure);
+}
+#endif
+
int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
+
+static inline int shrinker_id(struct shrinker *shrinker)
+{
+ return shrinker->id;
+}
#else
#define mem_cgroup_sockets_enabled 0
-static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
-static inline void mem_cgroup_sk_free(struct sock *sk) { };
-static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
+
+static inline void mem_cgroup_sk_alloc(struct sock *sk)
+{
+}
+
+static inline void mem_cgroup_sk_free(struct sock *sk)
+{
+}
+
+static inline void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk)
+{
+}
+
+static inline bool mem_cgroup_sk_charge(const struct sock *sk,
+ unsigned int nr_pages,
+ gfp_t gfp_mask)
{
return false;
}
+static inline void mem_cgroup_sk_uncharge(const struct sock *sk,
+ unsigned int nr_pages)
+{
+}
+
static inline void set_shrinker_bit(struct mem_cgroup *memcg,
int nid, int shrinker_id)
{
}
+
+static inline int shrinker_id(struct shrinker *shrinker)
+{
+ return -1;
+}
#endif
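
Widening socket_pressure to u64 avoids jiffies wraparound, but on 32-bit a u64 store can tear, hence the seqlock around it; on 64-bit a plain WRITE_ONCE/READ_ONCE pair suffices. The old inline mem_cgroup_under_socket_pressure() leaves the header; a minimal sketch of how its check could be rebuilt on top of the new accessor (the name and placement are assumptions, not part of this patch):

/* Under pressure while any ancestor's stored timestamp is in the future. */
static bool example_under_socket_pressure(struct mem_cgroup *memcg)
{
	do {
		if (time_before64(get_jiffies_64(),
				  mem_cgroup_get_socket_pressure(memcg)))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));

	return false;
}
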
#ifdef CONFIG_MEMCG
@@ -1730,6 +1744,15 @@ static inline void count_objcg_events(struct obj_cgroup *objcg,
rcu_read_unlock();
}
+bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid);
+
+void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg);
+
+static inline bool memcg_is_dying(struct mem_cgroup *memcg)
+{
+ return memcg ? css_is_dying(&memcg->css) : false;
+}
+
#else
static inline bool mem_cgroup_kmem_disabled(void)
{
@@ -1787,6 +1810,24 @@ static inline void count_objcg_events(struct obj_cgroup *objcg,
{
}
+static inline ino_t page_cgroup_ino(struct page *page)
+{
+ return 0;
+}
+
+static inline bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid)
+{
+ return true;
+}
+
+static inline void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg)
+{
+}
+
+static inline bool memcg_is_dying(struct mem_cgroup *memcg)
+{
+ return false;
+}
#endif /* CONFIG_MEMCG */
#if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)
@@ -1841,6 +1882,9 @@ static inline void mem_cgroup_exit_user_fault(void)
current->in_user_fault = 0;
}
+void memcg1_swapout(struct folio *folio, swp_entry_t entry);
+void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages);
+
#else /* CONFIG_MEMCG_V1 */
static inline
unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
@@ -1868,6 +1912,14 @@ static inline void mem_cgroup_exit_user_fault(void)
{
}
+static inline void memcg1_swapout(struct folio *folio, swp_entry_t entry)
+{
+}
+
+static inline void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages)
+{
+}
+
#endif /* CONFIG_MEMCG_V1 */
#endif /* _LINUX_MEMCONTROL_H */
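
memcg1_swapout()/memcg1_swapin() appear to take over the cgroup v1 swap accounting from the mem_cgroup_swapin_uncharge_swap() declaration removed above, and the !CONFIG_MEMCG_V1 stubs let swap code call them unconditionally. A hedged sketch of the expected call shape (the wrapper function is hypothetical):

static void example_swap_accounting(struct folio *folio, swp_entry_t entry,
				    bool swapping_out)
{
	if (swapping_out)
		memcg1_swapout(folio, entry);			/* move the charge to the swap entry */
	else
		memcg1_swapin(entry, folio_nr_pages(folio));	/* drop it again on swap-in */
}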