summaryrefslogtreecommitdiff
path: root/include/linux/zswap.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/zswap.h')
-rw-r--r--include/linux/zswap.h57
1 files changed, 47 insertions, 10 deletions
diff --git a/include/linux/zswap.h b/include/linux/zswap.h
index 2a60ce39cfde..30c193a1207e 100644
--- a/include/linux/zswap.h
+++ b/include/linux/zswap.h
@@ -5,32 +5,69 @@
#include <linux/types.h>
#include <linux/mm_types.h>
-extern u64 zswap_pool_total_size;
-extern atomic_t zswap_stored_pages;
+struct lruvec;
+
+extern atomic_long_t zswap_stored_pages;
#ifdef CONFIG_ZSWAP
+struct zswap_lruvec_state {
+ /*
+ * Number of swapped-in pages from disk, i.e. not found in the zswap pool.
+ *
+ * This is consumed and subtracted from the lru size in
+ * zswap_shrinker_count() to penalize past overshrinking that led to disk
+ * swapins. The idea is that had we considered this many more pages in the
+ * LRU active/protected and not written them back, we would not have had to
+ * swapped them in.
+ */
+ atomic_long_t nr_disk_swapins;
+};
+
+unsigned long zswap_total_pages(void);
bool zswap_store(struct folio *folio);
-bool zswap_load(struct folio *folio);
-void zswap_invalidate(int type, pgoff_t offset);
-void zswap_swapon(int type);
+int zswap_load(struct folio *folio);
+void zswap_invalidate(swp_entry_t swp);
+int zswap_swapon(int type, unsigned long nr_pages);
void zswap_swapoff(int type);
-
+void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg);
+void zswap_lruvec_state_init(struct lruvec *lruvec);
+void zswap_folio_swapin(struct folio *folio);
+bool zswap_is_enabled(void);
+bool zswap_never_enabled(void);
#else
+struct zswap_lruvec_state {};
+
static inline bool zswap_store(struct folio *folio)
{
return false;
}
-static inline bool zswap_load(struct folio *folio)
+static inline int zswap_load(struct folio *folio)
{
- return false;
+ return -ENOENT;
}
-static inline void zswap_invalidate(int type, pgoff_t offset) {}
-static inline void zswap_swapon(int type) {}
+static inline void zswap_invalidate(swp_entry_t swp) {}
+static inline int zswap_swapon(int type, unsigned long nr_pages)
+{
+ return 0;
+}
static inline void zswap_swapoff(int type) {}
+static inline void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg) {}
+static inline void zswap_lruvec_state_init(struct lruvec *lruvec) {}
+static inline void zswap_folio_swapin(struct folio *folio) {}
+
+static inline bool zswap_is_enabled(void)
+{
+ return false;
+}
+
+static inline bool zswap_never_enabled(void)
+{
+ return true;
+}
#endif