 include/linux/quicklist.h | 94 ++++++++++++++++++++++++++++++++++++++++++++
 mm/Kconfig                |  5 +++++
 mm/Makefile               |  2 ++
 mm/quicklist.c            | 88 ++++++++++++++++++++++++++++++++++++++++
 4 files changed, 189 insertions(+), 0 deletions(-)
diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h
new file mode 100644
index 000000000000..9371c6116df3
--- /dev/null
+++ b/include/linux/quicklist.h
@@ -0,0 +1,94 @@
+#ifndef LINUX_QUICKLIST_H
+#define LINUX_QUICKLIST_H
+/*
+ * Fast allocation and disposal of pages. Pages must already be in the
+ * state expected after allocation when they are freed. Per cpu lists of
+ * pages are kept that contain only node local pages.
+ *
+ * (C) 2007, SGI. Christoph Lameter <clameter@sgi.com>
+ */
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/percpu.h>
+
+#ifdef CONFIG_QUICKLIST
+
+struct quicklist {
+ void *page;
+ int nr_pages;
+};
+
+DECLARE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
+
+/*
+ * The two key functions quicklist_alloc and quicklist_free are inline so
+ * that they may be custom compiled for the platform.
+ * Specifying a NULL ctor avoids the constructor call. Specifying a
+ * constant quicklist number allows the compiler to determine the exact
+ * address in the per cpu area.
+ *
+ * The fast path in quicklist_alloc touches only a per cpu cacheline and
+ * the first cacheline of the page itself. There is minimal overhead involved.
+ */
+static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *))
+{
+ struct quicklist *q;
+ void **p = NULL;
+
+	q = &get_cpu_var(quicklist)[nr];
+ p = q->page;
+ if (likely(p)) {
+ q->page = p[0];
+ p[0] = NULL;
+ q->nr_pages--;
+ }
+ put_cpu_var(quicklist);
+ if (likely(p))
+ return p;
+
+ p = (void *)__get_free_page(flags | __GFP_ZERO);
+ if (ctor && p)
+ ctor(p);
+ return p;
+}
+
+static inline void __quicklist_free(int nr, void (*dtor)(void *), void *p,
+ struct page *page)
+{
+ struct quicklist *q;
+ int nid = page_to_nid(page);
+
+ if (unlikely(nid != numa_node_id())) {
+ if (dtor)
+ dtor(p);
+ __free_page(page);
+ return;
+ }
+
+ q = &get_cpu_var(quicklist)[nr];
+ *(void **)p = q->page;
+ q->page = p;
+ q->nr_pages++;
+ put_cpu_var(quicklist);
+}
+
+static inline void quicklist_free(int nr, void (*dtor)(void *), void *pp)
+{
+ __quicklist_free(nr, dtor, pp, virt_to_page(pp));
+}
+
+static inline void quicklist_free_page(int nr, void (*dtor)(void *),
+ struct page *page)
+{
+ __quicklist_free(nr, dtor, page_address(page), page);
+}
+
+void quicklist_trim(int nr, void (*dtor)(void *),
+ unsigned long min_pages, unsigned long max_free);
+
+unsigned long quicklist_total_size(void);
+
+#endif /* CONFIG_QUICKLIST */
+
+#endif /* LINUX_QUICKLIST_H */
+
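The header above only defines the per cpu list API. As a rough, hypothetical illustration (not part of this patch) of how an architecture might route page table allocation through it, the sketch below assumes quicklist 0 is used for page directories; the helper names pgd_quick_alloc and pgd_quick_free are made up here for the example.

/*
 * Hypothetical sketch: page directory allocation through quicklist 0.
 * The names and the list number are illustrative assumptions only.
 */
#include <linux/quicklist.h>

#define QUICK_PGD 0	/* assumed list number for page directories */

static inline void *pgd_quick_alloc(void)
{
	/*
	 * Returns a zeroed, node local page from the per cpu list when one
	 * is cached, otherwise falls back to the page allocator with
	 * GFP_KERNEL | __GFP_ZERO inside quicklist_alloc().
	 */
	return quicklist_alloc(QUICK_PGD, GFP_KERNEL, NULL);
}

static inline void pgd_quick_free(void *pgd)
{
	/*
	 * The page must already be back in its "as allocated" (zeroed)
	 * state; quicklist_free() only threads it onto the per cpu list
	 * or hands it back to the page allocator if it is remote.
	 */
	quicklist_free(QUICK_PGD, NULL, pgd);
}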
diff --git a/mm/Kconfig b/mm/Kconfig
index 7942b333e46c..1ac718f636ec 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -163,3 +163,8 @@ config ZONE_DMA_FLAG
default "0" if !ZONE_DMA
default "1"
+
+config NR_QUICK
+	int
+	depends on QUICKLIST
+	default "1"
diff --git a/mm/Makefile b/mm/Makefile
index 1887148e44e7..a9148ea329aa 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -30,3 +30,5 @@ obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
obj-$(CONFIG_FS_XIP) += filemap_xip.o
obj-$(CONFIG_MIGRATION) += migrate.o
obj-$(CONFIG_SMP) += allocpercpu.o
+obj-$(CONFIG_QUICKLIST) += quicklist.o
+
diff --git a/mm/quicklist.c b/mm/quicklist.c
new file mode 100644
index 000000000000..ae8189c2799e
--- /dev/null
+++ b/mm/quicklist.c
@@ -0,0 +1,88 @@
+/*
+ * Quicklist support.
+ *
+ * Quicklists are lightweight lists of pages that have a defined state
+ * on alloc and free. Pages must be in the quicklist specific defined state
+ * (zero by default) when they are freed. The initial idea for such lists
+ * appears to have come from David Miller; various other people have
+ * improved on it since.
+ *
+ * Copyright (C) 2007 SGI,
+ * Christoph Lameter <clameter@sgi.com>
+ * Generalized, added support for multiple lists and
+ * constructors / destructors.
+ */
+#include <linux/kernel.h>
+
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/module.h>
+#include <linux/quicklist.h>
+
+DEFINE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
+
+#define FRACTION_OF_NODE_MEM 16
+
+static unsigned long max_pages(unsigned long min_pages)
+{
+ unsigned long node_free_pages, max;
+
+ node_free_pages = node_page_state(numa_node_id(),
+ NR_FREE_PAGES);
+ max = node_free_pages / FRACTION_OF_NODE_MEM;
+ return max(max, min_pages);
+}
+
+static long min_pages_to_free(struct quicklist *q,
+ unsigned long min_pages, long max_free)
+{
+ long pages_to_free;
+
+ pages_to_free = q->nr_pages - max_pages(min_pages);
+
+ return min(pages_to_free, max_free);
+}
+
+/*
+ * Trim down the number of pages in the quicklist
+ */
+void quicklist_trim(int nr, void (*dtor)(void *),
+ unsigned long min_pages, unsigned long max_free)
+{
+ long pages_to_free;
+ struct quicklist *q;
+
+ q = &get_cpu_var(quicklist)[nr];
+ if (q->nr_pages > min_pages) {
+ pages_to_free = min_pages_to_free(q, min_pages, max_free);
+
+ while (pages_to_free > 0) {
+ /*
+ * We pass a gfp_t of 0 to quicklist_alloc here
+ * because we will never call into the page allocator.
+ */
+ void *p = quicklist_alloc(nr, 0, NULL);
+
+ if (dtor)
+ dtor(p);
+ free_page((unsigned long)p);
+ pages_to_free--;
+ }
+ }
+ put_cpu_var(quicklist);
+}
+
+unsigned long quicklist_total_size(void)
+{
+ unsigned long count = 0;
+ int cpu;
+ struct quicklist *ql, *q;
+
+ for_each_online_cpu(cpu) {
+ ql = per_cpu(quicklist, cpu);
+ for (q = ql; q < ql + CONFIG_NR_QUICK; q++)
+ count += q->nr_pages;
+ }
+ return count;
+}
+
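As a usage note, quicklist_trim() is meant to be called periodically by architecture code so that a CPU's cached pages do not grow without bound. A minimal sketch follows; the wrapper name and the 25/16 thresholds are assumptions chosen for illustration, not something mandated by this patch.

/*
 * Hypothetical sketch: periodic trimming of quicklist 0, e.g. from an
 * architecture's page table cache maintenance hook. The limits are a
 * per-arch policy decision; the values below are only examples.
 */
#include <linux/quicklist.h>

static void trim_pgd_quicklist(void)
{
	/*
	 * Keep at least 25 cached pages on this CPU's list but free at
	 * most 16 pages per call, so a single invocation stays cheap.
	 */
	quicklist_trim(0, NULL, 25, 16);
}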