author     Paul Mackerras <paulus@samba.org>  2006-05-19 15:02:42 +1000
committer  Paul Mackerras <paulus@samba.org>  2006-05-19 15:02:42 +1000
commit     3c06da5ae5358e9d325d541a053e1059e9654bcc
tree       04c953cc82fe57cff248ac523095cd4f0d9611a7 /mm
parent     4d1f3f25d9c303d1ce63b42cc94c54ac0ab2e950
parent     a54c9d30dbb06391ec4422aaf0e1dc2c8c53bd3e
Merge ../linux-2.6
Diffstat (limited to 'mm')
 mm/page_alloc.c | 11 +++++++----
 mm/slab.c       | 19 +++++++++++++++----
 mm/sparse.c     |  2 +-
 3 files changed, 23 insertions(+), 9 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ea77c999047e..813b4ec1298a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -39,6 +39,7 @@
 #include <linux/mempolicy.h>
 
 #include <asm/tlbflush.h>
+#include <asm/div64.h>
 #include "internal.h"
 
 /*
@@ -2566,9 +2567,11 @@ void setup_per_zone_pages_min(void)
 	}
 
 	for_each_zone(zone) {
-		unsigned long tmp;
+		u64 tmp;
+
 		spin_lock_irqsave(&zone->lru_lock, flags);
-		tmp = (pages_min * zone->present_pages) / lowmem_pages;
+		tmp = (u64)pages_min * zone->present_pages;
+		do_div(tmp, lowmem_pages);
 		if (is_highmem(zone)) {
 			/*
 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
@@ -2595,8 +2598,8 @@ void setup_per_zone_pages_min(void)
 			zone->pages_min = tmp;
 		}
 
-		zone->pages_low = zone->pages_min + tmp / 4;
-		zone->pages_high = zone->pages_min + tmp / 2;
+		zone->pages_low = zone->pages_min + (tmp >> 2);
+		zone->pages_high = zone->pages_min + (tmp >> 1);
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 	}
 
diff --git a/mm/slab.c b/mm/slab.c
index c32af7e7581e..d31a06bfbea5 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -700,6 +700,14 @@ static enum {
 	FULL
 } g_cpucache_up;
 
+/*
+ * used by boot code to determine if it can use slab based allocator
+ */
+int slab_is_available(void)
+{
+	return g_cpucache_up == FULL;
+}
+
 static DEFINE_PER_CPU(struct work_struct, reap_work);
 
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
@@ -2192,11 +2200,14 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
 	check_irq_on();
 	for_each_online_node(node) {
 		l3 = cachep->nodelists[node];
-		if (l3) {
+		if (l3 && l3->alien)
+			drain_alien_cache(cachep, l3->alien);
+	}
+
+	for_each_online_node(node) {
+		l3 = cachep->nodelists[node];
+		if (l3)
 			drain_array(cachep, l3, l3->shared, 1, node);
-			if (l3->alien)
-				drain_alien_cache(cachep, l3->alien);
-		}
 	}
 }
 
diff --git a/mm/sparse.c b/mm/sparse.c
index d7c32de99ee8..c5e89eb9ac8f 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -32,7 +32,7 @@ static struct mem_section *sparse_index_alloc(int nid)
 	unsigned long array_size = SECTIONS_PER_ROOT *
 				   sizeof(struct mem_section);
 
-	if (system_state == SYSTEM_RUNNING)
+	if (slab_is_available())
 		section = kmalloc_node(array_size, GFP_KERNEL, nid);
 	else
 		section = alloc_bootmem_node(NODE_DATA(nid), array_size);
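
A note on the mm/page_alloc.c hunks: on 32-bit kernels unsigned long is 32 bits, so the product pages_min * zone->present_pages can wrap on machines with enough memory and the watermarks come out wrong. The fix widens the product to u64 and divides with do_div(), the kernel's in-place 64-by-32 division; the later / 4 and / 2 become shifts so the u64 arithmetic never needs a generic 64-bit division, which 32-bit kernels cannot pull in from libgcc. A minimal userspace sketch of the wraparound (the numbers are illustrative only, and plain 64-bit division stands in for do_div()):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* illustrative values only, not taken from the patch */
	uint32_t pages_min = 16384;		/* 2^14 */
	uint32_t present_pages = 1048576;	/* 2^20, i.e. 4GB in 4K pages */
	uint32_t lowmem_pages = 229376;

	/* old code: the 32-bit intermediate 2^34 wraps to 0 */
	uint32_t wrong = pages_min * present_pages / lowmem_pages;

	/* new code: widen before multiplying; the kernel then does
	 * do_div(tmp, lowmem_pages) instead of a plain division */
	uint64_t tmp = (uint64_t)pages_min * present_pages;
	tmp /= lowmem_pages;

	printf("32-bit: %u, 64-bit: %llu\n", wrong, (unsigned long long)tmp);
	return 0;
}

With these inputs the 32-bit product is exactly 2^34, which truncates to 0, while the widened computation yields the intended watermark.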
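
A note on the drain_cpu_caches() hunk in mm/slab.c: the single loop that handled each node's shared array and then its alien cache becomes two passes, so every node's alien cache is flushed before any node's shared array is drained. Presumably this orders the work so that objects still parked in remote nodes' alien caches have been returned to their home nodes before the per-node shared drain runs, rather than being flushed only after the node they belong to has already been processed.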
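
A note on slab_is_available() and the mm/sparse.c hunk: system_state does not become SYSTEM_RUNNING until the very end of boot, long after kmem_cache_init() has brought the slab allocator up (and after mem_init() has retired the bootmem allocator), so the old test could steer a call made in that window to alloc_bootmem_node() when it should have used kmalloc_node(). Asking the slab allocator about its own state removes the window. The same guard pattern as a kernel-style sketch (early_array_alloc() is a hypothetical name; the real call site is sparse_index_alloc() above):

#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>

/* hypothetical helper: pick whichever allocator is actually live */
static void *early_array_alloc(unsigned long size, int nid)
{
	if (slab_is_available())
		return kmalloc_node(size, GFP_KERNEL, nid);
	return alloc_bootmem_node(NODE_DATA(nid), size);
}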