-rw-r--r--  arch/i386/mm/pgtable.c    2
-rw-r--r--  arch/ia64/mm/discontig.c  4
-rw-r--r--  arch/ia64/mm/init.c       2
-rw-r--r--  arch/m32r/mm/init.c       2
-rw-r--r--  arch/powerpc/mm/mem.c     4
-rw-r--r--  arch/x86_64/mm/init.c     2
-rw-r--r--  fs/buffer.c               2
-rw-r--r--  mm/page_alloc.c           6
-rw-r--r--  mm/vmscan.c               6
9 files changed, 15 insertions, 15 deletions
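
Every hunk below makes the same mechanical substitution: callers that walked every pg_data_t with for_each_pgdat() now walk only online nodes with for_each_online_pgdat(), so a node that memory hotplug has not brought up (or has taken down) is never handed to these loops. As a rough sketch of the difference between the two iterators, assuming the first_online_pgdat()/next_online_pgdat() helpers this conversion relies on (the authoritative definitions live in include/linux/mmzone.h and may differ in detail):

/* Sketch only: old iterator walks the raw pgdat list, online or not. */
#define for_each_pgdat(pgdat) \
	for (pgdat = pgdat_list; pgdat; pgdat = pgdat->pgdat_next)

/* Sketch only: new iterator visits only nodes currently marked online. */
#define for_each_online_pgdat(pgdat) \
	for (pgdat = first_online_pgdat(); \
	     pgdat; \
	     pgdat = next_online_pgdat(pgdat))

Because each call site already treats the loop body as per-node work, the conversion does not change any logic inside the loops; only the set of nodes visited shrinks to the online ones.
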
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index 9db3242103be..2889567e21a1 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -36,7 +36,7 @@ void show_mem(void)
printk(KERN_INFO "Mem-info:\n");
show_free_areas();
printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
- for_each_pgdat(pgdat) {
+ for_each_online_pgdat(pgdat) {
pgdat_resize_lock(pgdat, &flags);
for (i = 0; i < pgdat->node_spanned_pages; ++i) {
page = pgdat_page_nr(pgdat, i);
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 2f5e44862e91..384f1d7dce96 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -386,7 +386,7 @@ static void __init pgdat_insert(pg_data_t *pgdat)
{
pg_data_t *prev = NULL, *next;
- for_each_pgdat(next)
+ for_each_online_pgdat(next)
if (pgdat->node_id < next->node_id)
break;
else
@@ -560,7 +560,7 @@ void show_mem(void)
printk("Mem-info:\n");
show_free_areas();
printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
- for_each_pgdat(pgdat) {
+ for_each_online_pgdat(pgdat) {
unsigned long present;
unsigned long flags;
int shared = 0, cached = 0, reserved = 0;
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index ff4f31fcd330..2ef1151cde90 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -600,7 +600,7 @@ mem_init (void)
kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
kclist_add(&kcore_kernel, _stext, _end - _stext);
- for_each_pgdat(pgdat)
+ for_each_online_pgdat(pgdat)
if (pgdat->bdata->node_bootmem_map)
totalram_pages += free_all_bootmem_node(pgdat);
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
index c9e7dad860b7..2e0fe199ce38 100644
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -47,7 +47,7 @@ void show_mem(void)
printk("Mem-info:\n");
show_free_areas();
printk("Free swap: %6ldkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
- for_each_pgdat(pgdat) {
+ for_each_online_pgdat(pgdat) {
unsigned long flags;
pgdat_resize_lock(pgdat, &flags);
for (i = 0; i < pgdat->node_spanned_pages; ++i) {
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index badac10d700c..5e435a9c3431 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -195,7 +195,7 @@ void show_mem(void)
printk("Mem-info:\n");
show_free_areas();
printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
- for_each_pgdat(pgdat) {
+ for_each_online_pgdat(pgdat) {
unsigned long flags;
pgdat_resize_lock(pgdat, &flags);
for (i = 0; i < pgdat->node_spanned_pages; i++) {
@@ -351,7 +351,7 @@ void __init mem_init(void)
max_mapnr = max_pfn;
totalram_pages += free_all_bootmem();
#endif
- for_each_pgdat(pgdat) {
+ for_each_online_pgdat(pgdat) {
for (i = 0; i < pgdat->node_spanned_pages; i++) {
if (!pfn_valid(pgdat->node_start_pfn + i))
continue;
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index b04415625442..e5f7f1c34462 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -72,7 +72,7 @@ void show_mem(void)
show_free_areas();
printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
- for_each_pgdat(pgdat) {
+ for_each_online_pgdat(pgdat) {
for (i = 0; i < pgdat->node_spanned_pages; ++i) {
page = pfn_to_page(pgdat->node_start_pfn + i);
total++;
diff --git a/fs/buffer.c b/fs/buffer.c
index d597758dd129..23f1f3a68077 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -493,7 +493,7 @@ static void free_more_memory(void)
wakeup_pdflush(1024);
yield();
- for_each_pgdat(pgdat) {
+ for_each_online_pgdat(pgdat) {
zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
if (*zones)
try_to_free_pages(zones, GFP_NOFS);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8dc8f2735d22..ccc3713dd407 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1201,7 +1201,7 @@ unsigned int nr_free_highpages (void)
pg_data_t *pgdat;
unsigned int pages = 0;
- for_each_pgdat(pgdat)
+ for_each_online_pgdat(pgdat)
pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages;
return pages;
@@ -1343,7 +1343,7 @@ void get_zone_counts(unsigned long *active,
*active = 0;
*inactive = 0;
*free = 0;
- for_each_pgdat(pgdat) {
+ for_each_online_pgdat(pgdat) {
unsigned long l, m, n;
__get_zone_counts(&l, &m, &n, pgdat);
*active += l;
@@ -2482,7 +2482,7 @@ static void setup_per_zone_lowmem_reserve(void)
struct pglist_data *pgdat;
int j, idx;
- for_each_pgdat(pgdat) {
+ for_each_online_pgdat(pgdat) {
for (j = 0; j < MAX_NR_ZONES; j++) {
struct zone *zone = pgdat->node_zones + j;
unsigned long present_pages = zone->present_pages;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 78865c849f8f..acdf001d6941 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1305,7 +1305,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
current->reclaim_state = &reclaim_state;
repeat:
- for_each_pgdat(pgdat) {
+ for_each_online_pgdat(pgdat) {
unsigned long freed;
freed = balance_pgdat(pgdat, nr_to_free, 0);
@@ -1335,7 +1335,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
cpumask_t mask;
if (action == CPU_ONLINE) {
- for_each_pgdat(pgdat) {
+ for_each_online_pgdat(pgdat) {
mask = node_to_cpumask(pgdat->node_id);
if (any_online_cpu(mask) != NR_CPUS)
/* One of our CPUs online: restore mask */
@@ -1351,7 +1351,7 @@ static int __init kswapd_init(void)
pg_data_t *pgdat;
swap_setup();
- for_each_pgdat(pgdat) {
+ for_each_online_pgdat(pgdat) {
pid_t pid;
pid = kernel_thread(kswapd, pgdat, CLONE_KERNEL);