author	Tony Luck <tony.luck@intel.com>	2005-10-28 15:26:43 -0700
committer	Tony Luck <tony.luck@intel.com>	2005-10-28 15:26:43 -0700
commit	d73dee6ee4b554074f88e4ebd956ea4db8552d52 (patch)
tree	9f4468c7b54d0796d7659ce3ae30b756217522e9 /arch/ia64
parent	9acd3fa2e10f4e5b093ddf93af8f23cc9bdbd621 (diff)
parent	ddf6d0a00cbb4ad6d4fb4200cc911bb9389174c1 (diff)
Pull for-each-cpu into release branch
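
This series replaces open-coded NR_CPUS scans with the cpumask iterator macros. A minimal sketch of the pattern being converted, for orientation only (do_something() is a hypothetical stand-in for the per-cpu work; the real call sites are in the diff below):

	int cpu;

	/* Before: walk every possible CPU id and skip offline ones. */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!cpu_online(cpu))
			continue;
		do_something(cpu);
	}

	/* After: iterate only the CPUs set in the online cpumask. */
	for_each_online_cpu(cpu) {
		do_something(cpu);
	}
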
Diffstat (limited to 'arch/ia64')
-rw-r--r--	arch/ia64/kernel/irq.c	12
-rw-r--r--	arch/ia64/kernel/mca.c	4
-rw-r--r--	arch/ia64/kernel/module.c	6
-rw-r--r--	arch/ia64/kernel/smp.c	10
-rw-r--r--	arch/ia64/kernel/smpboot.c	6
-rw-r--r--	arch/ia64/mm/tlb.c	5
6 files changed, 21 insertions, 22 deletions
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 205d98028261..d33244c32759 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -57,9 +57,9 @@ int show_interrupts(struct seq_file *p, void *v)
if (i == 0) {
seq_printf(p, " ");
- for (j=0; j<NR_CPUS; j++)
- if (cpu_online(j))
- seq_printf(p, "CPU%d ",j);
+ for_each_online_cpu(j) {
+ seq_printf(p, "CPU%d ",j);
+ }
seq_putc(p, '\n');
}
@@ -72,9 +72,9 @@ int show_interrupts(struct seq_file *p, void *v)
#ifndef CONFIG_SMP
seq_printf(p, "%10u ", kstat_irqs(i));
#else
- for (j = 0; j < NR_CPUS; j++)
- if (cpu_online(j))
- seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+ for_each_online_cpu(j) {
+ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+ }
#endif
seq_printf(p, " %14s", irq_desc[i].handler->typename);
seq_printf(p, " %s", action->name);
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index d0a5106fba24..52c47da17246 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -508,9 +508,7 @@ ia64_mca_wakeup_all(void)
int cpu;
/* Clear the Rendez checkin flag for all cpus */
- for(cpu = 0; cpu < NR_CPUS; cpu++) {
- if (!cpu_online(cpu))
- continue;
+ for_each_online_cpu(cpu) {
if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
ia64_mca_wakeup(cpu);
}
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index f1aca7cffd12..7a2f0a798d12 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -947,8 +947,8 @@ void
percpu_modcopy (void *pcpudst, const void *src, unsigned long size)
{
unsigned int i;
- for (i = 0; i < NR_CPUS; i++)
- if (cpu_possible(i))
- memcpy(pcpudst + __per_cpu_offset[i], src, size);
+ for_each_cpu(i) {
+ memcpy(pcpudst + __per_cpu_offset[i], src, size);
+ }
}
#endif /* CONFIG_SMP */
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 0166a9847095..657ac99a451c 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -185,8 +185,8 @@ send_IPI_allbutself (int op)
{
unsigned int i;
- for (i = 0; i < NR_CPUS; i++) {
- if (cpu_online(i) && i != smp_processor_id())
+ for_each_online_cpu(i) {
+ if (i != smp_processor_id())
send_IPI_single(i, op);
}
}
@@ -199,9 +199,9 @@ send_IPI_all (int op)
{
int i;
- for (i = 0; i < NR_CPUS; i++)
- if (cpu_online(i))
- send_IPI_single(i, op);
+ for_each_online_cpu(i) {
+ send_IPI_single(i, op);
+ }
}
/*
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 7d72c0d872b3..400a48987124 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -694,9 +694,9 @@ smp_cpus_done (unsigned int dummy)
* Allow the user to impress friends.
*/
- for (cpu = 0; cpu < NR_CPUS; cpu++)
- if (cpu_online(cpu))
- bogosum += cpu_data(cpu)->loops_per_jiffy;
+ for_each_online_cpu(cpu) {
+ bogosum += cpu_data(cpu)->loops_per_jiffy;
+ }
printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
(int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 464557e4ed82..987fb754d6ad 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -77,9 +77,10 @@ wrap_mmu_context (struct mm_struct *mm)
/* can't call flush_tlb_all() here because of race condition with O(1) scheduler [EF] */
{
int cpu = get_cpu(); /* prevent preemption/migration */
- for (i = 0; i < NR_CPUS; ++i)
- if (cpu_online(i) && (i != cpu))
+ for_each_online_cpu(i) {
+ if (i != cpu)
per_cpu(ia64_need_tlb_flush, i) = 1;
+ }
put_cpu();
}
local_flush_tlb_all();