From 270bd400e8030ed921e80f334c3ebf274fd93037 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Mar 2015 10:47:49 +1030 Subject: cpumask: fix cpu-hotplug documentation It refers to an obsolete function. Signed-off-by: Rusty Russell Cc: Jonathan Corbet Cc: linux-doc@vger.kernel.org --- Documentation/cpu-hotplug.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt index a0b005d2bd95..f9ad5e048b11 100644 --- a/Documentation/cpu-hotplug.txt +++ b/Documentation/cpu-hotplug.txt @@ -108,7 +108,7 @@ Never use anything other than cpumask_t to represent bitmap of CPUs. for_each_possible_cpu - Iterate over cpu_possible_mask for_each_online_cpu - Iterate over cpu_online_mask for_each_present_cpu - Iterate over cpu_present_mask - for_each_cpu_mask(x,mask) - Iterate over some random collection of cpu mask. + for_each_cpu(x,mask) - Iterate over some random collection of cpu mask. #include <linux/cpu.h> get_online_cpus() and put_online_cpus(): -- cgit From 51f7bd8590267011db7b632f53f3d32ec83ee8bb Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Mar 2015 10:48:49 +1030 Subject: ia64: Use for_each_cpu_and() and cpumask_any_and() instead of temp var. Just a bit of manual neatening, before spatch cleans the rest. Signed-off-by: Rusty Russell Cc: Tony Luck Cc: Fenghua Yu Cc: linux-ia64@vger.kernel.org --- arch/ia64/kernel/irq_ia64.c | 4 +--- arch/ia64/kernel/msi_ia64.c | 10 ++++------ 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index 698d8fefde6c..3329177c262e 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -161,7 +161,6 @@ int bind_irq_vector(int irq, int vector, cpumask_t domain) static void __clear_irq_vector(int irq) { int vector, cpu; - cpumask_t mask; cpumask_t domain; struct irq_cfg *cfg = &irq_cfg[irq]; @@ -169,8 +168,7 @@ static void __clear_irq_vector(int irq) BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED); vector = cfg->vector; domain = cfg->domain; - cpumask_and(&mask, &cfg->domain, cpu_online_mask); - for_each_cpu_mask(cpu, mask) + for_each_cpu_and(cpu, &cfg->domain, cpu_online_mask) per_cpu(vector_irq, cpu)[vector] = -1; cfg->vector = IRQ_VECTOR_UNASSIGNED; cfg->domain = CPU_MASK_NONE; diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c index 8ae36ea177d3..9dd7464f8c17 100644 --- a/arch/ia64/kernel/msi_ia64.c +++ b/arch/ia64/kernel/msi_ia64.c @@ -47,15 +47,14 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) struct msi_msg msg; unsigned long dest_phys_id; int irq, vector; - cpumask_t mask; irq = create_irq(); if (irq < 0) return irq; irq_set_msi_desc(irq, desc); - cpumask_and(&mask, &(irq_to_domain(irq)), cpu_online_mask); - dest_phys_id = cpu_physical_id(first_cpu(mask)); + dest_phys_id = cpu_physical_id(cpumask_any_and(&(irq_to_domain(irq)), + cpu_online_mask)); vector = irq_to_vector(irq); msg.address_hi = 0; @@ -171,10 +170,9 @@ msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg) { struct irq_cfg *cfg = irq_cfg + irq; unsigned dest; - cpumask_t mask; - cpumask_and(&mask, &(irq_to_domain(irq)), cpu_online_mask); - dest = cpu_physical_id(first_cpu(mask)); + dest = cpu_physical_id(cpumask_first_and(&(irq_to_domain(irq)), + cpu_online_mask)); msg->address_hi = 0; msg->address_lo = -- cgit From f9b531fe14a539ec2ad802b73c9638f324e4a4ff Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Mar 2015 10:49:16 +1030 Subject: drivers: fix up obsolete
cpu function usage. Thanks to spatch, plus manual removal of "&*". Then a sweep for for_each_cpu_mask => for_each_cpu. Signed-off-by: Rusty Russell Acked-by: Rafael J. Wysocki Cc: Thomas Gleixner Cc: Herbert Xu Cc: Jason Cooper Cc: Chris Metcalf Cc: netdev@vger.kernel.org --- drivers/clocksource/dw_apb_timer.c | 3 ++- drivers/cpuidle/coupled.c | 6 +++--- drivers/crypto/n2_core.c | 4 ++-- drivers/irqchip/irq-gic-v3.c | 2 +- drivers/irqchip/irq-mips-gic.c | 6 +++--- drivers/net/ethernet/tile/tilegx.c | 4 ++-- 6 files changed, 13 insertions(+), 12 deletions(-) diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c index f3656a6b0382..35a88097af3c 100644 --- a/drivers/clocksource/dw_apb_timer.c +++ b/drivers/clocksource/dw_apb_timer.c @@ -117,7 +117,8 @@ static void apbt_set_mode(enum clock_event_mode mode, unsigned long period; struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt); - pr_debug("%s CPU %d mode=%d\n", __func__, first_cpu(*evt->cpumask), + pr_debug("%s CPU %d mode=%d\n", __func__, + cpumask_first(evt->cpumask), mode); switch (mode) { diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c index 73fe2f8d7f96..7936dce4b878 100644 --- a/drivers/cpuidle/coupled.c +++ b/drivers/cpuidle/coupled.c @@ -292,7 +292,7 @@ static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev, */ smp_rmb(); - for_each_cpu_mask(i, coupled->coupled_cpus) + for_each_cpu(i, &coupled->coupled_cpus) if (cpu_online(i) && coupled->requested_state[i] < state) state = coupled->requested_state[i]; @@ -338,7 +338,7 @@ static void cpuidle_coupled_poke_others(int this_cpu, { int cpu; - for_each_cpu_mask(cpu, coupled->coupled_cpus) + for_each_cpu(cpu, &coupled->coupled_cpus) if (cpu != this_cpu && cpu_online(cpu)) cpuidle_coupled_poke(cpu); } @@ -638,7 +638,7 @@ int cpuidle_coupled_register_device(struct cpuidle_device *dev) if (cpumask_empty(&dev->coupled_cpus)) return 0; - for_each_cpu_mask(cpu, dev->coupled_cpus) { + for_each_cpu(cpu, &dev->coupled_cpus) { other_dev = per_cpu(cpuidle_devices, cpu); if (other_dev && other_dev->coupled) { coupled = other_dev->coupled; diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index afd136b45f49..10a9aeff1666 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -1754,7 +1754,7 @@ static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc, dev->dev.of_node->full_name); return -EINVAL; } - cpu_set(*id, p->sharing); + cpumask_set_cpu(*id, &p->sharing); table[*id] = p; } return 0; @@ -1776,7 +1776,7 @@ static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list, return -ENOMEM; } - cpus_clear(p->sharing); + cpumask_clear(&p->sharing); spin_lock_init(&p->lock); p->q_type = q_type; INIT_LIST_HEAD(&p->jobs); diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 1c6dea2fbc34..04b6f0732c1a 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -512,7 +512,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) */ smp_wmb(); - for_each_cpu_mask(cpu, *mask) { + for_each_cpu(cpu, mask) { u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL; u16 tlist; diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 9acdc080e7ec..f26307908a2a 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -345,19 +345,19 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, int i; cpumask_and(&tmp, cpumask, cpu_online_mask); - if 
(cpus_empty(tmp)) + if (cpumask_empty(&tmp)) return -EINVAL; /* Assumption : cpumask refers to a single CPU */ spin_lock_irqsave(&gic_lock, flags); /* Re-route this IRQ */ - gic_map_to_vpe(irq, first_cpu(tmp)); + gic_map_to_vpe(irq, cpumask_first(&tmp)); /* Update the pcpu_masks */ for (i = 0; i < NR_CPUS; i++) clear_bit(irq, pcpu_masks[i].pcpu_mask); - set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); + set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask); cpumask_copy(d->affinity, cpumask); spin_unlock_irqrestore(&gic_lock, flags); diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index bea8cd2bb56c..deac41498c6e 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -1122,7 +1122,7 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev, addr + i * sizeof(struct tile_net_comps); /* If this is a network cpu, create an iqueue. */ - if (cpu_isset(cpu, network_cpus_map)) { + if (cpumask_test_cpu(cpu, &network_cpus_map)) { order = get_order(NOTIF_RING_SIZE); page = homecache_alloc_pages(GFP_KERNEL, order, cpu); if (page == NULL) { @@ -1298,7 +1298,7 @@ static int tile_net_init_mpipe(struct net_device *dev) int first_ring, ring; int instance = mpipe_instance(dev); struct mpipe_data *md = &mpipe_data[instance]; - int network_cpus_count = cpus_weight(network_cpus_map); + int network_cpus_count = cpumask_weight(&network_cpus_map); if (!hash_default) { netdev_err(dev, "Networking requires hash_default!\n"); -- cgit From 5d2068da8d339e4dff8f9b9a1246e6a79e2949d8 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Mar 2015 10:49:16 +1030 Subject: ia64: fix up obsolete cpu function usage. Thanks to spatch, then a sweep for for_each_cpu_mask => for_each_cpu. Signed-off-by: Rusty Russell Cc: Tony Luck Cc: Fenghua Yu Cc: linux-ia64@vger.kernel.org --- arch/ia64/include/asm/acpi.h | 6 +++--- arch/ia64/kernel/acpi.c | 2 +- arch/ia64/kernel/iosapic.c | 2 +- arch/ia64/kernel/irq_ia64.c | 28 ++++++++++++++-------------- arch/ia64/kernel/mca.c | 10 +++++----- arch/ia64/kernel/numa.c | 10 +++++----- arch/ia64/kernel/salinfo.c | 24 ++++++++++++------------ arch/ia64/kernel/setup.c | 11 ++++++----- arch/ia64/kernel/smp.c | 6 +++--- arch/ia64/kernel/smpboot.c | 42 ++++++++++++++++++++++-------------------- arch/ia64/kernel/topology.c | 6 +++--- 11 files changed, 75 insertions(+), 72 deletions(-) diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h index a1d91ab4c5ef..aa0fdf125aba 100644 --- a/arch/ia64/include/asm/acpi.h +++ b/arch/ia64/include/asm/acpi.h @@ -117,7 +117,7 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf) #ifdef CONFIG_ACPI_NUMA extern cpumask_t early_cpu_possible_map; #define for_each_possible_early_cpu(cpu) \ - for_each_cpu_mask((cpu), early_cpu_possible_map) + for_each_cpu((cpu), &early_cpu_possible_map) static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus) { @@ -125,13 +125,13 @@ static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus) int cpu; int next_nid = 0; - low_cpu = cpus_weight(early_cpu_possible_map); + low_cpu = cpumask_weight(&early_cpu_possible_map); high_cpu = max(low_cpu, min_cpus); high_cpu = min(high_cpu + reserve_cpus, NR_CPUS); for (cpu = low_cpu; cpu < high_cpu; cpu++) { - cpu_set(cpu, early_cpu_possible_map); + cpumask_set_cpu(cpu, &early_cpu_possible_map); if (node_cpuid[cpu].nid == NUMA_NO_NODE) { node_cpuid[cpu].nid = next_nid; next_nid++; diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 
2c4498919d3c..35bf22cc71b7 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -483,7 +483,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) (pa->apic_id << 8) | (pa->local_sapic_eid); /* nid should be overridden as logical node id later */ node_cpuid[srat_num_cpus].nid = pxm; - cpu_set(srat_num_cpus, early_cpu_possible_map); + cpumask_set_cpu(srat_num_cpus, &early_cpu_possible_map); srat_num_cpus++; } diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index cd44a57c73be..bc9501e36e77 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c @@ -690,7 +690,7 @@ skip_numa_setup: do { if (++cpu >= nr_cpu_ids) cpu = 0; - } while (!cpu_online(cpu) || !cpu_isset(cpu, domain)); + } while (!cpu_online(cpu) || !cpumask_test_cpu(cpu, &domain)); return cpu_physical_id(cpu); #else /* CONFIG_SMP */ diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index 3329177c262e..9f40d972969c 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -109,13 +109,13 @@ static inline int find_unassigned_vector(cpumask_t domain) int pos, vector; cpumask_and(&mask, &domain, cpu_online_mask); - if (cpus_empty(mask)) + if (cpumask_empty(&mask)) return -EINVAL; for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) { vector = IA64_FIRST_DEVICE_VECTOR + pos; - cpus_and(mask, domain, vector_table[vector]); - if (!cpus_empty(mask)) + cpumask_and(&mask, &domain, &vector_table[vector]); + if (!cpumask_empty(&mask)) continue; return vector; } @@ -132,18 +132,18 @@ static int __bind_irq_vector(int irq, int vector, cpumask_t domain) BUG_ON((unsigned)vector >= IA64_NUM_VECTORS); cpumask_and(&mask, &domain, cpu_online_mask); - if (cpus_empty(mask)) + if (cpumask_empty(&mask)) return -EINVAL; - if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain)) + if ((cfg->vector == vector) && cpumask_equal(&cfg->domain, &domain)) return 0; if (cfg->vector != IRQ_VECTOR_UNASSIGNED) return -EBUSY; - for_each_cpu_mask(cpu, mask) + for_each_cpu(cpu, &mask) per_cpu(vector_irq, cpu)[vector] = irq; cfg->vector = vector; cfg->domain = domain; irq_status[irq] = IRQ_USED; - cpus_or(vector_table[vector], vector_table[vector], domain); + cpumask_or(&vector_table[vector], &vector_table[vector], &domain); return 0; } @@ -242,7 +242,7 @@ void __setup_vector_irq(int cpu) per_cpu(vector_irq, cpu)[vector] = -1; /* Mark the inuse vectors */ for (irq = 0; irq < NR_IRQS; ++irq) { - if (!cpu_isset(cpu, irq_cfg[irq].domain)) + if (!cpumask_test_cpu(cpu, &irq_cfg[irq].domain)) continue; vector = irq_to_vector(irq); per_cpu(vector_irq, cpu)[vector] = irq; @@ -273,7 +273,7 @@ static int __irq_prepare_move(int irq, int cpu) return -EBUSY; if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu)) return -EINVAL; - if (cpu_isset(cpu, cfg->domain)) + if (cpumask_test_cpu(cpu, &cfg->domain)) return 0; domain = vector_allocation_domain(cpu); vector = find_unassigned_vector(domain); @@ -307,12 +307,12 @@ void irq_complete_move(unsigned irq) if (likely(!cfg->move_in_progress)) return; - if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain))) + if (unlikely(cpumask_test_cpu(smp_processor_id(), &cfg->old_domain))) return; cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask); - cfg->move_cleanup_count = cpus_weight(cleanup_mask); - for_each_cpu_mask(i, cleanup_mask) + cfg->move_cleanup_count = cpumask_weight(&cleanup_mask); + for_each_cpu(i, &cleanup_mask) platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0); cfg->move_in_progress 
= 0; } @@ -338,12 +338,12 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id) if (!cfg->move_cleanup_count) goto unlock; - if (!cpu_isset(me, cfg->old_domain)) + if (!cpumask_test_cpu(me, &cfg->old_domain)) goto unlock; spin_lock_irqsave(&vector_lock, flags); __this_cpu_write(vector_irq[vector], -1); - cpu_clear(me, vector_table[vector]); + cpumask_clear_cpu(me, &vector_table[vector]); spin_unlock_irqrestore(&vector_lock, flags); cfg->move_cleanup_count--; unlock: diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 8bfd36af46f8..dd5801eb4c69 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -1293,7 +1293,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, monarch_cpu = cpu; sos->monarch = 1; } else { - cpu_set(cpu, mca_cpu); + cpumask_set_cpu(cpu, &mca_cpu); sos->monarch = 0; } mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d " @@ -1316,7 +1316,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, */ ia64_mca_wakeup_all(); } else { - while (cpu_isset(cpu, mca_cpu)) + while (cpumask_test_cpu(cpu, &mca_cpu)) cpu_relax(); /* spin until monarch wakes us */ } @@ -1355,9 +1355,9 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, * and put this cpu in the rendez loop. */ for_each_online_cpu(i) { - if (cpu_isset(i, mca_cpu)) { + if (cpumask_test_cpu(i, &mca_cpu)) { monarch_cpu = i; - cpu_clear(i, mca_cpu); /* wake next cpu */ + cpumask_clear_cpu(i, &mca_cpu); /* wake next cpu */ while (monarch_cpu != -1) cpu_relax(); /* spin until last cpu leaves */ set_curr_task(cpu, previous_current); @@ -1822,7 +1822,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset, ti->cpu = cpu; p->stack = ti; p->state = TASK_UNINTERRUPTIBLE; - cpu_set(cpu, p->cpus_allowed); + cpumask_set_cpu(cpu, &p->cpus_allowed); INIT_LIST_HEAD(&p->tasks); p->parent = p->real_parent = p->group_leader = p; INIT_LIST_HEAD(&p->children); diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c index d288cde93606..92c376279c6d 100644 --- a/arch/ia64/kernel/numa.c +++ b/arch/ia64/kernel/numa.c @@ -39,7 +39,7 @@ void map_cpu_to_node(int cpu, int nid) } /* sanity check first */ oldnid = cpu_to_node_map[cpu]; - if (cpu_isset(cpu, node_to_cpu_mask[oldnid])) { + if (cpumask_test_cpu(cpu, &node_to_cpu_mask[oldnid])) { return; /* nothing to do */ } /* we don't have cpu-driven node hot add yet... 
@@ -47,16 +47,16 @@ void map_cpu_to_node(int cpu, int nid) if (!node_online(nid)) nid = first_online_node; cpu_to_node_map[cpu] = nid; - cpu_set(cpu, node_to_cpu_mask[nid]); + cpumask_set_cpu(cpu, &node_to_cpu_mask[nid]); return; } void unmap_cpu_from_node(int cpu, int nid) { - WARN_ON(!cpu_isset(cpu, node_to_cpu_mask[nid])); + WARN_ON(!cpumask_test_cpu(cpu, &node_to_cpu_mask[nid])); WARN_ON(cpu_to_node_map[cpu] != nid); cpu_to_node_map[cpu] = 0; - cpu_clear(cpu, node_to_cpu_mask[nid]); + cpumask_clear_cpu(cpu, &node_to_cpu_mask[nid]); } @@ -71,7 +71,7 @@ void __init build_cpu_to_node_map(void) int cpu, i, node; for(node=0; node < MAX_NUMNODES; node++) - cpus_clear(node_to_cpu_mask[node]); + cpumask_clear(&node_to_cpu_mask[node]); for_each_possible_early_cpu(cpu) { node = -1; diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c index ee9719eebb1e..1eeffb7fbb16 100644 --- a/arch/ia64/kernel/salinfo.c +++ b/arch/ia64/kernel/salinfo.c @@ -256,7 +256,7 @@ salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe) data_saved->buffer = buffer; } } - cpu_set(smp_processor_id(), data->cpu_event); + cpumask_set_cpu(smp_processor_id(), &data->cpu_event); if (irqsafe) { salinfo_work_to_do(data); spin_unlock_irqrestore(&data_saved_lock, flags); @@ -274,7 +274,7 @@ salinfo_timeout_check(struct salinfo_data *data) unsigned long flags; if (!data->open) return; - if (!cpus_empty(data->cpu_event)) { + if (!cpumask_empty(&data->cpu_event)) { spin_lock_irqsave(&data_saved_lock, flags); salinfo_work_to_do(data); spin_unlock_irqrestore(&data_saved_lock, flags); @@ -308,7 +308,7 @@ salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t int i, n, cpu = -1; retry: - if (cpus_empty(data->cpu_event) && down_trylock(&data->mutex)) { + if (cpumask_empty(&data->cpu_event) && down_trylock(&data->mutex)) { if (file->f_flags & O_NONBLOCK) return -EAGAIN; if (down_interruptible(&data->mutex)) @@ -317,9 +317,9 @@ retry: n = data->cpu_check; for (i = 0; i < nr_cpu_ids; i++) { - if (cpu_isset(n, data->cpu_event)) { + if (cpumask_test_cpu(n, &data->cpu_event)) { if (!cpu_online(n)) { - cpu_clear(n, data->cpu_event); + cpumask_clear_cpu(n, &data->cpu_event); continue; } cpu = n; @@ -451,7 +451,7 @@ retry: call_on_cpu(cpu, salinfo_log_read_cpu, data); if (!data->log_size) { data->state = STATE_NO_DATA; - cpu_clear(cpu, data->cpu_event); + cpumask_clear_cpu(cpu, &data->cpu_event); } else { data->state = STATE_LOG_RECORD; } @@ -491,11 +491,11 @@ salinfo_log_clear(struct salinfo_data *data, int cpu) unsigned long flags; spin_lock_irqsave(&data_saved_lock, flags); data->state = STATE_NO_DATA; - if (!cpu_isset(cpu, data->cpu_event)) { + if (!cpumask_test_cpu(cpu, &data->cpu_event)) { spin_unlock_irqrestore(&data_saved_lock, flags); return 0; } - cpu_clear(cpu, data->cpu_event); + cpumask_clear_cpu(cpu, &data->cpu_event); if (data->saved_num) { shift1_data_saved(data, data->saved_num - 1); data->saved_num = 0; @@ -509,7 +509,7 @@ salinfo_log_clear(struct salinfo_data *data, int cpu) salinfo_log_new_read(cpu, data); if (data->state == STATE_LOG_RECORD) { spin_lock_irqsave(&data_saved_lock, flags); - cpu_set(cpu, data->cpu_event); + cpumask_set_cpu(cpu, &data->cpu_event); salinfo_work_to_do(data); spin_unlock_irqrestore(&data_saved_lock, flags); } @@ -581,7 +581,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu for (i = 0, data = salinfo_data; i < ARRAY_SIZE(salinfo_data); ++i, ++data) { - cpu_set(cpu, data->cpu_event); + cpumask_set_cpu(cpu, 
&data->cpu_event); salinfo_work_to_do(data); } spin_unlock_irqrestore(&data_saved_lock, flags); @@ -601,7 +601,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu shift1_data_saved(data, j); } } - cpu_clear(cpu, data->cpu_event); + cpumask_clear_cpu(cpu, &data->cpu_event); } spin_unlock_irqrestore(&data_saved_lock, flags); break; @@ -659,7 +659,7 @@ salinfo_init(void) /* we missed any events before now */ for_each_online_cpu(j) - cpu_set(j, data->cpu_event); + cpumask_set_cpu(j, &data->cpu_event); *sdir++ = dir; } diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index d86669bcdfb2..b9761389cb8d 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -562,8 +562,8 @@ setup_arch (char **cmdline_p) # ifdef CONFIG_ACPI_HOTPLUG_CPU prefill_possible_map(); # endif - per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ? - 32 : cpus_weight(early_cpu_possible_map)), + per_cpu_scan_finalize((cpumask_weight(&early_cpu_possible_map) == 0 ? + 32 : cpumask_weight(&early_cpu_possible_map)), additional_cpus > 0 ? additional_cpus : 0); # endif #endif /* CONFIG_APCI_BOOT */ @@ -702,7 +702,8 @@ show_cpuinfo (struct seq_file *m, void *v) c->itc_freq / 1000000, c->itc_freq % 1000000, lpj*HZ/500000, (lpj*HZ/5000) % 100); #ifdef CONFIG_SMP - seq_printf(m, "siblings : %u\n", cpus_weight(cpu_core_map[cpunum])); + seq_printf(m, "siblings : %u\n", + cpumask_weight(&cpu_core_map[cpunum])); if (c->socket_id != -1) seq_printf(m, "physical id: %u\n", c->socket_id); if (c->threads_per_core > 1 || c->cores_per_socket > 1) @@ -933,8 +934,8 @@ cpu_init (void) * (must be done after per_cpu area is setup) */ if (smp_processor_id() == 0) { - cpu_set(0, per_cpu(cpu_sibling_map, 0)); - cpu_set(0, cpu_core_map[0]); + cpumask_set_cpu(0, &per_cpu(cpu_sibling_map, 0)); + cpumask_set_cpu(0, &cpu_core_map[0]); } else { /* * Set ar.k3 so that assembly code in MCA handler can compute diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index 9fcd4e63048f..7f706d4f84f7 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c @@ -262,11 +262,11 @@ smp_flush_tlb_cpumask(cpumask_t xcpumask) preempt_disable(); mycpu = smp_processor_id(); - for_each_cpu_mask(cpu, cpumask) + for_each_cpu(cpu, &cpumask) counts[cpu] = local_tlb_flush_counts[cpu].count & 0xffff; mb(); - for_each_cpu_mask(cpu, cpumask) { + for_each_cpu(cpu, &cpumask) { if (cpu == mycpu) flush_mycpu = 1; else @@ -276,7 +276,7 @@ smp_flush_tlb_cpumask(cpumask_t xcpumask) if (flush_mycpu) smp_local_flush_tlb(); - for_each_cpu_mask(cpu, cpumask) + for_each_cpu(cpu, &cpumask) while(counts[cpu] == (local_tlb_flush_counts[cpu].count & 0xffff)) udelay(FLUSH_DELAY); diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 547a48d78bd7..15051e9c2c6f 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -434,7 +434,7 @@ smp_callin (void) /* * Allow the master to continue. 
*/ - cpu_set(cpuid, cpu_callin_map); + cpumask_set_cpu(cpuid, &cpu_callin_map); Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid); } @@ -475,13 +475,13 @@ do_boot_cpu (int sapicid, int cpu, struct task_struct *idle) */ Dprintk("Waiting on callin_map ..."); for (timeout = 0; timeout < 100000; timeout++) { - if (cpu_isset(cpu, cpu_callin_map)) + if (cpumask_test_cpu(cpu, &cpu_callin_map)) break; /* It has booted */ udelay(100); } Dprintk("\n"); - if (!cpu_isset(cpu, cpu_callin_map)) { + if (!cpumask_test_cpu(cpu, &cpu_callin_map)) { printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid); ia64_cpu_to_sapicid[cpu] = -1; set_cpu_online(cpu, false); /* was set in smp_callin() */ @@ -541,7 +541,7 @@ smp_prepare_cpus (unsigned int max_cpus) smp_setup_percpu_timer(); - cpu_set(0, cpu_callin_map); + cpumask_set_cpu(0, &cpu_callin_map); local_cpu_data->loops_per_jiffy = loops_per_jiffy; ia64_cpu_to_sapicid[0] = boot_cpu_id; @@ -565,7 +565,7 @@ smp_prepare_cpus (unsigned int max_cpus) void smp_prepare_boot_cpu(void) { set_cpu_online(smp_processor_id(), true); - cpu_set(smp_processor_id(), cpu_callin_map); + cpumask_set_cpu(smp_processor_id(), &cpu_callin_map); set_numa_node(cpu_to_node_map[smp_processor_id()]); per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; paravirt_post_smp_prepare_boot_cpu(); @@ -577,10 +577,10 @@ clear_cpu_sibling_map(int cpu) { int i; - for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu)) - cpu_clear(cpu, per_cpu(cpu_sibling_map, i)); - for_each_cpu_mask(i, cpu_core_map[cpu]) - cpu_clear(cpu, cpu_core_map[i]); + for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu)) + cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i)); + for_each_cpu(i, &cpu_core_map[cpu]) + cpumask_clear_cpu(cpu, &cpu_core_map[i]); per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE; } @@ -592,12 +592,12 @@ remove_siblinginfo(int cpu) if (cpu_data(cpu)->threads_per_core == 1 && cpu_data(cpu)->cores_per_socket == 1) { - cpu_clear(cpu, cpu_core_map[cpu]); - cpu_clear(cpu, per_cpu(cpu_sibling_map, cpu)); + cpumask_clear_cpu(cpu, &cpu_core_map[cpu]); + cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, cpu)); return; } - last = (cpus_weight(cpu_core_map[cpu]) == 1 ? 1 : 0); + last = (cpumask_weight(&cpu_core_map[cpu]) == 1 ? 1 : 0); /* remove it from all sibling map's */ clear_cpu_sibling_map(cpu); @@ -673,7 +673,7 @@ int __cpu_disable(void) remove_siblinginfo(cpu); fixup_irqs(); local_flush_tlb_all(); - cpu_clear(cpu, cpu_callin_map); + cpumask_clear_cpu(cpu, &cpu_callin_map); return 0; } @@ -718,11 +718,13 @@ static inline void set_cpu_sibling_map(int cpu) for_each_online_cpu(i) { if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) { - cpu_set(i, cpu_core_map[cpu]); - cpu_set(cpu, cpu_core_map[i]); + cpumask_set_cpu(i, &cpu_core_map[cpu]); + cpumask_set_cpu(cpu, &cpu_core_map[i]); if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) { - cpu_set(i, per_cpu(cpu_sibling_map, cpu)); - cpu_set(cpu, per_cpu(cpu_sibling_map, i)); + cpumask_set_cpu(i, + &per_cpu(cpu_sibling_map, cpu)); + cpumask_set_cpu(cpu, + &per_cpu(cpu_sibling_map, i)); } } } @@ -742,7 +744,7 @@ __cpu_up(unsigned int cpu, struct task_struct *tidle) * Already booted cpu? not valid anymore since we dont * do idle loop tightspin anymore. 
*/ - if (cpu_isset(cpu, cpu_callin_map)) + if (cpumask_test_cpu(cpu, &cpu_callin_map)) return -EINVAL; per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; @@ -753,8 +755,8 @@ __cpu_up(unsigned int cpu, struct task_struct *tidle) if (cpu_data(cpu)->threads_per_core == 1 && cpu_data(cpu)->cores_per_socket == 1) { - cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); - cpu_set(cpu, cpu_core_map[cpu]); + cpumask_set_cpu(cpu, &per_cpu(cpu_sibling_map, cpu)); + cpumask_set_cpu(cpu, &cpu_core_map[cpu]); return 0; } diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c index 965ab42fabb0..c01fe8991244 100644 --- a/arch/ia64/kernel/topology.c +++ b/arch/ia64/kernel/topology.c @@ -148,7 +148,7 @@ static void cache_shared_cpu_map_setup(unsigned int cpu, if (cpu_data(cpu)->threads_per_core <= 1 && cpu_data(cpu)->cores_per_socket <= 1) { - cpu_set(cpu, this_leaf->shared_cpu_map); + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); return; } @@ -164,7 +164,7 @@ static void cache_shared_cpu_map_setup(unsigned int cpu, if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id && cpu_data(j)->core_id == csi.log1_cid && cpu_data(j)->thread_id == csi.log1_tid) - cpu_set(j, this_leaf->shared_cpu_map); + cpumask_set_cpu(j, &this_leaf->shared_cpu_map); i++; } while (i < num_shared && @@ -177,7 +177,7 @@ static void cache_shared_cpu_map_setup(unsigned int cpu, static void cache_shared_cpu_map_setup(unsigned int cpu, struct cache_info * this_leaf) { - cpu_set(cpu, this_leaf->shared_cpu_map); + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); return; } #endif -- cgit From b09fcc90dc834b98c47cbb4d151526526ff10397 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Mar 2015 10:49:17 +1030 Subject: um: fix up obsolete cpu function usage. Thanks to spatch. Signed-off-by: Rusty Russell Cc: Jeff Dike Cc: Richard Weinberger Cc: user-mode-linux-devel@lists.sourceforge.net --- arch/um/kernel/smp.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c index 5c8c3ea7db7b..74077892b34a 100644 --- a/arch/um/kernel/smp.c +++ b/arch/um/kernel/smp.c @@ -67,12 +67,12 @@ static int idle_proc(void *cpup) os_set_fd_async(cpu_data[cpu].ipi_pipe[0]); wmb(); - if (cpu_test_and_set(cpu, cpu_callin_map)) { + if (cpumask_test_and_set_cpu(cpu, &cpu_callin_map)) { printk(KERN_ERR "huh, CPU#%d already present??\n", cpu); BUG(); } - while (!cpu_isset(cpu, smp_commenced_mask)) + while (!cpumask_test_cpu(cpu, &smp_commenced_mask)) cpu_relax(); notify_cpu_starting(cpu); @@ -111,7 +111,7 @@ void smp_prepare_cpus(unsigned int maxcpus) set_cpu_possible(i, true); set_cpu_online(me, true); - cpu_set(me, cpu_callin_map); + cpumask_set_cpu(me, &cpu_callin_map); err = os_pipe(cpu_data[me].ipi_pipe, 1, 1); if (err < 0) @@ -127,11 +127,11 @@ void smp_prepare_cpus(unsigned int maxcpus) init_idle(idle, cpu); waittime = 200000000; - while (waittime-- && !cpu_isset(cpu, cpu_callin_map)) + while (waittime-- && !cpumask_test_cpu(cpu, &cpu_callin_map)) cpu_relax(); printk(KERN_INFO "%s\n", - cpu_isset(cpu, cpu_calling_map) ? "done" : "failed"); + cpumask_test_cpu(cpu, &cpu_calling_map) ? 
"done" : "failed"); } } @@ -142,7 +142,7 @@ void smp_prepare_boot_cpu(void) int __cpu_up(unsigned int cpu, struct task_struct *tidle) { - cpu_set(cpu, smp_commenced_mask); + cpumask_set_cpu(cpu, &smp_commenced_mask); while (!cpu_online(cpu)) mb(); return 0; -- cgit From d089f8e97d371a662dd233491e03bda377c9d46d Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Mar 2015 10:49:17 +1030 Subject: x86: fix up obsolete cpu function usage. Thanks to spatch, plus manual removal of "&*". Signed-off-by: Rusty Russell Cc: x86@kernel.org --- arch/x86/kernel/apic/x2apic_cluster.c | 8 ++++---- arch/x86/kernel/irq.c | 4 ++-- arch/x86/platform/uv/tlb_uv.c | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index e658f21681c8..d9d0bd2faaf4 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c @@ -135,12 +135,12 @@ static void init_x2apic_ldr(void) per_cpu(x86_cpu_to_logical_apicid, this_cpu) = apic_read(APIC_LDR); - __cpu_set(this_cpu, per_cpu(cpus_in_cluster, this_cpu)); + cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, this_cpu)); for_each_online_cpu(cpu) { if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu)) continue; - __cpu_set(this_cpu, per_cpu(cpus_in_cluster, cpu)); - __cpu_set(cpu, per_cpu(cpus_in_cluster, this_cpu)); + cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu)); + cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu)); } } @@ -195,7 +195,7 @@ static int x2apic_init_cpu_notifier(void) BUG_ON(!per_cpu(cpus_in_cluster, cpu) || !per_cpu(ipi_mask, cpu)); - __cpu_set(cpu, per_cpu(cpus_in_cluster, cpu)); + cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu)); register_hotcpu_notifier(&x2apic_cpu_notifier); return 1; } diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 67b1cbe0093a..e5952c225532 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -295,7 +295,7 @@ int check_irq_vectors_for_cpu_disable(void) this_cpu = smp_processor_id(); cpumask_copy(&online_new, cpu_online_mask); - cpu_clear(this_cpu, online_new); + cpumask_clear_cpu(this_cpu, &online_new); this_count = 0; for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { @@ -307,7 +307,7 @@ int check_irq_vectors_for_cpu_disable(void) data = irq_desc_get_irq_data(desc); cpumask_copy(&affinity_new, data->affinity); - cpu_clear(this_cpu, affinity_new); + cpumask_clear_cpu(this_cpu, &affinity_new); /* Do not count inactive or per-cpu irqs. 
*/ if (!irq_has_action(irq) || irqd_is_per_cpu(data)) diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index 994798548b1a..3b6ec42718e4 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -415,7 +415,7 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp) struct reset_args reset_args; reset_args.sender = sender; - cpus_clear(*mask); + cpumask_clear(mask); /* find a single cpu for each uvhub in this distribution mask */ maskbits = sizeof(struct pnmask) * BITSPERBYTE; /* each bit is a pnode relative to the partition base pnode */ @@ -425,7 +425,7 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp) continue; apnode = pnode + bcp->partition_base_pnode; cpu = pnode_to_first_cpu(apnode, smaster); - cpu_set(cpu, *mask); + cpumask_set_cpu(cpu, mask); } /* IPI all cpus; preemption is already disabled */ @@ -1126,7 +1126,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, /* don't actually do a shootdown of the local cpu */ cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu)); - if (cpu_isset(cpu, *cpumask)) + if (cpumask_test_cpu(cpu, cpumask)) stat->s_ntargself++; bau_desc = bcp->descriptor_base; -- cgit From 8dd928915a73bf95a727a46037964243eb1e042c Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Mar 2015 10:49:17 +1030 Subject: mips: fix up obsolete cpu function usage. Thanks to spatch, plus manual removal of "&*". Then a sweep for for_each_cpu_mask => for_each_cpu. Signed-off-by: Rusty Russell Cc: Ralf Baechle Cc: Kevin Cernekee Cc: Florian Fainelli Cc: linux-mips@linux-mips.org --- arch/mips/bcm63xx/irq.c | 4 ++-- arch/mips/cavium-octeon/smp.c | 4 ++-- arch/mips/kernel/crash.c | 8 ++++---- arch/mips/kernel/mips-mt-fpaff.c | 4 ++-- arch/mips/kernel/process.c | 2 +- arch/mips/kernel/smp-bmips.c | 2 +- arch/mips/kernel/smp-cmp.c | 4 ++-- arch/mips/kernel/smp-cps.c | 4 ++-- arch/mips/kernel/smp-mt.c | 4 ++-- arch/mips/kernel/smp.c | 26 +++++++++++++------------- arch/mips/kernel/traps.c | 6 +++--- arch/mips/loongson/loongson-3/numa.c | 4 ++-- arch/mips/loongson/loongson-3/smp.c | 2 +- arch/mips/paravirt/paravirt-smp.c | 2 +- arch/mips/sgi-ip27/ip27-init.c | 2 +- arch/mips/sgi-ip27/ip27-klnuma.c | 10 +++++----- arch/mips/sgi-ip27/ip27-memory.c | 2 +- 17 files changed, 45 insertions(+), 45 deletions(-) diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c index b94bf44d8d8e..e3e808a6c542 100644 --- a/arch/mips/bcm63xx/irq.c +++ b/arch/mips/bcm63xx/irq.c @@ -58,9 +58,9 @@ static inline int enable_irq_for_cpu(int cpu, struct irq_data *d, #ifdef CONFIG_SMP if (m) - enable &= cpu_isset(cpu, *m); + enable &= cpumask_test_cpu(cpu, m); else if (irqd_affinity_was_set(d)) - enable &= cpu_isset(cpu, *d->affinity); + enable &= cpumask_test_cpu(cpu, d->affinity); #endif return enable; } diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index 8b1eeffa12ed..56f5d080ef9d 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c @@ -72,7 +72,7 @@ static inline void octeon_send_ipi_mask(const struct cpumask *mask, { unsigned int i; - for_each_cpu_mask(i, *mask) + for_each_cpu(i, mask) octeon_send_ipi_single(i, action); } @@ -239,7 +239,7 @@ static int octeon_cpu_disable(void) return -ENOTSUPP; set_cpu_online(cpu, false); - cpu_clear(cpu, cpu_callin_map); + cpumask_clear_cpu(cpu, &cpu_callin_map); octeon_fixup_irqs(); flush_cache_all(); diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c index 
d21264681e97..d434d5d5ae6e 100644 --- a/arch/mips/kernel/crash.c +++ b/arch/mips/kernel/crash.c @@ -25,9 +25,9 @@ static void crash_shutdown_secondary(void *ignore) return; local_irq_disable(); - if (!cpu_isset(cpu, cpus_in_crash)) + if (!cpumask_test_cpu(cpu, &cpus_in_crash)) crash_save_cpu(regs, cpu); - cpu_set(cpu, cpus_in_crash); + cpumask_set_cpu(cpu, &cpus_in_crash); while (!atomic_read(&kexec_ready_to_reboot)) cpu_relax(); @@ -50,7 +50,7 @@ static void crash_kexec_prepare_cpus(void) */ pr_emerg("Sending IPI to other cpus...\n"); msecs = 10000; - while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) { + while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) { cpu_relax(); mdelay(1); } @@ -66,5 +66,5 @@ void default_machine_crash_shutdown(struct pt_regs *regs) crashing_cpu = smp_processor_id(); crash_save_cpu(regs, crashing_cpu); crash_kexec_prepare_cpus(); - cpu_set(crashing_cpu, cpus_in_crash); + cpumask_set_cpu(crashing_cpu, &cpus_in_crash); } diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index 362bb3707e62..3e4491aa6d6b 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c @@ -114,8 +114,8 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len, /* Compute new global allowed CPU set if necessary */ ti = task_thread_info(p); if (test_ti_thread_flag(ti, TIF_FPUBOUND) && - cpus_intersects(*new_mask, mt_fpu_cpumask)) { - cpus_and(*effective_mask, *new_mask, mt_fpu_cpumask); + cpumask_intersects(new_mask, &mt_fpu_cpumask)) { + cpumask_and(effective_mask, new_mask, &mt_fpu_cpumask); retval = set_cpus_allowed_ptr(p, effective_mask); } else { cpumask_copy(effective_mask, new_mask); diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index bf85cc180d91..4501c7a4bd58 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -49,7 +49,7 @@ void arch_cpu_idle_dead(void) { /* What the heck is this check doing ? 
*/ - if (!cpu_isset(smp_processor_id(), cpu_callin_map)) + if (!cpumask_test_cpu(smp_processor_id(), &cpu_callin_map)) play_dead(); } #endif diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index b8bd9340c9c7..fd528d7ea278 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -362,7 +362,7 @@ static int bmips_cpu_disable(void) pr_info("SMP: CPU%d is offline\n", cpu); set_cpu_online(cpu, false); - cpu_clear(cpu, cpu_callin_map); + cpumask_clear_cpu(cpu, &cpu_callin_map); clear_c0_status(IE_IRQ5); local_flush_tlb_all(); diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c index e36a859af666..d5e0f949dc48 100644 --- a/arch/mips/kernel/smp-cmp.c +++ b/arch/mips/kernel/smp-cmp.c @@ -66,7 +66,7 @@ static void cmp_smp_finish(void) #ifdef CONFIG_MIPS_MT_FPAFF /* If we have an FPU, enroll ourselves in the FPU-full mask */ if (cpu_has_fpu) - cpu_set(smp_processor_id(), mt_fpu_cpumask); + cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask); #endif /* CONFIG_MIPS_MT_FPAFF */ local_irq_enable(); @@ -110,7 +110,7 @@ void __init cmp_smp_setup(void) #ifdef CONFIG_MIPS_MT_FPAFF /* If we have an FPU, enroll ourselves in the FPU-full mask */ if (cpu_has_fpu) - cpu_set(0, mt_fpu_cpumask); + cpumask_set_cpu(0, &mt_fpu_cpumask); #endif /* CONFIG_MIPS_MT_FPAFF */ for (i = 1; i < NR_CPUS; i++) { diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index bed7590e475f..b0fe93e6537e 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -284,7 +284,7 @@ static void cps_smp_finish(void) #ifdef CONFIG_MIPS_MT_FPAFF /* If we have an FPU, enroll ourselves in the FPU-full mask */ if (cpu_has_fpu) - cpu_set(smp_processor_id(), mt_fpu_cpumask); + cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask); #endif /* CONFIG_MIPS_MT_FPAFF */ local_irq_enable(); @@ -307,7 +307,7 @@ static int cps_cpu_disable(void) atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask); smp_mb__after_atomic(); set_cpu_online(cpu, false); - cpu_clear(cpu, cpu_callin_map); + cpumask_clear_cpu(cpu, &cpu_callin_map); return 0; } diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index 17ea705f6c40..86311a164ef1 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -178,7 +178,7 @@ static void vsmp_smp_finish(void) #ifdef CONFIG_MIPS_MT_FPAFF /* If we have an FPU, enroll ourselves in the FPU-full mask */ if (cpu_has_fpu) - cpu_set(smp_processor_id(), mt_fpu_cpumask); + cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask); #endif /* CONFIG_MIPS_MT_FPAFF */ local_irq_enable(); @@ -239,7 +239,7 @@ static void __init vsmp_smp_setup(void) #ifdef CONFIG_MIPS_MT_FPAFF /* If we have an FPU, enroll ourselves in the FPU-full mask */ if (cpu_has_fpu) - cpu_set(0, mt_fpu_cpumask); + cpumask_set_cpu(0, &mt_fpu_cpumask); #endif /* CONFIG_MIPS_MT_FPAFF */ if (!cpu_has_mipsmt) return; diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 1c0d8c50b7e1..357acd3ac0e6 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -75,30 +75,30 @@ static inline void set_cpu_sibling_map(int cpu) { int i; - cpu_set(cpu, cpu_sibling_setup_map); + cpumask_set_cpu(cpu, &cpu_sibling_setup_map); if (smp_num_siblings > 1) { - for_each_cpu_mask(i, cpu_sibling_setup_map) { + for_each_cpu(i, &cpu_sibling_setup_map) { if (cpu_data[cpu].package == cpu_data[i].package && cpu_data[cpu].core == cpu_data[i].core) { - cpu_set(i, cpu_sibling_map[cpu]); - cpu_set(cpu, cpu_sibling_map[i]); + cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
+ cpumask_set_cpu(cpu, &cpu_sibling_map[i]); } } } else - cpu_set(cpu, cpu_sibling_map[cpu]); + cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]); } static inline void set_cpu_core_map(int cpu) { int i; - cpu_set(cpu, cpu_core_setup_map); + cpumask_set_cpu(cpu, &cpu_core_setup_map); - for_each_cpu_mask(i, cpu_core_setup_map) { + for_each_cpu(i, &cpu_core_setup_map) { if (cpu_data[cpu].package == cpu_data[i].package) { - cpu_set(i, cpu_core_map[cpu]); - cpu_set(cpu, cpu_core_map[i]); + cpumask_set_cpu(i, &cpu_core_map[cpu]); + cpumask_set_cpu(cpu, &cpu_core_map[i]); } } } @@ -138,7 +138,7 @@ asmlinkage void start_secondary(void) cpu = smp_processor_id(); cpu_data[cpu].udelay_val = loops_per_jiffy; - cpu_set(cpu, cpu_coherent_mask); + cpumask_set_cpu(cpu, &cpu_coherent_mask); notify_cpu_starting(cpu); set_cpu_online(cpu, true); @@ -146,7 +146,7 @@ asmlinkage void start_secondary(void) set_cpu_sibling_map(cpu); set_cpu_core_map(cpu); - cpu_set(cpu, cpu_callin_map); + cpumask_set_cpu(cpu, &cpu_callin_map); synchronise_count_slave(cpu); @@ -210,7 +210,7 @@ void smp_prepare_boot_cpu(void) { set_cpu_possible(0, true); set_cpu_online(0, true); - cpu_set(0, cpu_callin_map); + cpumask_set_cpu(0, &cpu_callin_map); } int __cpu_up(unsigned int cpu, struct task_struct *tidle) @@ -220,7 +220,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) /* * Trust is futile. We should really have timeouts ... */ - while (!cpu_isset(cpu, cpu_callin_map)) + while (!cpumask_test_cpu(cpu, &cpu_callin_map)) udelay(100); synchronise_count_master(cpu); diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 33984c04b60b..b05b9462c728 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1121,13 +1121,13 @@ static void mt_ase_fp_affinity(void) * restricted the allowed set to exclude any CPUs with FPUs, * we'll skip the procedure. 
*/ - if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) { + if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) { cpumask_t tmask; current->thread.user_cpus_allowed = current->cpus_allowed; - cpus_and(tmask, current->cpus_allowed, - mt_fpu_cpumask); + cpumask_and(&tmask, &current->cpus_allowed, + &mt_fpu_cpumask); set_cpus_allowed_ptr(current, &tmask); set_thread_flag(TIF_FPUBOUND); } } diff --git a/arch/mips/loongson/loongson-3/numa.c b/arch/mips/loongson/loongson-3/numa.c index 6cae0e75de27..12d14ed48778 100644 --- a/arch/mips/loongson/loongson-3/numa.c +++ b/arch/mips/loongson/loongson-3/numa.c @@ -233,7 +233,7 @@ static __init void prom_meminit(void) if (node_online(node)) { szmem(node); node_mem_init(node); - cpus_clear(__node_data[(node)]->cpumask); + cpumask_clear(&__node_data[(node)]->cpumask); } } for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) { @@ -244,7 +244,7 @@ static __init void prom_meminit(void) if (loongson_sysconf.reserved_cpus_mask & (1<<cpu)) continue; - cpu_set(active_cpu, __node_data[(node)]->cpumask); + cpumask_set_cpu(active_cpu, &__node_data[(node)]->cpumask); pr_info("NUMA: set cpumask cpu %d on node %d\n", active_cpu, node); active_cpu++; diff --git a/arch/mips/loongson/loongson-3/smp.c b/arch/mips/loongson/loongson-3/smp.c index e2eb688b5434..e3c68b5da18d 100644 --- a/arch/mips/loongson/loongson-3/smp.c +++ b/arch/mips/loongson/loongson-3/smp.c @@ -408,7 +408,7 @@ static int loongson3_cpu_disable(void) return -EBUSY; set_cpu_online(cpu, false); - cpu_clear(cpu, cpu_callin_map); + cpumask_clear_cpu(cpu, &cpu_callin_map); local_irq_save(flags); fixup_irqs(); local_irq_restore(flags); diff --git a/arch/mips/paravirt/paravirt-smp.c b/arch/mips/paravirt/paravirt-smp.c index 0164b0c48352..42181c7105df 100644 --- a/arch/mips/paravirt/paravirt-smp.c +++ b/arch/mips/paravirt/paravirt-smp.c @@ -75,7 +75,7 @@ static void paravirt_send_ipi_mask(const struct cpumask *mask, unsigned int acti { unsigned int cpu; - for_each_cpu_mask(cpu, *mask) + for_each_cpu(cpu, mask) paravirt_send_ipi_single(cpu, action); } diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c index ee736bd103f8..570098bfdf87 100644 --- a/arch/mips/sgi-ip27/ip27-init.c +++ b/arch/mips/sgi-ip27/ip27-init.c @@ -60,7 +60,7 @@ static void per_hub_init(cnodeid_t cnode) nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); int i; - cpu_set(smp_processor_id(), hub->h_cpus); + cpumask_set_cpu(smp_processor_id(), &hub->h_cpus); if (test_and_set_bit(cnode, hub_init_mask)) return; diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c index ecbb62f339c5..bda90cf87e8c 100644 --- a/arch/mips/sgi-ip27/ip27-klnuma.c +++ b/arch/mips/sgi-ip27/ip27-klnuma.c @@ -29,8 +29,8 @@ static cpumask_t ktext_repmask; void __init setup_replication_mask(void) { /* Set only the master cnode's bit. The master cnode is always 0.
*/ - cpus_clear(ktext_repmask); - cpu_set(0, ktext_repmask); + cpumask_clear(&ktext_repmask); + cpumask_set_cpu(0, &ktext_repmask); #ifdef CONFIG_REPLICATE_KTEXT #ifndef CONFIG_MAPPED_KERNEL @@ -43,7 +43,7 @@ void __init setup_replication_mask(void) if (cnode == 0) continue; /* Advertise that we have a copy of the kernel */ - cpu_set(cnode, ktext_repmask); + cpumask_set_cpu(cnode, &ktext_repmask); } } #endif @@ -99,7 +99,7 @@ void __init replicate_kernel_text() client_nasid = COMPACT_TO_NASID_NODEID(cnode); /* Check if this node should get a copy of the kernel */ - if (cpu_isset(cnode, ktext_repmask)) { + if (cpumask_test_cpu(cnode, &ktext_repmask)) { server_nasid = client_nasid; copy_kernel(server_nasid); } @@ -124,7 +124,7 @@ unsigned long node_getfirstfree(cnodeid_t cnode) loadbase += 16777216; #endif offset = PAGE_ALIGN((unsigned long)(&_end)) - loadbase; - if ((cnode == 0) || (cpu_isset(cnode, ktext_repmask))) + if ((cnode == 0) || (cpumask_test_cpu(cnode, &ktext_repmask))) return TO_NODE(nasid, offset) >> PAGE_SHIFT; else return KDM_TO_PHYS(PAGE_ALIGN(SYMMON_STK_ADDR(nasid, 0))) >> PAGE_SHIFT; diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index 0b68469e063f..8d0eb2643248 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -404,7 +404,7 @@ static void __init node_mem_init(cnodeid_t node) NODE_DATA(node)->node_start_pfn = start_pfn; NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn; - cpus_clear(hub_data(node)->h_cpus); + cpumask_clear(&hub_data(node)->h_cpus); slot_freepfn += PFN_UP(sizeof(struct pglist_data) + sizeof(struct hub_data)); -- cgit From 434ed7f4b08fb8c23bdfabbb1f9427f2ed9ec27c Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Mar 2015 10:49:18 +1030 Subject: arm64: fix up obsolete cpu function usage. Thanks to spatch. Signed-off-by: Rusty Russell Cc: Catalin Marinas Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org --- arch/arm64/kernel/smp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 328b8ce4b007..ccf734ad4961 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -635,7 +635,7 @@ void smp_send_stop(void) cpumask_t mask; cpumask_copy(&mask, cpu_online_mask); - cpu_clear(smp_processor_id(), mask); + cpumask_clear_cpu(smp_processor_id(), &mask); smp_cross_call(&mask, IPI_CPU_STOP); } -- cgit From 0cc0cd7049f06a5ddaf38b6b268f0663c806a337 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Mar 2015 10:49:18 +1030 Subject: tile: fix up obsolete cpu function usage. Thanks to spatch. Signed-off-by: Rusty Russell Cc: Chris Metcalf --- arch/tile/kernel/setup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index f1f579914952..776c741b84a4 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c @@ -773,7 +773,7 @@ static void __init zone_sizes_init(void) * though, there'll be no lowmem, so we just alloc_bootmem * the memmap. There will be no percpu memory either. */ - if (i != 0 && cpu_isset(i, isolnodes)) { + if (i != 0 && cpumask_test_cpu(i, &isolnodes)) { node_memmap_pfn[i] = alloc_bootmem_pfn(0, memmap_size, 0); BUG_ON(node_percpu[i] != 0); -- cgit From 409e56f3ef355ef7670b33e3fc6de8d1256a11d3 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Mar 2015 10:49:18 +1030 Subject: parisc: fix up obsolete cpu function usage. Thanks to spatch. Signed-off-by: Rusty Russell Cc: "James E.J. 
Bottomley" Cc: Helge Deller Cc: linux-parisc@vger.kernel.org --- arch/parisc/kernel/irq.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index cfe056fe7f5c..f3191db6e2e9 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -525,8 +525,8 @@ void do_cpu_irq_mask(struct pt_regs *regs) desc = irq_to_desc(irq); cpumask_copy(&dest, desc->irq_data.affinity); if (irqd_is_per_cpu(&desc->irq_data) && - !cpu_isset(smp_processor_id(), dest)) { - int cpu = first_cpu(dest); + !cpumask_test_cpu(smp_processor_id(), &dest)) { + int cpu = cpumask_first(&dest); printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n", irq, smp_processor_id(), cpu); -- cgit From 35168f512ff7eff9153e8c192f1155c994a75a0a Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Mar 2015 10:49:18 +1030 Subject: blackfin: fix up obsolete cpu function usage. Signed-off-by: Rusty Russell Cc: Steven Miao Cc: adi-buildroot-devel@lists.sourceforge.net --- arch/blackfin/mach-bf561/smp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c index 11789beca75a..8c0c80fd1a45 100644 --- a/arch/blackfin/mach-bf561/smp.c +++ b/arch/blackfin/mach-bf561/smp.c @@ -124,7 +124,7 @@ void platform_send_ipi(cpumask_t callmap, int irq) unsigned int cpu; int offset = (irq == IRQ_SUPPLE_0) ? 6 : 8; - for_each_cpu_mask(cpu, callmap) { + for_each_cpu(cpu, &callmap) { BUG_ON(cpu >= 2); SSYNC(); bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (offset + cpu))); -- cgit From dc0d838a0527296730af6d3826bcda79d2918613 Mon Sep 17 00:00:00 2001 From: Oleg Drokin Date: Wed, 4 Mar 2015 01:07:51 -0500 Subject: staging/lustre/ptlrpc: Do not use deprecated cpus_* functions As per Rusty Russel, cpus_* functions are deprecated. When mixing cpumask_copy with cpus_weight, they operate on different sized masks if CPUMASK_OFFSTACK is enabled, causing an immediate assertion failure. Copying of cpumasks by assignment is also not allowed now. Additionally, in ptlrpc/service.c avoid the cpumask copies, since we only use it to check how many siblings are there for core #0 and nothing else. 
Reported-by: Tyson Whitehead Signed-off-by: Oleg Drokin Signed-off-by: Rusty Russell --- drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c | 8 ++++---- drivers/staging/lustre/lustre/ptlrpc/service.c | 9 +++------ 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c index 4621b71fe0b6..7f13a2845e9f 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c +++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c @@ -511,10 +511,10 @@ static int ptlrpcd_bind(int index, int max) #if defined(CONFIG_NUMA) { int i; - mask = *cpumask_of_node(cpu_to_node(index)); + cpumask_copy(&mask, cpumask_of_node(cpu_to_node(index))); for (i = max; i < num_online_cpus(); i++) - cpu_clear(i, mask); - pc->pc_npartners = cpus_weight(mask) - 1; + cpumask_clear_cpu(i, &mask); + pc->pc_npartners = cpumask_weight(&mask) - 1; set_bit(LIOD_BIND, &pc->pc_flags); } #else @@ -554,7 +554,7 @@ static int ptlrpcd_bind(int index, int max) * that are already initialized */ for (pidx = 0, i = 0; i < index; i++) { - if (cpu_isset(i, mask)) { + if (cpumask_test_cpu(i, &mask)) { ppc = &ptlrpcds->pd_threads[i]; pc->pc_partners[pidx++] = ppc; ppc->pc_partners[ppc-> diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c index 635b12b22cef..8e61421515cb 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/service.c +++ b/drivers/staging/lustre/lustre/ptlrpc/service.c @@ -543,7 +543,6 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc, if (tc->tc_thr_factor != 0) { int factor = tc->tc_thr_factor; const int fade = 4; - cpumask_t mask; /* * User wants to increase number of threads with for @@ -557,8 +556,8 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc, * have too many threads no matter how many cores/HTs * there are. */ - cpumask_copy(&mask, topology_thread_cpumask(0)); - if (cpus_weight(mask) > 1) { /* weight is # of HTs */ + /* weight is # of HTs */ + if (cpumask_weight(topology_thread_cpumask(0)) > 1) { /* depress thread factor for hyper-thread */ factor = factor - (factor >> 1) + (factor >> 3); } @@ -2752,7 +2751,6 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait) int ptlrpc_hr_init(void) { - cpumask_t mask; struct ptlrpc_hr_partition *hrp; struct ptlrpc_hr_thread *hrt; int rc; @@ -2770,8 +2768,7 @@ int ptlrpc_hr_init(void) init_waitqueue_head(&ptlrpc_hr.hr_waitq); - cpumask_copy(&mask, topology_thread_cpumask(0)); - weight = cpus_weight(mask); + weight = cpumask_weight(topology_thread_cpumask(0)); cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) { hrp->hrp_cpt = i; -- cgit From b25e74b56a34908c777c1e72ecd9c56c4c8e124d Mon Sep 17 00:00:00 2001 From: Oleg Drokin Date: Wed, 4 Mar 2015 01:07:52 -0500 Subject: staging/lustre/libcfs: replace deprecated cpus_ calls with cpumask_ Rusty Russel advises that cpus_* functions are deprecated to work on cpumasks and cpumask_* functions should be called instead, otherwise problems with CPUMASK_OFFSTACK arise. 
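As a quick reference, the mechanical old-API to new-API mapping applied below is sketched in the helper that follows. The function and its pr_info() output are illustrative only (a hypothetical example, not part of the patch); the new-style calls are the ones the conversion settles on:

	#include <linux/kernel.h>
	#include <linux/cpumask.h>

	/* illustrative old-API -> new-API crib sheet, one call per line */
	static void cpumask_api_sketch(const cpumask_t *mask)
	{
		int cpu;

		/* old: cpus_weight(*mask) */
		pr_info("weight = %u\n", cpumask_weight(mask));

		/* old: cpu_isset(cpu, *mask) */
		if (cpumask_test_cpu(0, mask))
			pr_info("cpu 0 is set\n");

		/* old: for_each_cpu_mask(cpu, *mask) */
		for_each_cpu(cpu, mask)
			pr_info("cpu %d\n", cpu);

		/* old: any_online_cpu(*mask) == NR_CPUS, i.e. none online */
		if (cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids)
			pr_info("no online cpu in mask\n");
	}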
Signed-off-by: Oleg Drokin Signed-off-by: Rusty Russell --- .../staging/lustre/lustre/libcfs/linux/linux-cpu.c | 102 +++++++++++---------- 1 file changed, 53 insertions(+), 49 deletions(-) diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c index 05f7595f18aa..cc3ab351943e 100644 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c +++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c @@ -204,7 +204,7 @@ cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len) } tmp += rc; - for_each_cpu_mask(j, *cptab->ctb_parts[i].cpt_cpumask) { + for_each_cpu(j, cptab->ctb_parts[i].cpt_cpumask) { rc = snprintf(tmp, len, "%d ", j); len -= rc; if (len <= 0) { @@ -240,8 +240,8 @@ cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt) LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); return cpt == CFS_CPT_ANY ? - cpus_weight(*cptab->ctb_cpumask) : - cpus_weight(*cptab->ctb_parts[cpt].cpt_cpumask); + cpumask_weight(cptab->ctb_cpumask) : + cpumask_weight(cptab->ctb_parts[cpt].cpt_cpumask); } EXPORT_SYMBOL(cfs_cpt_weight); @@ -251,8 +251,10 @@ cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt) LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); return cpt == CFS_CPT_ANY ? - any_online_cpu(*cptab->ctb_cpumask) != NR_CPUS : - any_online_cpu(*cptab->ctb_parts[cpt].cpt_cpumask) != NR_CPUS; + cpumask_any_and(cptab->ctb_cpumask, + cpu_online_mask) < nr_cpu_ids : + cpumask_any_and(cptab->ctb_parts[cpt].cpt_cpumask, + cpu_online_mask) < nr_cpu_ids; } EXPORT_SYMBOL(cfs_cpt_online); @@ -283,7 +285,7 @@ cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts); - if (cpu < 0 || cpu >= NR_CPUS || !cpu_online(cpu)) { + if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_online(cpu)) { CDEBUG(D_INFO, "CPU %d is invalid or it's offline\n", cpu); return 0; } @@ -296,11 +298,11 @@ cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) cptab->ctb_cpu2cpt[cpu] = cpt; - LASSERT(!cpu_isset(cpu, *cptab->ctb_cpumask)); - LASSERT(!cpu_isset(cpu, *cptab->ctb_parts[cpt].cpt_cpumask)); + LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_cpumask)); + LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask)); - cpu_set(cpu, *cptab->ctb_cpumask); - cpu_set(cpu, *cptab->ctb_parts[cpt].cpt_cpumask); + cpumask_set_cpu(cpu, cptab->ctb_cpumask); + cpumask_set_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask); node = cpu_to_node(cpu); @@ -324,7 +326,7 @@ cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); - if (cpu < 0 || cpu >= NR_CPUS) { + if (cpu < 0 || cpu >= nr_cpu_ids) { CDEBUG(D_INFO, "Invalid CPU id %d\n", cpu); return; } @@ -344,11 +346,11 @@ cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) return; } - LASSERT(cpu_isset(cpu, *cptab->ctb_parts[cpt].cpt_cpumask)); - LASSERT(cpu_isset(cpu, *cptab->ctb_cpumask)); + LASSERT(cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask)); + LASSERT(cpumask_test_cpu(cpu, cptab->ctb_cpumask)); - cpu_clear(cpu, *cptab->ctb_parts[cpt].cpt_cpumask); - cpu_clear(cpu, *cptab->ctb_cpumask); + cpumask_clear_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask); + cpumask_clear_cpu(cpu, cptab->ctb_cpumask); cptab->ctb_cpu2cpt[cpu] = -1; node = cpu_to_node(cpu); @@ -356,22 +358,22 @@ cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) LASSERT(node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask)); LASSERT(node_isset(node, 
*cptab->ctb_nodemask)); - for_each_cpu_mask(i, *cptab->ctb_parts[cpt].cpt_cpumask) { + for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask) { /* this CPT has other CPU belonging to this node? */ if (cpu_to_node(i) == node) break; } - if (i == NR_CPUS) + if (i >= nr_cpu_ids) node_clear(node, *cptab->ctb_parts[cpt].cpt_nodemask); - for_each_cpu_mask(i, *cptab->ctb_cpumask) { + for_each_cpu(i, cptab->ctb_cpumask) { /* this CPT-table has other CPU belonging to this node? */ if (cpu_to_node(i) == node) break; } - if (i == NR_CPUS) + if (i >= nr_cpu_ids) node_clear(node, *cptab->ctb_nodemask); return; @@ -383,13 +385,14 @@ cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask) { int i; - if (cpus_weight(*mask) == 0 || any_online_cpu(*mask) == NR_CPUS) { + if (cpumask_weight(mask) == 0 || + cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) { CDEBUG(D_INFO, "No online CPU is found in the CPU mask for CPU partition %d\n", cpt); return 0; } - for_each_cpu_mask(i, *mask) { + for_each_cpu(i, mask) { if (!cfs_cpt_set_cpu(cptab, cpt, i)) return 0; } @@ -403,7 +406,7 @@ cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask) { int i; - for_each_cpu_mask(i, *mask) + for_each_cpu(i, mask) cfs_cpt_unset_cpu(cptab, cpt, i); } EXPORT_SYMBOL(cfs_cpt_unset_cpumask); @@ -493,7 +496,7 @@ cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt) } for (; cpt <= last; cpt++) { - for_each_cpu_mask(i, *cptab->ctb_parts[cpt].cpt_cpumask) + for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask) cfs_cpt_unset_cpu(cptab, cpt, i); } } @@ -554,7 +557,7 @@ EXPORT_SYMBOL(cfs_cpt_current); int cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu) { - LASSERT(cpu >= 0 && cpu < NR_CPUS); + LASSERT(cpu >= 0 && cpu < nr_cpu_ids); return cptab->ctb_cpu2cpt[cpu]; } @@ -578,14 +581,14 @@ cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt) nodemask = cptab->ctb_parts[cpt].cpt_nodemask; } - if (any_online_cpu(*cpumask) == NR_CPUS) { + if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) { CERROR("No online CPU found in CPU partition %d, did someone do CPU hotplug on system? 
You might need to reload Lustre modules to keep system working well.\n", cpt); return -EINVAL; } for_each_online_cpu(i) { - if (cpu_isset(i, *cpumask)) + if (cpumask_test_cpu(i, cpumask)) continue; rc = set_cpus_allowed_ptr(current, cpumask); @@ -616,14 +619,14 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt, LASSERT(number > 0); - if (number >= cpus_weight(*node)) { - while (!cpus_empty(*node)) { - cpu = first_cpu(*node); + if (number >= cpumask_weight(node)) { + while (!cpumask_empty(node)) { + cpu = cpumask_first(node); rc = cfs_cpt_set_cpu(cptab, cpt, cpu); if (!rc) return -EINVAL; - cpu_clear(cpu, *node); + cpumask_clear_cpu(cpu, node); } return 0; } @@ -636,27 +639,27 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt, goto out; } - while (!cpus_empty(*node)) { - cpu = first_cpu(*node); + while (!cpumask_empty(node)) { + cpu = cpumask_first(node); /* get cpumask for cores in the same socket */ cfs_cpu_core_siblings(cpu, socket); - cpus_and(*socket, *socket, *node); + cpumask_and(socket, socket, node); - LASSERT(!cpus_empty(*socket)); + LASSERT(!cpumask_empty(socket)); - while (!cpus_empty(*socket)) { + while (!cpumask_empty(socket)) { int i; /* get cpumask for hts in the same core */ cfs_cpu_ht_siblings(cpu, core); - cpus_and(*core, *core, *node); + cpumask_and(core, core, node); - LASSERT(!cpus_empty(*core)); + LASSERT(!cpumask_empty(core)); - for_each_cpu_mask(i, *core) { - cpu_clear(i, *socket); - cpu_clear(i, *node); + for_each_cpu(i, core) { + cpumask_clear_cpu(i, socket); + cpumask_clear_cpu(i, node); rc = cfs_cpt_set_cpu(cptab, cpt, i); if (!rc) { @@ -667,7 +670,7 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt, if (--number == 0) goto out; } - cpu = first_cpu(*socket); + cpu = cpumask_first(socket); } } @@ -767,7 +770,7 @@ cfs_cpt_table_create(int ncpt) for_each_online_node(i) { cfs_node_to_cpumask(i, mask); - while (!cpus_empty(*mask)) { + while (!cpumask_empty(mask)) { struct cfs_cpu_partition *part; int n; @@ -776,24 +779,24 @@ cfs_cpt_table_create(int ncpt) part = &cptab->ctb_parts[cpt]; - n = num - cpus_weight(*part->cpt_cpumask); + n = num - cpumask_weight(part->cpt_cpumask); LASSERT(n > 0); rc = cfs_cpt_choose_ncpus(cptab, cpt, mask, n); if (rc < 0) goto failed; - LASSERT(num >= cpus_weight(*part->cpt_cpumask)); - if (num == cpus_weight(*part->cpt_cpumask)) + LASSERT(num >= cpumask_weight(part->cpt_cpumask)); + if (num == cpumask_weight(part->cpt_cpumask)) cpt++; } } if (cpt != ncpt || - num != cpus_weight(*cptab->ctb_parts[ncpt - 1].cpt_cpumask)) { + num != cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask)) { CERROR("Expect %d(%d) CPU partitions but got %d(%d), CPU hotplug/unplug while setting?\n", cptab->ctb_nparts, num, cpt, - cpus_weight(*cptab->ctb_parts[ncpt - 1].cpt_cpumask)); + cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask)); goto failed; } @@ -845,7 +848,7 @@ cfs_cpt_table_create_pattern(char *pattern) return NULL; } - high = node ? MAX_NUMNODES - 1 : NR_CPUS - 1; + high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1; cptab = cfs_cpt_table_alloc(ncpt); if (cptab == NULL) { @@ -965,7 +968,8 @@ cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) mutex_lock(&cpt_data.cpt_mutex); /* if all HTs in a core are offline, it may break affinity */ cfs_cpu_ht_siblings(cpu, cpt_data.cpt_cpumask); - warn = any_online_cpu(*cpt_data.cpt_cpumask) >= nr_cpu_ids; + warn = cpumask_any_and(cpt_data.cpt_cpumask, + cpu_online_mask) >= nr_cpu_ids; mutex_unlock(&cpt_data.cpt_mutex); CDEBUG(warn ? 
D_WARNING : D_INFO, "Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u action: %lx]\n", -- cgit From e97636b487d0767500a1b898513577a5ce000ad9 Mon Sep 17 00:00:00 2001 From: Oleg Drokin Date: Wed, 4 Mar 2015 01:07:53 -0500 Subject: staging/lustre/o2iblnd: Don't use cpus_weight cpus_weight and for_each_cpu_mask are deprecated, so replace them with cpumask_weight and for_each_cpu respectively. Signed-off-by: Oleg Drokin Signed-off-by: Rusty Russell --- drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c index 651016919669..a25816ab9e53 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c @@ -638,8 +638,8 @@ kiblnd_get_completion_vector(kib_conn_t *conn, int cpt) return 0; /* hash NID to CPU id in this partition... */ - off = do_div(nid, cpus_weight(*mask)); - for_each_cpu_mask(i, *mask) { + off = do_div(nid, cpumask_weight(mask)); + for_each_cpu(i, mask) { if (off-- == 0) return i % vectors; } -- cgit From 9941a383df98193aa9a9b3662af7c1fcbc3070ee Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Mar 2015 10:49:19 +1030 Subject: CPU_MASK_ALL/CPU_MASK_NONE: remove from deprecated region. They're used to initialize various static fields, though static cpumasks should generally be avoided. Signed-off-by: Rusty Russell --- include/linux/cpumask.h | 31 ++++++++++++------------------- 1 file changed, 12 insertions(+), 19 deletions(-) diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 086549a665e2..dc037ae6f4f2 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -768,7 +768,7 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu) #if NR_CPUS <= BITS_PER_LONG #define CPU_BITS_ALL \ { \ - [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ + [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ } #else /* NR_CPUS > BITS_PER_LONG */ @@ -776,7 +776,7 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu) #define CPU_BITS_ALL \ { \ [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ - [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ + [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ } #endif /* NR_CPUS > BITS_PER_LONG */ @@ -797,38 +797,31 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask) nr_cpu_ids); } -/* - * - * From here down, all obsolete. Use cpumask_ variants! - * - */ -#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS -#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu)) - -#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) - #if NR_CPUS <= BITS_PER_LONG - #define CPU_MASK_ALL \ (cpumask_t) { { \ - [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ + [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ } } - #else - #define CPU_MASK_ALL \ (cpumask_t) { { \ [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ - [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ + [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ } } - -#endif +#endif /* NR_CPUS > BITS_PER_LONG */ #define CPU_MASK_NONE \ (cpumask_t) { { \ [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \ } } +/* + * + * From here down, all obsolete. Use cpumask_ variants! 
+ * + */ +#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS + #define CPU_MASK_CPU0 \ (cpumask_t) { { \ [0] = 1UL \ -- cgit From 87313df7b47c5048d8920a486f77835a49406754 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 10 Mar 2015 12:28:29 +1030 Subject: powerpc: fix deprecated CPU_MASK_CPU0 usage. Signed-off-by: Rusty Russell --- arch/powerpc/include/asm/cputhreads.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h index 2bf8e9307be9..42e04a6a2b58 100644 --- a/arch/powerpc/include/asm/cputhreads.h +++ b/arch/powerpc/include/asm/cputhreads.h @@ -25,7 +25,7 @@ extern cpumask_t threads_core_mask; #define threads_per_core 1 #define threads_per_subcore 1 #define threads_shift 0 -#define threads_core_mask (CPU_MASK_CPU0) +#define threads_core_mask (*get_cpu_mask(0)) #endif /* cpu_thread_mask_to_cores - Return a cpumask of one per cores -- cgit From 6a4bd8d1412a6cd72b65d7c86a2c08e9ec1d6b97 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 10 Mar 2015 12:42:03 +1030 Subject: ia64: remove deprecated cpus_ usage. Signed-off-by: Rusty Russell --- arch/ia64/kernel/irq_ia64.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index 9f40d972969c..eaa3199f98c8 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -173,7 +173,7 @@ static void __clear_irq_vector(int irq) cfg->vector = IRQ_VECTOR_UNASSIGNED; cfg->domain = CPU_MASK_NONE; irq_status[irq] = IRQ_UNUSED; - cpus_andnot(vector_table[vector], vector_table[vector], domain); + cpumask_andnot(&vector_table[vector], &vector_table[vector], &domain); } static void clear_irq_vector(int irq) @@ -259,7 +259,7 @@ static enum vector_domain_type { static cpumask_t vector_allocation_domain(int cpu) { if (vector_domain_type == VECTOR_DOMAIN_PERCPU) - return cpumask_of_cpu(cpu); + return *cpumask_of(cpu); return CPU_MASK_ALL; } -- cgit From fdaf3a6539d6b9b6a7240c3b00bd81c813b2760d Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 10 Mar 2015 12:43:45 +1030 Subject: x86: fix more deprecated cpu function usage. Signed-off-by: Rusty Russell --- arch/x86/kernel/apic/x2apic_cluster.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index d9d0bd2faaf4..ab3219b3fbda 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c @@ -171,8 +171,8 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu) for_each_online_cpu(cpu) { if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu)) continue; - __cpu_clear(this_cpu, per_cpu(cpus_in_cluster, cpu)); - __cpu_clear(cpu, per_cpu(cpus_in_cluster, this_cpu)); + cpumask_clear_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu)); + cpumask_clear_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu)); } free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu)); free_cpumask_var(per_cpu(ipi_mask, this_cpu)); -- cgit From 1ed1835f5fadf057ab081cbe31ac353d4547a25b Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 10 Mar 2015 13:00:43 +1030 Subject: mips: fix obsolete cpumask_of_cpu usage. Plus, it's weird. 
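The weirdness: cpumask_of_cpu(cpu) expanded to (*get_cpu_mask(cpu)), a cpumask_t value whose address the caller then immediately re-took, whereas cpumask_of(cpu) yields the const pointer directly. A sketch of the before/after (the sender function is a hypothetical stand-in for mp_ops->send_ipi_mask(); cpumask_of() is the real API):

	#include <linux/cpumask.h>

	/* Hypothetical stand-in for the platform IPI hook. */
	static void example_send_ipi(const struct cpumask *mask, unsigned int action)
	{
	}

	static void example_kick_cpu(int cpu)
	{
		/* Old: example_send_ipi(&cpumask_of_cpu(cpu), action) -- take
		 * the address of a freshly copied cpumask_t value. */
		example_send_ipi(cpumask_of(cpu), 1 /* hypothetical action id */);
	}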
Signed-off-by: Rusty Russell --- arch/mips/include/asm/smp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h index eacf865d21c2..bb02fac9b4fa 100644 --- a/arch/mips/include/asm/smp.h +++ b/arch/mips/include/asm/smp.h @@ -88,7 +88,7 @@ static inline void arch_send_call_function_single_ipi(int cpu) { extern struct plat_smp_ops *mp_ops; /* private */ - mp_ops->send_ipi_mask(&cpumask_of_cpu(cpu), SMP_CALL_FUNCTION); + mp_ops->send_ipi_mask(cpumask_of(cpu), SMP_CALL_FUNCTION); } static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask) -- cgit From 2f0f267ea0720ec6adbe9cf7386450425fac8258 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Mar 2015 10:49:19 +1030 Subject: cpumask: remove deprecated functions. Using these functions with offstack cpus is unsafe. They use all NR_CPUS bits, instead of nr_cpumask_bits. In particular, lustre (in staging) used cpus_ and that caused a bug. Reported-by: Oleg Drokin Signed-off-by: Rusty Russell --- include/linux/cpumask.h | 151 ------------------------------------------------ lib/Kconfig | 4 -- 2 files changed, 155 deletions(-) diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index dc037ae6f4f2..646fadee5caf 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -815,155 +815,4 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask) [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \ } } -/* - * - * From here down, all obsolete. Use cpumask_ variants! - * - */ -#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS - -#define CPU_MASK_CPU0 \ -(cpumask_t) { { \ - [0] = 1UL \ -} } - -#if NR_CPUS == 1 -#define first_cpu(src) ({ (void)(src); 0; }) -#define next_cpu(n, src) ({ (void)(src); 1; }) -#define any_online_cpu(mask) 0 -#define for_each_cpu_mask(cpu, mask) \ - for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) -#else /* NR_CPUS > 1 */ -int __first_cpu(const cpumask_t *srcp); -int __next_cpu(int n, const cpumask_t *srcp); - -#define first_cpu(src) __first_cpu(&(src)) -#define next_cpu(n, src) __next_cpu((n), &(src)) -#define any_online_cpu(mask) cpumask_any_and(&mask, cpu_online_mask) -#define for_each_cpu_mask(cpu, mask) \ - for ((cpu) = -1; \ - (cpu) = next_cpu((cpu), (mask)), \ - (cpu) < NR_CPUS; ) -#endif /* SMP */ - -#if NR_CPUS <= 64 - -#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask) - -#else /* NR_CPUS > 64 */ - -int __next_cpu_nr(int n, const cpumask_t *srcp); -#define for_each_cpu_mask_nr(cpu, mask) \ - for ((cpu) = -1; \ - (cpu) = __next_cpu_nr((cpu), &(mask)), \ - (cpu) < nr_cpu_ids; ) - -#endif /* NR_CPUS > 64 */ - -#define cpus_addr(src) ((src).bits) - -#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst)) -static inline void __cpu_set(int cpu, volatile cpumask_t *dstp) -{ - set_bit(cpu, dstp->bits); -} - -#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst)) -static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp) -{ - clear_bit(cpu, dstp->bits); -} - -#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS) -static inline void __cpus_setall(cpumask_t *dstp, unsigned int nbits) -{ - bitmap_fill(dstp->bits, nbits); -} - -#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS) -static inline void __cpus_clear(cpumask_t *dstp, unsigned int nbits) -{ - bitmap_zero(dstp->bits, nbits); -} - -/* No static inline type checking - see Subtlety (1) above.
*/ -#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits) - -#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask)) -static inline int __cpu_test_and_set(int cpu, cpumask_t *addr) -{ - return test_and_set_bit(cpu, addr->bits); -} - -#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS) -static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p, - const cpumask_t *src2p, unsigned int nbits) -{ - return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); -} - -#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS) -static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p, - const cpumask_t *src2p, unsigned int nbits) -{ - bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits); -} - -#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS) -static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p, - const cpumask_t *src2p, unsigned int nbits) -{ - bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits); -} - -#define cpus_andnot(dst, src1, src2) \ - __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS) -static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p, - const cpumask_t *src2p, unsigned int nbits) -{ - return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); -} - -#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS) -static inline int __cpus_equal(const cpumask_t *src1p, - const cpumask_t *src2p, unsigned int nbits) -{ - return bitmap_equal(src1p->bits, src2p->bits, nbits); -} - -#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS) -static inline int __cpus_intersects(const cpumask_t *src1p, - const cpumask_t *src2p, unsigned int nbits) -{ - return bitmap_intersects(src1p->bits, src2p->bits, nbits); -} - -#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS) -static inline int __cpus_subset(const cpumask_t *src1p, - const cpumask_t *src2p, unsigned int nbits) -{ - return bitmap_subset(src1p->bits, src2p->bits, nbits); -} - -#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS) -static inline int __cpus_empty(const cpumask_t *srcp, unsigned int nbits) -{ - return bitmap_empty(srcp->bits, nbits); -} - -#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS) -static inline int __cpus_weight(const cpumask_t *srcp, unsigned int nbits) -{ - return bitmap_weight(srcp->bits, nbits); -} - -#define cpus_shift_left(dst, src, n) \ - __cpus_shift_left(&(dst), &(src), (n), NR_CPUS) -static inline void __cpus_shift_left(cpumask_t *dstp, - const cpumask_t *srcp, int n, int nbits) -{ - bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); -} -#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ - #endif /* __LINUX_CPUMASK_H */ diff --git a/lib/Kconfig b/lib/Kconfig index 87da53bb1fef..47d262b3251e 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -397,10 +397,6 @@ config CPUMASK_OFFSTACK them on the stack. This is a bit more expensive, but avoids stack overflow. -config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS - bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS - depends on BROKEN - config CPU_RMAP bool depends on SMP -- cgit From c8ed00107b60b4a890798677d75e5c2910f393bb Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Mar 2015 10:49:19 +1030 Subject: Fix weird uses of num_online_cpus(). This may be OK in archs with contiguous CPU numbers and without hotplug CPUs, but it sets a terrible example. And open-coding it like drivers/scsi/hpsa.c is just weird. 
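Why the open-coded pattern is wrong, as a short sketch (the poke is hypothetical; for_each_online_cpu() is the real iterator): once a CPU has been hot-unplugged, online CPU ids are no longer guaranteed to be the contiguous range 0..num_online_cpus()-1, so indexing by position can touch an offline CPU and skip an online one:

	#include <linux/cpumask.h>
	#include <linux/printk.h>

	static void example_poke_all_online(void)
	{
		int cpu;

		/*
		 * Broken idiom: with CPUs 0 and 2 online, num_online_cpus() == 2,
		 * so this loop pokes offline CPU 1 and never reaches CPU 2:
		 *
		 *	for (cpu = 0; cpu < num_online_cpus(); cpu++)
		 *		example_poke(cpu);
		 */
		for_each_online_cpu(cpu)	/* visits exactly the online ids */
			pr_info("poking CPU %d\n", cpu);
	}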
BTRFS has a weird comparison with num_online_cpus() too, but since BTRFS just screwed up my test machines' root partition, I'm not touching it :) Signed-off-by: Rusty Russell Reported-by: Oleg Drokin --- arch/m32r/kernel/smpboot.c | 2 +- arch/sh/include/asm/mmu_context.h | 2 +- arch/sh/kernel/smp.c | 6 +++--- arch/um/kernel/smp.c | 2 +- drivers/scsi/hpsa.c | 6 ++---- 5 files changed, 8 insertions(+), 10 deletions(-) diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c index bb21f4f63170..a468467542f4 100644 --- a/arch/m32r/kernel/smpboot.c +++ b/arch/m32r/kernel/smpboot.c @@ -376,7 +376,7 @@ void __init smp_cpus_done(unsigned int max_cpus) if (!cpumask_equal(&cpu_callin_map, cpu_online_mask)) BUG(); - for (cpu_id = 0 ; cpu_id < num_online_cpus() ; cpu_id++) + for_each_online_cpu(cpu_id) show_cpu_info(cpu_id); /* diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h index b9d9489a5012..9f417feaf6e8 100644 --- a/arch/sh/include/asm/mmu_context.h +++ b/arch/sh/include/asm/mmu_context.h @@ -99,7 +99,7 @@ static inline int init_new_context(struct task_struct *tsk, { int i; - for (i = 0; i < num_online_cpus(); i++) + for_each_online_cpu(i) cpu_context(i, mm) = NO_CONTEXT; return 0; diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index fc5acfc93c92..de6be008fc01 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c @@ -363,7 +363,7 @@ void flush_tlb_mm(struct mm_struct *mm) smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1); } else { int i; - for (i = 0; i < num_online_cpus(); i++) + for_each_online_cpu(i) if (smp_processor_id() != i) cpu_context(i, mm) = 0; } @@ -400,7 +400,7 @@ void flush_tlb_range(struct vm_area_struct *vma, smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1); } else { int i; - for (i = 0; i < num_online_cpus(); i++) + for_each_online_cpu(i) if (smp_processor_id() != i) cpu_context(i, mm) = 0; } @@ -443,7 +443,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1); } else { int i; - for (i = 0; i < num_online_cpus(); i++) + for_each_online_cpu(i) if (smp_processor_id() != i) cpu_context(i, vma->vm_mm) = 0; } diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c index 74077892b34a..525c3657a6af 100644 --- a/arch/um/kernel/smp.c +++ b/arch/um/kernel/smp.c @@ -45,7 +45,7 @@ void smp_send_stop(void) int i; printk(KERN_INFO "Stopping all CPUs..."); - for (i = 0; i < num_online_cpus(); i++) { + for_each_online_cpu(i) { if (i == current_thread->cpu) continue; os_write_file(cpu_data[i].ipi_pipe[1], "S", 1); diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index a1cfbd3dda47..8eab107b53fb 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -6632,14 +6632,12 @@ static void fail_all_outstanding_cmds(struct ctlr_info *h) static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value) { - int i, cpu; + int cpu; - cpu = cpumask_first(cpu_online_mask); - for (i = 0; i < num_online_cpus(); i++) { + for_each_online_cpu(cpu) { u32 *lockup_detected; lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); *lockup_detected = value; - cpu = cpumask_next(cpu, cpu_online_mask); } wmb(); /* be sure the per-cpu variables are out to memory */ } -- cgit From cdfdef75e795fb5ab76c66f3329e509f3ab8b9b5 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Mar 2015 10:49:19 +1030 Subject: cpumask: only allocate nr_cpumask_bits. Now we'll find out the hard way if anyone has CPUMASK_OFFSTACK and is returning these or assigning them. 
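What "returning these or assigning them" would break, as a sketch (the function is hypothetical; alloc_cpumask_var(), cpumask_copy() and free_cpumask_var() are the real API): with CONFIG_CPUMASK_OFFSTACK, a cpumask_var_t points at a heap allocation of only cpumask_size() bytes, now sized by nr_cpumask_bits, so struct-assigning a full cpumask_t through it would write past the end of the allocation:

	#include <linux/cpumask.h>
	#include <linux/errno.h>
	#include <linux/gfp.h>
	#include <linux/printk.h>

	static int example_snapshot_online(void)
	{
		cpumask_var_t snap;

		if (!alloc_cpumask_var(&snap, GFP_KERNEL))
			return -ENOMEM;

		/* Wrong with CPUMASK_OFFSTACK: *snap = *cpu_online_mask;
		 * that struct assignment copies sizeof(cpumask_t) bytes. */
		cpumask_copy(snap, cpu_online_mask);	/* copies only nr_cpumask_bits */

		pr_info("%u CPUs online\n", cpumask_weight(snap));
		free_cpumask_var(snap);
		return 0;
	}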
Signed-off-by: Rusty Russell --- include/linux/cpumask.h | 5 ++--- lib/cpumask.c | 7 ------- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 646fadee5caf..4ad2d3c8e21f 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -11,6 +11,7 @@ #include #include +/* Don't assign or return these: may not be this big! */ typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; /** @@ -609,9 +610,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp) */ static inline size_t cpumask_size(void) { - /* FIXME: Once all cpumask assignments are eliminated, this - * can be nr_cpumask_bits */ - return BITS_TO_LONGS(NR_CPUS) * sizeof(long); + return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long); } /* diff --git a/lib/cpumask.c b/lib/cpumask.c index b6513a9f2892..ba379d12bb57 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -89,13 +89,6 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) dump_stack(); } #endif - /* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */ - if (*mask) { - unsigned char *ptr = (unsigned char *)cpumask_bits(*mask); - unsigned int tail; - tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long); - memset(ptr + cpumask_size() - tail, 0, tail); - } return *mask != NULL; } -- cgit From 3bbf7f4624856751aa4cf279a472bd14a8eb16fd Mon Sep 17 00:00:00 2001 From: Rasmus Villemoes Date: Tue, 31 Mar 2015 13:25:05 +1030 Subject: linux/cpumask.h: add typechecking to cpumask_test_cpu The Subtlety (1) referred to vanished with 6ba2ef7baac2 ("cpumask: Move deprecated functions to end of header."). That used to mention some suboptimal code generation by a, by now, rather ancient gcc. With gcc 4.7, I don't see any change in the generated code by making it a static inline, so let's add type checking and get rid of the ghost reference. Signed-off-by: Rasmus Villemoes Signed-off-by: Rusty Russell --- include/linux/cpumask.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 4ad2d3c8e21f..89558d0b56ac 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -290,11 +290,11 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp) * @cpumask: the cpumask pointer * * Returns 1 if @cpu is set in @cpumask, else returns 0 - * - * No static inline type checking - see Subtlety (1) above. */ -#define cpumask_test_cpu(cpu, cpumask) \ - test_bit(cpumask_check(cpu), cpumask_bits((cpumask))) +static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask) +{ + return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); +} /** * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask -- cgit From 1527781d228cd88af6c2f78c13a9cb43b3f69f30 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 16 Apr 2015 12:33:51 +0930 Subject: cpumask: resurrect CPU_MASK_CPU0 We removed it in 2f0f267ea072 (cpumask: remove deprecated functions.), but grep shows it still used by MIPS, and not unreasonably. Signed-off-by: Rusty Russell --- include/linux/cpumask.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 89558d0b56ac..27e285b92b5f 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -814,4 +814,9 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask) [0 ... 
BITS_TO_LONGS(NR_CPUS)-1] = 0UL \ } } +#define CPU_MASK_CPU0 \ +(cpumask_t) { { \ + [0] = 1UL \ +} } + #endif /* __LINUX_CPUMASK_H */ -- cgit From e4afa120c98252e44390067c3a6cc775cde30659 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Sat, 18 Apr 2015 11:18:27 +0930 Subject: cpumask: remove __first_cpu / __next_cpu They were for use by the deprecated first_cpu() and next_cpu() wrappers, but sparc used them directly. They're now replaced by cpumask_first / cpumask_next. And __next_cpu_nr is completely obsolete. Signed-off-by: Rusty Russell Acked-by: David S. Miller --- arch/sparc/kernel/time_32.c | 4 ++-- lib/cpumask.c | 21 --------------------- 2 files changed, 2 insertions(+), 23 deletions(-) diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c index 2f80d23a0a44..73825f431b0c 100644 --- a/arch/sparc/kernel/time_32.c +++ b/arch/sparc/kernel/time_32.c @@ -198,7 +198,7 @@ static __init int setup_timer_cs(void) static void percpu_ce_setup(enum clock_event_mode mode, struct clock_event_device *evt) { - int cpu = __first_cpu(evt->cpumask); + int cpu = cpumask_first(evt->cpumask); switch (mode) { case CLOCK_EVT_MODE_PERIODIC: @@ -218,7 +218,7 @@ static void percpu_ce_setup(enum clock_event_mode mode, static int percpu_ce_set_next_event(unsigned long delta, struct clock_event_device *evt) { - int cpu = __first_cpu(evt->cpumask); + int cpu = cpumask_first(evt->cpumask); unsigned int next = (unsigned int)delta; sparc_config.load_profile_irq(cpu, next); diff --git a/lib/cpumask.c b/lib/cpumask.c index ba379d12bb57..75379b759d3f 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -5,27 +5,6 @@ #include #include -int __first_cpu(const cpumask_t *srcp) -{ - return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS)); -} -EXPORT_SYMBOL(__first_cpu); - -int __next_cpu(int n, const cpumask_t *srcp) -{ - return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1)); -} -EXPORT_SYMBOL(__next_cpu); - -#if NR_CPUS > 64 -int __next_cpu_nr(int n, const cpumask_t *srcp) -{ - return min_t(int, nr_cpu_ids, - find_next_bit(srcp->bits, nr_cpu_ids, n+1)); -} -EXPORT_SYMBOL(__next_cpu_nr); -#endif - /** * cpumask_next_and - get the next cpu in *src1p & *src2p * @n: the cpu prior to the place to search (ie. return will be > @n) -- cgit
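For reference, the replacement idiom as a sketch (the walker is hypothetical; cpumask_first() and cpumask_next() are the real API, and the loop below is essentially what for_each_cpu() expands to):

	#include <linux/cpumask.h>
	#include <linux/printk.h>

	static void example_walk(const struct cpumask *mask)
	{
		int cpu;

		/* Old: cpu = first_cpu(*mask); cpu = next_cpu(cpu, *mask); ...
		 * both scanned all NR_CPUS bits instead of stopping at nr_cpu_ids. */
		for (cpu = cpumask_first(mask);
		     cpu < nr_cpu_ids;
		     cpu = cpumask_next(cpu, mask))
			pr_info("CPU %d is set\n", cpu);
	}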