Diffstat (limited to 'arch/powerpc/include/asm/topology.h')
 -rw-r--r--   arch/powerpc/include/asm/topology.h | 129
 1 file changed, 102 insertions(+), 27 deletions(-)
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 161ab662843b..66ed5fe1b718 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _ASM_POWERPC_TOPOLOGY_H
 #define _ASM_POWERPC_TOPOLOGY_H
 #ifdef __KERNEL__
@@ -5,28 +6,18 @@
 struct device;
 struct device_node;
+struct drmem_lmb;
 
 #ifdef CONFIG_NUMA
 
 /*
- * Before going off node we want the VM to try and reclaim from the local
- * node. It does this if the remote distance is larger than RECLAIM_DISTANCE.
- * With the default REMOTE_DISTANCE of 20 and the default RECLAIM_DISTANCE of
- * 20, we never reclaim and go off node straight away.
- *
- * To fix this we choose a smaller value of RECLAIM_DISTANCE.
+ * If zone_reclaim_mode is enabled, a RECLAIM_DISTANCE of 10 will mean that
+ * all zones on all nodes will be eligible for zone_reclaim().
  */
 #define RECLAIM_DISTANCE 10
 
 #include <asm/mmzone.h>
 
-static inline int cpu_to_node(int cpu)
-{
-	return numa_cpu_lookup_table[cpu];
-}
-
-#define parent_node(node)	(node)
-
 #define cpumask_of_node(node) ((node) == -1 ?				\
 			       cpu_all_mask :				\
 			       node_to_cpumask_map[node])
 
@@ -45,6 +36,7 @@ static inline int pcibus_to_node(struct pci_bus *bus)
 				 cpu_all_mask :				\
 				 cpumask_of_node(pcibus_to_node(bus)))
 
+int cpu_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc);
 extern int __node_distance(int, int);
 #define node_distance(a, b) __node_distance(a, b)
 
@@ -53,8 +45,36 @@ extern void __init dump_numa_cpu_topology(void);
 extern int sysfs_add_device_to_node(struct device *dev, int nid);
 extern void sysfs_remove_device_from_node(struct device *dev, int nid);
 
+static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node)
+{
+	numa_cpu_lookup_table[cpu] = node;
+}
+
+static inline int early_cpu_to_node(int cpu)
+{
+	int nid;
+
+	nid = numa_cpu_lookup_table[cpu];
+
+	/*
+	 * Fall back to node 0 if nid is unset (it should be, except bugs).
+	 * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
+	 */
+	return (nid < 0) ? 0 : nid;
+}
+
+int of_drconf_to_nid_single(struct drmem_lmb *lmb);
+void update_numa_distance(struct device_node *node);
+
+extern void map_cpu_to_node(int cpu, int node);
+#ifdef CONFIG_HOTPLUG_CPU
+extern void unmap_cpu_from_node(unsigned long cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+
 #else
 
+static inline int early_cpu_to_node(int cpu) { return 0; }
+
 static inline void dump_numa_cpu_topology(void) {}
 
 static inline int sysfs_add_device_to_node(struct device *dev, int nid)
@@ -66,40 +86,95 @@ static inline void sysfs_remove_device_from_node(struct device *dev,
 						 int nid)
 {
 }
 
-#endif /* CONFIG_NUMA */
-
-#if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
-extern int start_topology_update(void);
-extern int stop_topology_update(void);
-extern int prrn_is_enabled(void);
-#else
-static inline int start_topology_update(void)
+static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) {}
+
+static inline int cpu_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
 {
 	return 0;
 }
-static inline int stop_topology_update(void)
+
+static inline int of_drconf_to_nid_single(struct drmem_lmb *lmb)
 {
-	return 0;
+	return first_online_node;
 }
-static inline int prrn_is_enabled(void)
+
+static inline void update_numa_distance(struct device_node *node) {}
+
+#ifdef CONFIG_SMP
+static inline void map_cpu_to_node(int cpu, int node) {}
+#ifdef CONFIG_HOTPLUG_CPU
+static inline void unmap_cpu_from_node(unsigned long cpu) {}
+#endif /* CONFIG_HOTPLUG_CPU */
+#endif /* CONFIG_SMP */
+
+#endif /* CONFIG_NUMA */
+
+#if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
+void find_and_update_cpu_nid(int cpu);
+extern int cpu_to_coregroup_id(int cpu);
+#else
+static inline void find_and_update_cpu_nid(int cpu) {}
+static inline int cpu_to_coregroup_id(int cpu)
 {
+#ifdef CONFIG_SMP
+	return cpu_to_core_id(cpu);
+#else
 	return 0;
+#endif
 }
+
 #endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
 
 #include <asm-generic/topology.h>
 
 #ifdef CONFIG_SMP
 #include <asm/cputable.h>
-#define smt_capable()		(cpu_has_feature(CPU_FTR_SMT))
+
+struct cpumask *cpu_coregroup_mask(int cpu);
+const struct cpumask *cpu_die_mask(int cpu);
+int cpu_die_id(int cpu);
 
 #ifdef CONFIG_PPC64
 #include <asm/smp.h>
-#define topology_thread_cpumask(cpu)	(per_cpu(cpu_sibling_map, cpu))
-#define topology_core_cpumask(cpu)	(per_cpu(cpu_core_map, cpu))
-#define topology_core_id(cpu)		(cpu_to_core_id(cpu))
+#define topology_physical_package_id(cpu)	(cpu_to_chip_id(cpu))
+
+#define topology_sibling_cpumask(cpu)	(per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu)	(per_cpu(cpu_core_map, cpu))
+#define topology_core_id(cpu)		(cpu_to_core_id(cpu))
+#define topology_die_id(cpu)		(cpu_die_id(cpu))
+#define topology_die_cpumask(cpu)	(cpu_die_mask(cpu))
+
+#endif
 #endif
+
+#ifdef CONFIG_HOTPLUG_SMT
+#include <linux/cpu_smt.h>
+#include <linux/cpumask.h>
+#include <asm/cputhreads.h>
+
+static inline bool topology_is_primary_thread(unsigned int cpu)
+{
+	return cpu == cpu_first_thread_sibling(cpu);
+}
+#define topology_is_primary_thread topology_is_primary_thread
+
+static inline bool topology_smt_thread_allowed(unsigned int cpu)
+{
+	return cpu_thread_in_core(cpu) < cpu_smt_num_threads;
+}
+
+#define topology_is_core_online topology_is_core_online
+static inline bool topology_is_core_online(unsigned int cpu)
+{
+	int i, first_cpu = cpu_first_thread_sibling(cpu);
+
+	for (i = first_cpu; i < first_cpu + threads_per_core; ++i) {
+		if (cpu_online(i))
+			return true;
+	}
	return false;
+}
 #endif
 
 #endif /* __KERNEL__ */
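
Usage illustration (not part of this diff): the comment above early_cpu_to_node() notes that callers may safely do NODE_DATA(early_cpu_to_node(cpu)) because unset node ids fall back to node 0. A minimal, hypothetical caller, assuming the usual kernel headers, might look like this:

#include <linux/mmzone.h>
#include <asm/topology.h>

/*
 * Hypothetical example, not from the patch: look up the pglist_data for a
 * CPU during early boot.  Because early_cpu_to_node() falls back to node 0
 * when the NUMA lookup table has not been populated yet, the NODE_DATA()
 * dereference below never sees an unset (-1) node id.
 */
static pg_data_t *example_node_data_for_cpu(int cpu)
{
	return NODE_DATA(early_cpu_to_node(cpu));
}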
