Diffstat (limited to 'drivers/acpi/numa')
-rw-r--r--  drivers/acpi/numa/Kconfig  |   5
-rw-r--r--  drivers/acpi/numa/hmat.c   |  83
-rw-r--r--  drivers/acpi/numa/srat.c   | 226
3 files changed, 233 insertions(+), 81 deletions(-)
diff --git a/drivers/acpi/numa/Kconfig b/drivers/acpi/numa/Kconfig
index 849c2bd820b9..f33194d1e43f 100644
--- a/drivers/acpi/numa/Kconfig
+++ b/drivers/acpi/numa/Kconfig
@@ -1,9 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
config ACPI_NUMA
- bool "NUMA support"
- depends on NUMA
- depends on (X86 || ARM64 || LOONGARCH)
- default y if ARM64
+ def_bool NUMA && !X86
config ACPI_HMAT
bool "ACPI Heterogeneous Memory Attribute Table Support"
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index 2c8ccc91ebe6..9d9052258e92 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -108,6 +108,45 @@ static struct memory_target *find_mem_target(unsigned int mem_pxm)
return NULL;
}
+/**
+ * hmat_get_extended_linear_cache_size - Retrieve the extended linear cache size
+ * @backing_res: resource from the backing media
+ * @nid: node id for the memory region
+ * @cache_size: (Output) size of extended linear cache.
+ *
+ * Return: 0 on success. Errno on failure.
+ *
+ */
+int hmat_get_extended_linear_cache_size(struct resource *backing_res, int nid,
+ resource_size_t *cache_size)
+{
+ unsigned int pxm = node_to_pxm(nid);
+ struct memory_target *target;
+ struct target_cache *tcache;
+ struct resource *res;
+
+ target = find_mem_target(pxm);
+ if (!target)
+ return -ENOENT;
+
+ list_for_each_entry(tcache, &target->caches, node) {
+ if (tcache->cache_attrs.address_mode !=
+ NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR)
+ continue;
+
+ res = &target->memregions;
+ if (!resource_contains(res, backing_res))
+ continue;
+
+ *cache_size = tcache->cache_attrs.size;
+ return 0;
+ }
+
+ *cache_size = 0;
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(hmat_get_extended_linear_cache_size, "CXL");
+
static struct memory_target *acpi_find_genport_target(u32 uid)
{
struct memory_target *target;
@@ -151,7 +190,7 @@ int acpi_get_genport_coordinates(u32 uid,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(acpi_get_genport_coordinates, CXL);
+EXPORT_SYMBOL_NS_GPL(acpi_get_genport_coordinates, "CXL");
static __init void alloc_memory_initiator(unsigned int cpu_pxm)
{
@@ -408,7 +447,7 @@ static __init void hmat_update_target(unsigned int tgt_pxm, unsigned int init_px
if (target && target->processor_pxm == init_pxm) {
hmat_update_target_access(target, type, value,
ACCESS_COORDINATE_LOCAL);
- /* If the node has a CPU, update access 1 */
+ /* If the node has a CPU, update access ACCESS_COORDINATE_CPU */
if (node_state(pxm_to_node(init_pxm), N_CPU))
hmat_update_target_access(target, type, value,
ACCESS_COORDINATE_CPU);
@@ -442,9 +481,9 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
return -EINVAL;
}
- pr_info("Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
- hmat_loc->flags, hmat_data_type(type), ipds, tpds,
- hmat_loc->entry_base_unit);
+ pr_debug("Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
+ hmat_loc->flags, hmat_data_type(type), ipds, tpds,
+ hmat_loc->entry_base_unit);
inits = (u32 *)(hmat_loc + 1);
targs = inits + ipds;
@@ -455,9 +494,9 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
value = hmat_normalize(entries[init * tpds + targ],
hmat_loc->entry_base_unit,
type);
- pr_info(" Initiator-Target[%u-%u]:%u%s\n",
- inits[init], targs[targ], value,
- hmat_data_type_suffix(type));
+ pr_debug(" Initiator-Target[%u-%u]:%u%s\n",
+ inits[init], targs[targ], value,
+ hmat_data_type_suffix(type));
hmat_update_target(targs[targ], inits[init],
mem_hier, type, value);
@@ -485,9 +524,9 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
}
attrs = cache->cache_attributes;
- pr_info("Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
- cache->memory_PD, cache->cache_size, attrs,
- cache->number_of_SMBIOShandles);
+ pr_debug("Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
+ cache->memory_PD, cache->cache_size, attrs,
+ cache->number_of_SMBIOShandles);
target = find_mem_target(cache->memory_PD);
if (!target)
@@ -506,6 +545,11 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
case ACPI_HMAT_CA_DIRECT_MAPPED:
tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
+ /* Extended Linear mode is only valid if cache is direct mapped */
+ if (cache->address_mode == ACPI_HMAT_CACHE_MODE_EXTENDED_LINEAR) {
+ tcache->cache_attrs.address_mode =
+ NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR;
+ }
break;
case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
@@ -546,9 +590,9 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
}
if (hmat_revision == 1)
- pr_info("Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
- p->reserved3, p->reserved4, p->flags, p->processor_PD,
- p->memory_PD);
+ pr_debug("Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
+ p->reserved3, p->reserved4, p->flags, p->processor_PD,
+ p->memory_PD);
else
pr_info("Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
p->flags, p->processor_PD, p->memory_PD);
@@ -933,22 +977,19 @@ static int hmat_callback(struct notifier_block *self,
return NOTIFY_OK;
}
-static int hmat_set_default_dram_perf(void)
+static int __init hmat_set_default_dram_perf(void)
{
int rc;
int nid, pxm;
struct memory_target *target;
struct access_coordinate *attrs;
- if (!default_dram_type)
- return -EIO;
-
- for_each_node_mask(nid, default_dram_type->nodes) {
+ for_each_node_mask(nid, default_dram_nodes) {
pxm = node_to_pxm(nid);
target = find_mem_target(pxm);
if (!target)
continue;
- attrs = &target->coord[1];
+ attrs = &target->coord[ACCESS_COORDINATE_CPU];
rc = mt_set_default_dram_perf(nid, attrs, "ACPI HMAT");
if (rc)
return rc;
@@ -975,7 +1016,7 @@ static int hmat_calculate_adistance(struct notifier_block *self,
hmat_update_target_attrs(target, p_nodes, ACCESS_COORDINATE_CPU);
mutex_unlock(&target_lock);
- perf = &target->coord[1];
+ perf = &target->coord[ACCESS_COORDINATE_CPU];
if (mt_perf_to_adistance(perf, adist))
return NOTIFY_OK;
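
For illustration, a minimal sketch of a caller for the new helper above: a module in the "CXL" symbol namespace querying the extended linear cache size for a backing resource. example_region_probe() and its arguments are hypothetical, and the sketch assumes the prototype is exposed via <linux/acpi.h>.

	#include <linux/acpi.h>
	#include <linux/ioport.h>
	#include <linux/module.h>

	/* Import the "CXL" namespace that hmat_get_extended_linear_cache_size()
	 * is exported into (string-literal form, matching the export above). */
	MODULE_IMPORT_NS("CXL");

	/* Hypothetical consumer; not part of this patch. */
	static int example_region_probe(struct resource *backing_res, int nid)
	{
		resource_size_t cache_size;
		int rc;

		rc = hmat_get_extended_linear_cache_size(backing_res, nid,
							 &cache_size);
		if (rc)
			return rc;	/* -ENOENT: no HMAT target for this node */

		if (!cache_size)
			return 0;	/* no extended-linear cache fronts this range */

		/* In extended linear mode the cache adds to the region's address
		 * range, so a caller would account for backing size + cache_size. */
		pr_info("extended linear cache size: %pa\n", &cache_size);
		return 0;
	}
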
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index e45e64993c50..53816dfab645 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -14,9 +14,12 @@
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/memblock.h>
+#include <linux/memory.h>
#include <linux/numa.h>
#include <linux/nodemask.h>
#include <linux/topology.h>
+#include <linux/numa_memblks.h>
+#include <linux/string_choices.h>
static nodemask_t nodes_found_map = NODE_MASK_NONE;
@@ -50,6 +53,7 @@ int node_to_pxm(int node)
return PXM_INVAL;
return node_to_pxm_map[node];
}
+EXPORT_SYMBOL_GPL(node_to_pxm);
static void __acpi_map_pxm_to_node(int pxm, int node)
{
@@ -80,6 +84,101 @@ int acpi_map_pxm_to_node(int pxm)
}
EXPORT_SYMBOL(acpi_map_pxm_to_node);
+#ifdef CONFIG_NUMA_EMU
+/*
+ * Take max_nid + 1 fake-numa nodes into account in both
+ * pxm_to_node_map[]/node_to_pxm_map[] tables.
+ */
+int __init fix_pxm_node_maps(int max_nid)
+{
+ static int pxm_to_node_map_copy[MAX_PXM_DOMAINS] __initdata
+ = { [0 ... MAX_PXM_DOMAINS - 1] = NUMA_NO_NODE };
+ static int node_to_pxm_map_copy[MAX_NUMNODES] __initdata
+ = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
+ int i, j, index = -1, count = 0;
+ nodemask_t nodes_to_enable;
+
+ if (numa_off)
+ return -1;
+
+ /* no or incomplete node/PXM mapping set, nothing to do */
+ if (srat_disabled())
+ return 0;
+
+ /* find fake nodes PXM mapping */
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ if (node_to_pxm_map[i] != PXM_INVAL) {
+ for (j = 0; j <= max_nid; j++) {
+ if ((emu_nid_to_phys[j] == i) &&
+ WARN(node_to_pxm_map_copy[j] != PXM_INVAL,
+ "Node %d is already binded to PXM %d\n",
+ j, node_to_pxm_map_copy[j]))
+ return -1;
+ if (emu_nid_to_phys[j] == i) {
+ node_to_pxm_map_copy[j] =
+ node_to_pxm_map[i];
+ if (j > index)
+ index = j;
+ count++;
+ }
+ }
+ }
+ }
+ if (index == -1) {
+ pr_debug("No node/PXM mapping has been set\n");
+ /* nothing more to be done */
+ return 0;
+ }
+ if (WARN(index != max_nid, "%d max nid when expected %d\n",
+ index, max_nid))
+ return -1;
+
+ nodes_clear(nodes_to_enable);
+
+ /* map phys nodes not used for fake nodes */
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ if (node_to_pxm_map[i] != PXM_INVAL) {
+ for (j = 0; j <= max_nid; j++)
+ if (emu_nid_to_phys[j] == i)
+ break;
+ /* fake nodes PXM mapping has been done */
+ if (j <= max_nid)
+ continue;
+ /* find first hole */
+ for (j = 0;
+ j < MAX_NUMNODES &&
+ node_to_pxm_map_copy[j] != PXM_INVAL;
+ j++)
+ ;
+ if (WARN(j == MAX_NUMNODES,
+ "Number of nodes exceeds MAX_NUMNODES\n"))
+ return -1;
+ node_to_pxm_map_copy[j] = node_to_pxm_map[i];
+ node_set(j, nodes_to_enable);
+ count++;
+ }
+ }
+
+ /* creating reverse mapping in pxm_to_node_map[] */
+ for (i = 0; i < MAX_NUMNODES; i++)
+ if (node_to_pxm_map_copy[i] != PXM_INVAL &&
+ pxm_to_node_map_copy[node_to_pxm_map_copy[i]] == NUMA_NO_NODE)
+ pxm_to_node_map_copy[node_to_pxm_map_copy[i]] = i;
+
+ /* overwrite with new mapping */
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ node_to_pxm_map[i] = node_to_pxm_map_copy[i];
+ pxm_to_node_map[i] = pxm_to_node_map_copy[i];
+ }
+
+ /* enable other nodes found in PXM for hotplug */
+ nodes_or(numa_nodes_parsed, nodes_to_enable, numa_nodes_parsed);
+
+ pr_debug("found %d total number of nodes\n", count);
+ return 0;
+}
+#endif
+
static void __init
acpi_table_print_srat_entry(struct acpi_subtable_header *header)
{
@@ -91,8 +190,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
pr_debug("SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n",
p->apic_id, p->local_sapic_eid,
p->proximity_domain_lo,
- (p->flags & ACPI_SRAT_CPU_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_CPU_ENABLED));
}
break;
@@ -104,8 +202,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
(unsigned long long)p->base_address,
(unsigned long long)p->length,
p->proximity_domain,
- (p->flags & ACPI_SRAT_MEM_ENABLED) ?
- "enabled" : "disabled",
+ str_enabled_disabled(p->flags & ACPI_SRAT_MEM_ENABLED),
(p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
" hot-pluggable" : "",
(p->flags & ACPI_SRAT_MEM_NON_VOLATILE) ?
@@ -120,8 +217,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
pr_debug("SRAT Processor (x2apicid[0x%08x]) in proximity domain %d %s\n",
p->apic_id,
p->proximity_domain,
- (p->flags & ACPI_SRAT_CPU_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_CPU_ENABLED));
}
break;
@@ -132,8 +228,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n",
p->acpi_processor_uid,
p->proximity_domain,
- (p->flags & ACPI_SRAT_GICC_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_GICC_ENABLED));
}
break;
@@ -151,8 +246,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
*(u16 *)(&p->device_handle[0]),
*(u16 *)(&p->device_handle[2]),
p->proximity_domain,
- (p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED));
} else {
/*
* In this case we can rely on the device having a
@@ -162,11 +256,22 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
(char *)(&p->device_handle[0]),
(char *)(&p->device_handle[8]),
p->proximity_domain,
- (p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED));
}
}
break;
+
+ case ACPI_SRAT_TYPE_RINTC_AFFINITY:
+ {
+ struct acpi_srat_rintc_affinity *p =
+ (struct acpi_srat_rintc_affinity *)header;
+ pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n",
+ p->acpi_processor_uid,
+ p->proximity_domain,
+ str_enabled_disabled(p->flags & ACPI_SRAT_RINTC_ENABLED));
+ }
+ break;
+
default:
pr_warn("Found unsupported SRAT entry (type = 0x%x)\n",
header->type);
@@ -208,16 +313,26 @@ int __init srat_disabled(void)
return acpi_numa < 0;
}
-#if defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH)
+__weak int __init numa_fill_memblks(u64 start, u64 end)
+{
+ return NUMA_NO_MEMBLK;
+}
+
/*
* Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
* I/O localities since SRAT does not list them. I/O localities are
* not supported at this point.
*/
-void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
+static int __init acpi_parse_slit(struct acpi_table_header *table)
{
+ struct acpi_table_slit *slit = (struct acpi_table_slit *)table;
int i, j;
+ if (!slit_valid(slit)) {
+ pr_info("SLIT table looks invalid. Not used.\n");
+ return -EINVAL;
+ }
+
for (i = 0; i < slit->locality_count; i++) {
const int from_node = pxm_to_node(i);
@@ -234,28 +349,34 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
slit->entry[slit->locality_count * i + j]);
}
}
+
+ return 0;
}
-/*
- * Default callback for parsing of the Proximity Domain <-> Memory
- * Area mappings
- */
-int __init
-acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
+static int parsed_numa_memblks __initdata;
+
+static int __init
+acpi_parse_memory_affinity(union acpi_subtable_headers *header,
+ const unsigned long table_end)
{
+ struct acpi_srat_mem_affinity *ma;
u64 start, end;
u32 hotpluggable;
int node, pxm;
+ ma = (struct acpi_srat_mem_affinity *)header;
+
+ acpi_table_print_srat_entry(&header->common);
+
if (srat_disabled())
- goto out_err;
+ return 0;
if (ma->header.length < sizeof(struct acpi_srat_mem_affinity)) {
pr_err("SRAT: Unexpected header length: %d\n",
ma->header.length);
goto out_err_bad_srat;
}
if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
- goto out_err;
+ return 0;
hotpluggable = IS_ENABLED(CONFIG_MEMORY_HOTPLUG) &&
(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE);
@@ -293,11 +414,15 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
max_possible_pfn = max(max_possible_pfn, PFN_UP(end - 1));
+ parsed_numa_memblks++;
+
return 0;
+
out_err_bad_srat:
+ /* Just disable SRAT, but do not fail; ignore the error. */
bad_srat();
-out_err:
- return -EINVAL;
+
+ return 0;
}
static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
@@ -305,13 +430,23 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
{
struct acpi_cedt_cfmws *cfmws;
int *fake_pxm = arg;
- u64 start, end;
+ u64 start, end, align;
int node;
+ int err;
cfmws = (struct acpi_cedt_cfmws *)header;
start = cfmws->base_hpa;
end = cfmws->base_hpa + cfmws->window_size;
+ /* Align memblock size to CFMW regions if possible */
+ align = 1UL << __ffs(start | end);
+ if (align >= SZ_256M) {
+ err = memory_block_advise_max_size(align);
+ if (err)
+ pr_warn("CFMWS: memblock size advise failed (%d)\n", err);
+ } else {
+ pr_err("CFMWS: [BIOS BUG] base/size alignment violates spec\n");
+ }
+
/*
* The SRAT may have already described NUMA details for all,
* or a portion of, this CFMWS HPA range. Extend the memblks
@@ -329,7 +464,7 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
return -EINVAL;
}
- if (numa_add_memblk(node, start, end) < 0) {
+ if (numa_add_reserved_memblk(node, start, end) < 0) {
/* CXL driver must handle the NUMA_NO_NODE case */
pr_warn("ACPI NUMA: Failed to add memblk for CFMWS node %d [mem %#llx-%#llx]\n",
node, start, end);
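
A note on the alignment advice in the hunk above: __ffs(start | end) picks the lowest bit set in either address, so 1UL << __ffs(start | end) is the largest power of two that divides both the window base and its end. A standalone user-space sketch of that arithmetic (hosted C with GCC/Clang builtins; the example addresses are made up):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t start = 0x100000000ULL;        /* 4 GiB window base */
		uint64_t end   = start + 0x40000000ULL; /* 1 GiB window size */
		/* __builtin_ctzll() stands in for the kernel's __ffs() here. */
		uint64_t align = 1ULL << __builtin_ctzll(start | end);

		/* Prints 0x40000000 (1 GiB): the largest common alignment,
		 * which meets the SZ_256M threshold checked above. */
		printf("align = %#llx\n", (unsigned long long)align);
		return 0;
	}
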
@@ -340,26 +475,6 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
(*fake_pxm)++;
return 0;
}
-#else
-static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
- void *arg, const unsigned long table_end)
-{
- return 0;
-}
-#endif /* defined(CONFIG_X86) || defined (CONFIG_ARM64) */
-
-static int __init acpi_parse_slit(struct acpi_table_header *table)
-{
- struct acpi_table_slit *slit = (struct acpi_table_slit *)table;
-
- if (!slit_valid(slit)) {
- pr_info("SLIT table looks invalid. Not used.\n");
- return -EINVAL;
- }
- acpi_numa_slit_init(slit);
-
- return 0;
-}
void __init __weak
acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
@@ -450,21 +565,18 @@ acpi_parse_gi_affinity(union acpi_subtable_headers *header,
}
#endif /* defined(CONFIG_X86) || defined (CONFIG_ARM64) */
-static int __initdata parsed_numa_memblks;
-
static int __init
-acpi_parse_memory_affinity(union acpi_subtable_headers * header,
- const unsigned long end)
+acpi_parse_rintc_affinity(union acpi_subtable_headers *header,
+ const unsigned long end)
{
- struct acpi_srat_mem_affinity *memory_affinity;
-
- memory_affinity = (struct acpi_srat_mem_affinity *)header;
+ struct acpi_srat_rintc_affinity *rintc_affinity;
+ rintc_affinity = (struct acpi_srat_rintc_affinity *)header;
acpi_table_print_srat_entry(&header->common);
/* let the architecture-dependent part do it */
- if (!acpi_numa_memory_affinity_init(memory_affinity))
- parsed_numa_memblks++;
+ acpi_numa_rintc_affinity_init(rintc_affinity);
+
return 0;
}
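
The handler above plugs into the standard SRAT subtable dispatch: each srat_proc[] entry (extended to five slots in the next hunk) pairs a subtable type with a callback, and acpi_table_parse_entries_array() invokes the callback for every matching entry. A toy user-space analogue of that dispatch loop, with invented struct names and an illustrative type value:

	#include <stddef.h>
	#include <stdio.h>

	struct subtable { int type; };	/* stand-in for an SRAT entry header */

	struct subtable_proc {		/* analogue of struct acpi_subtable_proc */
		int type;
		int (*handler)(const struct subtable *st);
	};

	static int handle_rintc(const struct subtable *st)
	{
		printf("RINTC affinity entry, type %d\n", st->type);
		return 0;
	}

	int main(void)
	{
		const struct subtable table[] = { { 0 }, { 7 }, { 7 } };
		const struct subtable_proc proc[] = {
			/* like srat_proc[4]; type value illustrative */
			{ .type = 7, .handler = handle_rintc },
		};

		/* Walk all entries, calling the handler registered per type. */
		for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
			for (size_t j = 0; j < sizeof(proc) / sizeof(proc[0]); j++)
				if (table[i].type == proc[j].type)
					proc[j].handler(&table[i]);
		return 0;
	}
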
@@ -503,7 +615,7 @@ int __init acpi_numa_init(void)
/* SRAT: System Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
- struct acpi_subtable_proc srat_proc[4];
+ struct acpi_subtable_proc srat_proc[5];
memset(srat_proc, 0, sizeof(srat_proc));
srat_proc[0].id = ACPI_SRAT_TYPE_CPU_AFFINITY;
@@ -514,6 +626,8 @@ int __init acpi_numa_init(void)
srat_proc[2].handler = acpi_parse_gicc_affinity;
srat_proc[3].id = ACPI_SRAT_TYPE_GENERIC_AFFINITY;
srat_proc[3].handler = acpi_parse_gi_affinity;
+ srat_proc[4].id = ACPI_SRAT_TYPE_RINTC_AFFINITY;
+ srat_proc[4].handler = acpi_parse_rintc_affinity;
acpi_table_parse_entries_array(ACPI_SIG_SRAT,
sizeof(struct acpi_table_srat),