Diffstat (limited to 'drivers/acpi/numa/hmat.c')
-rw-r--r--	drivers/acpi/numa/hmat.c	476
1 file changed, 358 insertions, 118 deletions
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index 23f49a2f4d14..77a81627aaef 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -24,6 +24,7 @@
 #include <linux/node.h>
 #include <linux/sysfs.h>
 #include <linux/dax.h>
+#include <linux/memory-tiers.h>
 
 static u8 hmat_revision;
 static int hmat_disable __initdata;
@@ -57,14 +58,21 @@ struct target_cache {
 	struct node_cache_attrs cache_attrs;
 };
 
+enum {
+	NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL = ACCESS_COORDINATE_MAX,
+	NODE_ACCESS_CLASS_GENPORT_SINK_CPU,
+	NODE_ACCESS_CLASS_MAX,
+};
+
 struct memory_target {
 	struct list_head node;
 	unsigned int memory_pxm;
 	unsigned int processor_pxm;
 	struct resource memregions;
-	struct node_hmem_attrs hmem_attrs[2];
+	struct access_coordinate coord[NODE_ACCESS_CLASS_MAX];
 	struct list_head caches;
 	struct node_cache_attrs cache_attrs;
+	u8 gen_port_device_handle[ACPI_SRAT_DEVICE_HANDLE_SIZE];
 	bool registered;
 };
 
@@ -99,6 +107,90 @@ static struct memory_target *find_mem_target(unsigned int mem_pxm)
 	return NULL;
 }
 
+/**
+ * hmat_get_extended_linear_cache_size - Retrieve the extended linear cache size
+ * @backing_res: resource from the backing media
+ * @nid: node id for the memory region
+ * @cache_size: (Output) size of extended linear cache.
+ *
+ * Return: 0 on success. Errno on failure.
+ *
+ */
+int hmat_get_extended_linear_cache_size(struct resource *backing_res, int nid,
+					resource_size_t *cache_size)
+{
+	unsigned int pxm = node_to_pxm(nid);
+	struct memory_target *target;
+	struct target_cache *tcache;
+	struct resource *res;
+
+	target = find_mem_target(pxm);
+	if (!target)
+		return -ENOENT;
+
+	list_for_each_entry(tcache, &target->caches, node) {
+		if (tcache->cache_attrs.address_mode !=
+				NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR)
+			continue;
+
+		res = &target->memregions;
+		if (!resource_contains(res, backing_res))
+			continue;
+
+		*cache_size = tcache->cache_attrs.size;
+		return 0;
+	}
+
+	*cache_size = 0;
+	return 0;
+}
+EXPORT_SYMBOL_NS_GPL(hmat_get_extended_linear_cache_size, "CXL");
+
+static struct memory_target *acpi_find_genport_target(u32 uid)
+{
+	struct memory_target *target;
+	u32 target_uid;
+	u8 *uid_ptr;
+
+	list_for_each_entry(target, &targets, node) {
+		uid_ptr = target->gen_port_device_handle + 8;
+		target_uid = *(u32 *)uid_ptr;
+		if (uid == target_uid)
+			return target;
+	}
+
+	return NULL;
+}
+
+/**
+ * acpi_get_genport_coordinates - Retrieve the access coordinates for a generic port
+ * @uid: ACPI unique id
+ * @coord: The access coordinates written back out for the generic port.
+ *	   Expect 2 levels array.
+ *
+ * Return: 0 on success. Errno on failure.
+ *
+ * Only supports device handles that are ACPI. Assume ACPI0016 HID for CXL.
+ */
+int acpi_get_genport_coordinates(u32 uid,
+				 struct access_coordinate *coord)
+{
+	struct memory_target *target;
+
+	guard(mutex)(&target_lock);
+	target = acpi_find_genport_target(uid);
+	if (!target)
+		return -ENOENT;
+
+	coord[ACCESS_COORDINATE_LOCAL] =
+		target->coord[NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL];
+	coord[ACCESS_COORDINATE_CPU] =
+		target->coord[NODE_ACCESS_CLASS_GENPORT_SINK_CPU];
+
+	return 0;
+}
+EXPORT_SYMBOL_NS_GPL(acpi_get_genport_coordinates, "CXL");
+
 static __init void alloc_memory_initiator(unsigned int cpu_pxm)
 {
 	struct memory_initiator *initiator;
@@ -119,8 +211,7 @@ static __init void alloc_memory_initiator(unsigned int cpu_pxm)
 	list_add_tail(&initiator->node, &initiators);
 }
 
-static __init void alloc_memory_target(unsigned int mem_pxm,
-		resource_size_t start, resource_size_t len)
+static __init struct memory_target *alloc_target(unsigned int mem_pxm)
 {
 	struct memory_target *target;
 
@@ -128,7 +219,7 @@ static __init struct memory_target *alloc_target(unsigned int mem_pxm)
 	if (!target) {
 		target = kzalloc(sizeof(*target), GFP_KERNEL);
 		if (!target)
-			return;
+			return NULL;
 		target->memory_pxm = mem_pxm;
 		target->processor_pxm = PXM_INVAL;
 		target->memregions = (struct resource) {
@@ -141,6 +232,19 @@ static __init struct memory_target *alloc_target(unsigned int mem_pxm)
 		INIT_LIST_HEAD(&target->caches);
 	}
 
+	return target;
+}
+
+static __init void alloc_memory_target(unsigned int mem_pxm,
+				       resource_size_t start,
+				       resource_size_t len)
+{
+	struct memory_target *target;
+
+	target = alloc_target(mem_pxm);
+	if (!target)
+		return;
+
 	/*
 	 * There are potentially multiple ranges per PXM, so record each
 	 * in the per-target memregions resource tree.
@@ -151,6 +255,18 @@ static __init void alloc_memory_target(unsigned int mem_pxm,
 		      start, start + len, mem_pxm);
 }
 
+static __init void alloc_genport_target(unsigned int mem_pxm, u8 *handle)
+{
+	struct memory_target *target;
+
+	target = alloc_target(mem_pxm);
+	if (!target)
+		return;
+
+	memcpy(target->gen_port_device_handle, handle,
+	       ACPI_SRAT_DEVICE_HANDLE_SIZE);
+}
+
 static __init const char *hmat_data_type(u8 type)
 {
 	switch (type) {
@@ -227,24 +343,24 @@ static void hmat_update_target_access(struct memory_target *target,
 {
 	switch (type) {
 	case ACPI_HMAT_ACCESS_LATENCY:
-		target->hmem_attrs[access].read_latency = value;
-		target->hmem_attrs[access].write_latency = value;
+		target->coord[access].read_latency = value;
+		target->coord[access].write_latency = value;
 		break;
 	case ACPI_HMAT_READ_LATENCY:
-		target->hmem_attrs[access].read_latency = value;
+		target->coord[access].read_latency = value;
 		break;
 	case ACPI_HMAT_WRITE_LATENCY:
-		target->hmem_attrs[access].write_latency = value;
+		target->coord[access].write_latency = value;
 		break;
 	case ACPI_HMAT_ACCESS_BANDWIDTH:
-		target->hmem_attrs[access].read_bandwidth = value;
-		target->hmem_attrs[access].write_bandwidth = value;
+		target->coord[access].read_bandwidth = value;
+		target->coord[access].write_bandwidth = value;
 		break;
	case ACPI_HMAT_READ_BANDWIDTH:
-		target->hmem_attrs[access].read_bandwidth = value;
+		target->coord[access].read_bandwidth = value;
 		break;
 	case ACPI_HMAT_WRITE_BANDWIDTH:
-		target->hmem_attrs[access].write_bandwidth = value;
+		target->coord[access].write_bandwidth = value;
 		break;
 	default:
 		break;
@@ -290,11 +406,28 @@ static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc)
 	}
 }
 
+static __init void hmat_update_target(unsigned int tgt_pxm, unsigned int init_pxm,
+				      u8 mem_hier, u8 type, u32 value)
+{
+	struct memory_target *target = find_mem_target(tgt_pxm);
+
+	if (mem_hier != ACPI_HMAT_MEMORY)
+		return;
+
+	if (target && target->processor_pxm == init_pxm) {
+		hmat_update_target_access(target, type, value,
+					  ACCESS_COORDINATE_LOCAL);
+		/* If the node has a CPU, update access ACCESS_COORDINATE_CPU */
+		if (node_state(pxm_to_node(init_pxm), N_CPU))
+			hmat_update_target_access(target, type, value,
+						  ACCESS_COORDINATE_CPU);
+	}
+}
+
 static __init int hmat_parse_locality(union acpi_subtable_headers *header,
 				      const unsigned long end)
 {
 	struct acpi_hmat_locality *hmat_loc = (void *)header;
-	struct memory_target *target;
 	unsigned int init, targ, total_size, ipds, tpds;
 	u32 *inits, *targs, value;
 	u16 *entries;
@@ -318,9 +451,9 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
 		return -EINVAL;
 	}
 
-	pr_info("Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
-		hmat_loc->flags, hmat_data_type(type), ipds, tpds,
-		hmat_loc->entry_base_unit);
+	pr_debug("Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
+		 hmat_loc->flags, hmat_data_type(type), ipds, tpds,
+		 hmat_loc->entry_base_unit);
 
 	inits = (u32 *)(hmat_loc + 1);
 	targs = inits + ipds;
@@ -331,19 +464,12 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
 			value = hmat_normalize(entries[init * tpds + targ],
 					       hmat_loc->entry_base_unit,
 					       type);
-			pr_info("  Initiator-Target[%u-%u]:%u%s\n",
-				inits[init], targs[targ], value,
-				hmat_data_type_suffix(type));
-
-			if (mem_hier == ACPI_HMAT_MEMORY) {
-				target = find_mem_target(targs[targ]);
-				if (target && target->processor_pxm == inits[init]) {
-					hmat_update_target_access(target, type, value, 0);
-					/* If the node has a CPU, update access 1 */
-					if (node_state(pxm_to_node(inits[init]), N_CPU))
-						hmat_update_target_access(target, type, value, 1);
-				}
-			}
+			pr_debug("  Initiator-Target[%u-%u]:%u%s\n",
+				 inits[init], targs[targ], value,
+				 hmat_data_type_suffix(type));
+
+			hmat_update_target(targs[targ], inits[init],
+					   mem_hier, type, value);
 		}
 	}
 
@@ -368,9 +494,9 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
 	}
 
 	attrs = cache->cache_attributes;
-	pr_info("Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
-		cache->memory_PD, cache->cache_size, attrs,
-		cache->number_of_SMBIOShandles);
+	pr_debug("Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
+		 cache->memory_PD, cache->cache_size, attrs,
+		 cache->number_of_SMBIOShandles);
 
 	target = find_mem_target(cache->memory_PD);
 	if (!target)
@@ -389,6 +515,11 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
 	switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
 	case ACPI_HMAT_CA_DIRECT_MAPPED:
 		tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
+		/* Extended Linear mode is only valid if cache is direct mapped */
+		if (cache->address_mode == ACPI_HMAT_CACHE_MODE_EXTENDED_LINEAR) {
+			tcache->cache_attrs.address_mode =
+				NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR;
+		}
 		break;
 	case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
 		tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
@@ -429,9 +560,9 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *header,
 	}
 
 	if (hmat_revision == 1)
-		pr_info("Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
-			p->reserved3, p->reserved4, p->flags, p->processor_PD,
-			p->memory_PD);
+		pr_debug("Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
+			 p->reserved3, p->reserved4, p->flags, p->processor_PD,
+			 p->memory_PD);
 	else
 		pr_info("Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
 			p->flags, p->processor_PD, p->memory_PD);
@@ -490,6 +621,27 @@ static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
 	return 0;
 }
 
+static __init int srat_parse_genport_affinity(union acpi_subtable_headers *header,
+					      const unsigned long end)
+{
+	struct acpi_srat_generic_affinity *ga = (void *)header;
+
+	if (!ga)
+		return -EINVAL;
+
+	if (!(ga->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED))
+		return 0;
+
+	/* Skip PCI device_handle for now */
+	if (ga->device_handle_type != 0)
+		return 0;
+
+	alloc_genport_target(ga->proximity_domain,
+			     (u8 *)ga->device_handle);
+
+	return 0;
+}
+
 static u32 hmat_initiator_perf(struct memory_target *target,
 			       struct memory_initiator *initiator,
 			       struct acpi_hmat_locality *hmat_loc)
@@ -562,39 +714,52 @@ static int initiator_cmp(void *priv, const struct list_head *a,
 {
 	struct memory_initiator *ia;
 	struct memory_initiator *ib;
-	unsigned long *p_nodes = priv;
 
 	ia = list_entry(a, struct memory_initiator, node);
 	ib = list_entry(b, struct memory_initiator, node);
 
-	set_bit(ia->processor_pxm, p_nodes);
-	set_bit(ib->processor_pxm, p_nodes);
-
 	return ia->processor_pxm - ib->processor_pxm;
 }
 
-static void hmat_register_target_initiators(struct memory_target *target)
+static int initiators_to_nodemask(unsigned long *p_nodes)
 {
-	static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
 	struct memory_initiator *initiator;
-	unsigned int mem_nid, cpu_nid;
+
+	if (list_empty(&initiators))
+		return -ENXIO;
+
+	list_for_each_entry(initiator, &initiators, node)
+		set_bit(initiator->processor_pxm, p_nodes);
+
+	return 0;
+}
+
+static void hmat_update_target_attrs(struct memory_target *target,
+				     unsigned long *p_nodes, int access)
+{
+	struct memory_initiator *initiator;
+	unsigned int cpu_nid;
 	struct memory_locality *loc = NULL;
 	u32 best = 0;
-	bool access0done = false;
 	int i;
 
-	mem_nid = pxm_to_node(target->memory_pxm);
+	/* Don't update for generic port if there's no device handle */
+	if ((access == NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL ||
+	     access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&
+	    !(*(u16 *)target->gen_port_device_handle))
+		return;
+
+	bitmap_zero(p_nodes, MAX_NUMNODES);
 	/*
-	 * If the Address Range Structure provides a local processor pxm, link
+	 * If the Address Range Structure provides a local processor pxm, set
 	 * only that one. Otherwise, find the best performance attributes and
-	 * register all initiators that match.
+	 * collect all initiators that match.
 	 */
 	if (target->processor_pxm != PXM_INVAL) {
 		cpu_nid = pxm_to_node(target->processor_pxm);
-		register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
-		access0done = true;
-		if (node_state(cpu_nid, N_CPU)) {
-			register_memory_node_under_compute_node(mem_nid, cpu_nid, 1);
+		if (access == ACCESS_COORDINATE_LOCAL ||
+		    node_state(cpu_nid, N_CPU)) {
+			set_bit(target->processor_pxm, p_nodes);
 			return;
 		}
 	}
@@ -608,43 +773,10 @@ static void hmat_register_target_initiators(struct memory_target *target)
 	 * We'll also use the sorting to prime the candidate nodes with known
 	 * initiators.
 	 */
-	bitmap_zero(p_nodes, MAX_NUMNODES);
-	list_sort(p_nodes, &initiators, initiator_cmp);
-	if (!access0done) {
-		for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
-			loc = localities_types[i];
-			if (!loc)
-				continue;
-
-			best = 0;
-			list_for_each_entry(initiator, &initiators, node) {
-				u32 value;
-
-				if (!test_bit(initiator->processor_pxm, p_nodes))
-					continue;
-
-				value = hmat_initiator_perf(target, initiator,
-							    loc->hmat_loc);
-				if (hmat_update_best(loc->hmat_loc->data_type, value, &best))
-					bitmap_clear(p_nodes, 0, initiator->processor_pxm);
-				if (value != best)
-					clear_bit(initiator->processor_pxm, p_nodes);
-			}
-			if (best)
-				hmat_update_target_access(target, loc->hmat_loc->data_type,
-							  best, 0);
-		}
-
-		for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
-			cpu_nid = pxm_to_node(i);
-			register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
-		}
-	}
+	list_sort(NULL, &initiators, initiator_cmp);
+	if (initiators_to_nodemask(p_nodes) < 0)
+		return;
 
-	/* Access 1 ignores Generic Initiators */
-	bitmap_zero(p_nodes, MAX_NUMNODES);
-	list_sort(p_nodes, &initiators, initiator_cmp);
-	best = 0;
 	for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
 		loc = localities_types[i];
 		if (!loc)
@@ -654,7 +786,9 @@ static void hmat_register_target_initiators(struct memory_target *target)
 		list_for_each_entry(initiator, &initiators, node) {
 			u32 value;
 
-			if (!initiator->has_cpu) {
+			if ((access == ACCESS_COORDINATE_CPU ||
+			     access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&
+			    !initiator->has_cpu) {
 				clear_bit(initiator->processor_pxm, p_nodes);
 				continue;
 			}
@@ -668,14 +802,45 @@ static void hmat_register_target_initiators(struct memory_target *target)
 				clear_bit(initiator->processor_pxm, p_nodes);
 		}
 		if (best)
-			hmat_update_target_access(target, loc->hmat_loc->data_type, best, 1);
+			hmat_update_target_access(target, loc->hmat_loc->data_type, best, access);
 	}
+}
+
+static void __hmat_register_target_initiators(struct memory_target *target,
+					      unsigned long *p_nodes,
+					      int access)
+{
+	unsigned int mem_nid, cpu_nid;
+	int i;
+
+	mem_nid = pxm_to_node(target->memory_pxm);
+	hmat_update_target_attrs(target, p_nodes, access);
 	for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
 		cpu_nid = pxm_to_node(i);
-		register_memory_node_under_compute_node(mem_nid, cpu_nid, 1);
+		register_memory_node_under_compute_node(mem_nid, cpu_nid, access);
 	}
 }
 
+static void hmat_update_generic_target(struct memory_target *target)
+{
+	static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
+
+	hmat_update_target_attrs(target, p_nodes,
+				 NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL);
+	hmat_update_target_attrs(target, p_nodes,
+				 NODE_ACCESS_CLASS_GENPORT_SINK_CPU);
+}
+
+static void hmat_register_target_initiators(struct memory_target *target)
+{
+	static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
+
+	__hmat_register_target_initiators(target, p_nodes,
+					  ACCESS_COORDINATE_LOCAL);
+	__hmat_register_target_initiators(target, p_nodes,
+					  ACCESS_COORDINATE_CPU);
+}
+
 static void hmat_register_target_cache(struct memory_target *target)
 {
 	unsigned mem_nid = pxm_to_node(target->memory_pxm);
@@ -688,7 +853,7 @@ static void hmat_register_target_cache(struct memory_target *target)
 static void hmat_register_target_perf(struct memory_target *target, int access)
 {
 	unsigned mem_nid = pxm_to_node(target->memory_pxm);
-	node_set_perf_attrs(mem_nid, &target->hmem_attrs[access], access);
+	node_set_perf_attrs(mem_nid, &target->coord[access], access);
 }
 
 static void hmat_register_target_devices(struct memory_target *target)
@@ -705,39 +870,55 @@ static void hmat_register_target_devices(struct memory_target *target)
 	for (res = target->memregions.child; res; res = res->sibling) {
 		int target_nid = pxm_to_node(target->memory_pxm);
 
-		hmem_register_device(target_nid, res);
+		hmem_register_resource(target_nid, res);
 	}
 }
 
-static void hmat_register_target(struct memory_target *target)
+static void hmat_hotplug_target(struct memory_target *target)
 {
 	int nid = pxm_to_node(target->memory_pxm);
 
 	/*
+	 * Skip offline nodes. This can happen when memory marked EFI_MEMORY_SP,
+	 * "specific purpose", is applied to all the memory in a proximity
+	 * domain leading to the node being marked offline / unplugged, or if
+	 * memory-only "hotplug" node is offline.
+	 */
+	if (nid == NUMA_NO_NODE || !node_online(nid))
+		return;
+
+	guard(mutex)(&target_lock);
+	if (target->registered)
+		return;
+
+	hmat_register_target_initiators(target);
+	hmat_register_target_cache(target);
+	hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL);
+	hmat_register_target_perf(target, ACCESS_COORDINATE_CPU);
+	target->registered = true;
+}
+
+static void hmat_register_target(struct memory_target *target)
+{
+	/*
 	 * Devices may belong to either an offline or online
 	 * node, so unconditionally add them.
 	 */
 	hmat_register_target_devices(target);
 
 	/*
-	 * Skip offline nodes. This can happen when memory
-	 * marked EFI_MEMORY_SP, "specific purpose", is applied
-	 * to all the memory in a proximity domain leading to
-	 * the node being marked offline / unplugged, or if
-	 * memory-only "hotplug" node is offline.
+	 * Register generic port perf numbers. The nid may not be
+	 * initialized and is still NUMA_NO_NODE.
 	 */
-	if (nid == NUMA_NO_NODE || !node_online(nid))
-		return;
-
-	mutex_lock(&target_lock);
-	if (!target->registered) {
-		hmat_register_target_initiators(target);
-		hmat_register_target_cache(target);
-		hmat_register_target_perf(target, 0);
-		hmat_register_target_perf(target, 1);
-		target->registered = true;
+	scoped_guard(mutex, &target_lock) {
+		if (*(u16 *)target->gen_port_device_handle) {
+			hmat_update_generic_target(target);
+			target->registered = true;
			return;
+		}
 	}
-	mutex_unlock(&target_lock);
+
+	hmat_hotplug_target(target);
 }
 
 static void hmat_register_targets(void)
@@ -752,10 +933,10 @@ static int hmat_callback(struct notifier_block *self,
 			 unsigned long action, void *arg)
 {
 	struct memory_target *target;
-	struct memory_notify *mnb = arg;
-	int pxm, nid = mnb->status_change_nid;
+	struct node_notify *nn = arg;
+	int pxm, nid = nn->nid;
 
-	if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
+	if (action != NODE_ADDED_FIRST_MEMORY)
 		return NOTIFY_OK;
 
 	pxm = node_to_pxm(nid);
@@ -763,13 +944,60 @@ static int hmat_callback(struct notifier_block *self,
 	if (!target)
 		return NOTIFY_OK;
 
-	hmat_register_target(target);
+	hmat_hotplug_target(target);
 	return NOTIFY_OK;
 }
 
-static struct notifier_block hmat_callback_nb = {
-	.notifier_call = hmat_callback,
-	.priority = 2,
+static int __init hmat_set_default_dram_perf(void)
+{
+	int rc;
+	int nid, pxm;
+	struct memory_target *target;
+	struct access_coordinate *attrs;
+
+	for_each_node_mask(nid, default_dram_nodes) {
+		pxm = node_to_pxm(nid);
+		target = find_mem_target(pxm);
+		if (!target)
+			continue;
+		attrs = &target->coord[ACCESS_COORDINATE_CPU];
+		rc = mt_set_default_dram_perf(nid, attrs, "ACPI HMAT");
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+
+static int hmat_calculate_adistance(struct notifier_block *self,
+				    unsigned long nid, void *data)
+{
+	static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
+	struct memory_target *target;
+	struct access_coordinate *perf;
+	int *adist = data;
+	int pxm;
+
+	pxm = node_to_pxm(nid);
+	target = find_mem_target(pxm);
+	if (!target)
+		return NOTIFY_OK;
+
+	mutex_lock(&target_lock);
+	hmat_update_target_attrs(target, p_nodes, ACCESS_COORDINATE_CPU);
+	mutex_unlock(&target_lock);
+
+	perf = &target->coord[ACCESS_COORDINATE_CPU];
+
+	if (mt_perf_to_adistance(perf, adist))
+		return NOTIFY_OK;
+
+	return NOTIFY_STOP;
+}
+
+static struct notifier_block hmat_adist_nb __meminitdata = {
+	.notifier_call = hmat_calculate_adistance,
+	.priority = 100,
 };
 
 static __init void hmat_free_structures(void)
@@ -827,6 +1055,13 @@ static __init int hmat_init(void)
 				     ACPI_SRAT_TYPE_MEMORY_AFFINITY,
 				     srat_parse_mem_affinity, 0) < 0)
 		goto out_put;
+
+	if (acpi_table_parse_entries(ACPI_SIG_SRAT,
+				     sizeof(struct acpi_table_srat),
+				     ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY,
+				     srat_parse_genport_affinity, 0) < 0)
+		goto out_put;
+
 	acpi_put_table(tbl);
 
 	status = acpi_get_table(ACPI_SIG_HMAT, 0, &tbl);
@@ -854,11 +1089,16 @@ static __init int hmat_init(void)
 	hmat_register_targets();
 
 	/* Keep the table and structures if the notifier may use them */
-	if (!register_hotmemory_notifier(&hmat_callback_nb))
-		return 0;
+	if (hotplug_node_notifier(hmat_callback, HMAT_CALLBACK_PRI))
+		goto out_put;
+
+	if (!hmat_set_default_dram_perf())
+		register_mt_adistance_algorithm(&hmat_adist_nb);
+
+	return 0;
 
 out_put:
 	hmat_free_structures();
 	acpi_put_table(tbl);
 	return 0;
 }
-device_initcall(hmat_init);
+subsys_initcall(hmat_init);
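
Note: the new acpi_get_genport_coordinates() export is the bridge by which non-ACPI code (the CXL driver, per the "CXL" export namespace and the ACPI0016 remark in the kernel-doc) pulls a generic port's sink coordinates out of the HMAT data parsed above, keyed by the port's ACPI _UID. A minimal consumer sketch follows; example_fetch_genport_perf() is an invented name, and the units (nanoseconds and MB/s) follow hmat_normalize() in this file:

#include <linux/acpi.h>
#include <linux/node.h>

/* Hypothetical consumer: look up both access classes for one generic port */
static int example_fetch_genport_perf(u32 uid)
{
	/* One entry per access class, written back by the ACPI layer */
	struct access_coordinate coord[ACCESS_COORDINATE_MAX];
	int rc;

	rc = acpi_get_genport_coordinates(uid, coord);
	if (rc)
		return rc;	/* -ENOENT: no generic port matches this _UID */

	pr_info("genport %u: CPU read latency %u ns, read bandwidth %u MB/s\n",
		uid, coord[ACCESS_COORDINATE_CPU].read_latency,
		coord[ACCESS_COORDINATE_CPU].read_bandwidth);
	return 0;
}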
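acpi_find_genport_target() compares the u32 at offset 8 of the stored device handle, and hmat_update_target_attrs() treats a zero leading u16 as "no handle recorded". Both offsets follow the SRAT Generic Initiator/Port Affinity device handle layout, which for the ACPI handle type places the _HID in the first eight bytes and the _UID in the next four. An illustrative struct view of the raw 16-byte array (the struct name is invented; hmat.c deliberately keeps this as a plain u8 array):

/* Illustrative only: stored as u8[ACPI_SRAT_DEVICE_HANDLE_SIZE] in hmat.c */
struct example_srat_acpi_handle {
	char hid[8];	/* _HID, e.g. "ACPI0016" for a CXL host bridge */
	u32 uid;	/* _UID, what acpi_find_genport_target() matches */
	u32 reserved;
};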
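hmat_hotplug_target(), acpi_get_genport_coordinates() and hmat_register_target() drop explicit mutex_lock()/mutex_unlock() pairs in favor of the scope-based helpers from <linux/cleanup.h>, which release the lock on every exit path. A self-contained sketch of the two forms used here (the example_* names are invented):

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);
static int example_count;

static int example_inc(void)
{
	guard(mutex)(&example_lock);	/* held until the function returns */
	return ++example_count;
}

static void example_reset(void)
{
	scoped_guard(mutex, &example_lock)	/* held only for this statement */
		example_count = 0;
	/* example_lock is already released here */
}

This is why the early return inside hmat_register_target()'s scoped_guard() block is safe: leaving the braces releases target_lock before hmat_hotplug_target() takes it again.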
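hmat_get_extended_linear_cache_size() only reports a size when the target owns a direct-mapped cache flagged NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR and the target's memregions contain the queried range; when no such cache exists it reports zero rather than an error. A hedged caller sketch (the in-tree consumer is the CXL region code, again per the "CXL" export namespace; example_probe_cache() is invented):

static void example_probe_cache(struct resource *backing_res, int nid)
{
	resource_size_t cache_size;

	if (hmat_get_extended_linear_cache_size(backing_res, nid, &cache_size))
		return;		/* -ENOENT: no HMAT target for this node */

	if (cache_size)
		pr_info("node %d: %pa of extended linear cache\n",
			nid, &cache_size);
}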
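hmat_calculate_adistance() feeds the ACCESS_COORDINATE_CPU numbers into the memory-tiers core, which scales them against the default DRAM performance seeded by hmat_set_default_dram_perf(); returning NOTIFY_STOP tells the notifier chain that a value was produced. A sketch of that conversion in isolation, assuming mt_perf_to_adistance() and MEMTIER_ADISTANCE_DRAM behave as declared in <linux/memory-tiers.h> (example_adistance() is invented):

#include <linux/memory-tiers.h>

static int example_adistance(struct access_coordinate *perf)
{
	int adist = MEMTIER_ADISTANCE_DRAM;

	/* Media slower than default DRAM ends up with a larger adist */
	if (mt_perf_to_adistance(perf, &adist))
		return MEMTIER_ADISTANCE_DRAM;	/* fall back to the DRAM tier */

	return adist;
}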
