author     Linus Torvalds <torvalds@linux-foundation.org>   2022-08-10 11:07:26 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2022-08-10 11:07:26 -0700
commit     c235698355fa94df7073b51befda7d4be00a0e23 (patch)
tree       3cb9d2fa40862f2484eda9c148b4326505a8534d /kernel
parent     5e2e7383b57fa03ec2b00c82bb7f49a4a707c1f7 (diff)
parent     1cd8a2537eb07751d405ab7e2223f20338a90506 (diff)
Merge tag 'cxl-for-6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl
Pull cxl updates from Dan Williams:
 "Compute Express Link (CXL) updates for 6.0:

   - Introduce a 'struct cxl_region' object with support for
     provisioning and assembling persistent memory regions.

   - Introduce alloc_free_mem_region() to accompany the existing
     request_free_mem_region() as a method to allocate physical memory
     capacity out of an existing resource.

   - Export insert_resource_expand_to_fit() for the CXL subsystem to
     late-publish CXL platform windows in iomem_resource.

   - Add a polled mode PCI DOE (Data Object Exchange) driver service and
     use it in cxl_pci to retrieve the CDAT (Coherent Device Attribute
     Table)"

* tag 'cxl-for-6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl: (74 commits)
  cxl/hdm: Fix skip allocations vs multiple pmem allocations
  cxl/region: Disallow region granularity != window granularity
  cxl/region: Fix x1 interleave to greater than x1 interleave routing
  cxl/region: Move HPA setup to cxl_region_attach()
  cxl/region: Fix decoder interleave programming
  Documentation: cxl: remove dangling kernel-doc reference
  cxl/region: describe targets and nr_targets members of cxl_region_params
  cxl/regions: add padding for cxl_rr_ep_add nested lists
  cxl/region: Fix IS_ERR() vs NULL check
  cxl/region: Fix region reference target accounting
  cxl/region: Fix region commit uninitialized variable warning
  cxl/region: Fix port setup uninitialized variable warnings
  cxl/region: Stop initializing interleave granularity
  cxl/hdm: Fix DPA reservation vs cxl_endpoint_decoder lifetime
  cxl/acpi: Minimize granularity for x1 interleaves
  cxl/region: Delete 'region' attribute from root decoders
  cxl/acpi: Autoload driver for 'cxl_acpi' test devices
  cxl/region: decrement ->nr_targets on error in cxl_region_attach()
  cxl/region: prevent underflow in ways_to_cxl()
  cxl/region: uninitialized variable in alloc_hpa()
  ...
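To illustrate the allocator introduced above: a caller such as a CXL region driver could carve a free, aligned span out of a parent window resource roughly as follows. This is a minimal sketch, not code from this pull; the function name example_claim_hpa, the "example-region" label, and the SZ_256M size/alignment are assumptions made up for illustration, and error handling is abbreviated.

/*
 * Hedged sketch of a caller of alloc_free_mem_region().  The declaration
 * is expected alongside request_free_mem_region() in <linux/ioport.h>.
 */
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/sizes.h>

static struct resource *example_claim_hpa(struct resource *cxl_window)
{
	struct resource *res;

	/* Ascending search for a free, unclaimed span inside cxl_window */
	res = alloc_free_mem_region(cxl_window, SZ_256M, SZ_256M,
				    "example-region");
	if (IS_ERR(res))
		return res;

	/*
	 * res now sits in the tree as a child of cxl_window covering
	 * res->start..res->end; the caller owns it and unlinks it with
	 * remove_resource() on teardown.
	 */
	return res;
}

Internally, devm_request_free_mem_region() and request_free_mem_region() keep their old behavior by passing GFR_DESCENDING | GFR_REQUEST_REGION to the shared get_free_mem_region() helper, while alloc_free_mem_region() passes no flags, giving an ascending search that inserts the resource under @base with IORES_DESC_NONE rather than requesting it from iomem_resource.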
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/resource.c   185
1 file changed, 150 insertions(+), 35 deletions(-)
diff --git a/kernel/resource.c b/kernel/resource.c
index 34eaee179689..4c5e80b92f2f 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -489,8 +489,9 @@ int __weak page_is_ram(unsigned long pfn)
}
EXPORT_SYMBOL_GPL(page_is_ram);
-static int __region_intersects(resource_size_t start, size_t size,
- unsigned long flags, unsigned long desc)
+static int __region_intersects(struct resource *parent, resource_size_t start,
+ size_t size, unsigned long flags,
+ unsigned long desc)
{
struct resource res;
int type = 0; int other = 0;
@@ -499,7 +500,7 @@ static int __region_intersects(resource_size_t start, size_t size,
res.start = start;
res.end = start + size - 1;
- for (p = iomem_resource.child; p ; p = p->sibling) {
+ for (p = parent->child; p ; p = p->sibling) {
bool is_type = (((p->flags & flags) == flags) &&
((desc == IORES_DESC_NONE) ||
(desc == p->desc)));
@@ -543,7 +544,7 @@ int region_intersects(resource_size_t start, size_t size, unsigned long flags,
int ret;
read_lock(&resource_lock);
- ret = __region_intersects(start, size, flags, desc);
+ ret = __region_intersects(&iomem_resource, start, size, flags, desc);
read_unlock(&resource_lock);
return ret;
@@ -891,6 +892,13 @@ void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
}
write_unlock(&resource_lock);
}
+/*
+ * Not for general consumption, only early boot memory map parsing, PCI
+ * resource discovery, and late discovery of CXL resources are expected
+ * to use this interface. The former are built-in and only the latter,
+ * CXL, is a module.
+ */
+EXPORT_SYMBOL_NS_GPL(insert_resource_expand_to_fit, CXL);
/**
* remove_resource - Remove a resource in the resource tree
@@ -1773,62 +1781,139 @@ void resource_list_free(struct list_head *head)
}
EXPORT_SYMBOL(resource_list_free);
-#ifdef CONFIG_DEVICE_PRIVATE
-static struct resource *__request_free_mem_region(struct device *dev,
- struct resource *base, unsigned long size, const char *name)
+#ifdef CONFIG_GET_FREE_REGION
+#define GFR_DESCENDING (1UL << 0)
+#define GFR_REQUEST_REGION (1UL << 1)
+#define GFR_DEFAULT_ALIGN (1UL << PA_SECTION_SHIFT)
+
+static resource_size_t gfr_start(struct resource *base, resource_size_t size,
+ resource_size_t align, unsigned long flags)
+{
+ if (flags & GFR_DESCENDING) {
+ resource_size_t end;
+
+ end = min_t(resource_size_t, base->end,
+ (1ULL << MAX_PHYSMEM_BITS) - 1);
+ return end - size + 1;
+ }
+
+ return ALIGN(base->start, align);
+}
+
+static bool gfr_continue(struct resource *base, resource_size_t addr,
+ resource_size_t size, unsigned long flags)
+{
+ if (flags & GFR_DESCENDING)
+ return addr > size && addr >= base->start;
+ /*
+ * In the ascend case be careful that the last increment by
+ * @size did not wrap 0.
+ */
+ return addr > addr - size &&
+ addr <= min_t(resource_size_t, base->end,
+ (1ULL << MAX_PHYSMEM_BITS) - 1);
+}
+
+static resource_size_t gfr_next(resource_size_t addr, resource_size_t size,
+ unsigned long flags)
{
- resource_size_t end, addr;
+ if (flags & GFR_DESCENDING)
+ return addr - size;
+ return addr + size;
+}
+
+static void remove_free_mem_region(void *_res)
+{
+ struct resource *res = _res;
+
+ if (res->parent)
+ remove_resource(res);
+ free_resource(res);
+}
+
+static struct resource *
+get_free_mem_region(struct device *dev, struct resource *base,
+ resource_size_t size, const unsigned long align,
+ const char *name, const unsigned long desc,
+ const unsigned long flags)
+{
+ resource_size_t addr;
struct resource *res;
struct region_devres *dr = NULL;
- size = ALIGN(size, 1UL << PA_SECTION_SHIFT);
- end = min_t(unsigned long, base->end, (1UL << MAX_PHYSMEM_BITS) - 1);
- addr = end - size + 1UL;
+ size = ALIGN(size, align);
res = alloc_resource(GFP_KERNEL);
if (!res)
return ERR_PTR(-ENOMEM);
- if (dev) {
+ if (dev && (flags & GFR_REQUEST_REGION)) {
dr = devres_alloc(devm_region_release,
sizeof(struct region_devres), GFP_KERNEL);
if (!dr) {
free_resource(res);
return ERR_PTR(-ENOMEM);
}
+ } else if (dev) {
+ if (devm_add_action_or_reset(dev, remove_free_mem_region, res))
+ return ERR_PTR(-ENOMEM);
}
write_lock(&resource_lock);
- for (; addr > size && addr >= base->start; addr -= size) {
- if (__region_intersects(addr, size, 0, IORES_DESC_NONE) !=
- REGION_DISJOINT)
+ for (addr = gfr_start(base, size, align, flags);
+ gfr_continue(base, addr, size, flags);
+ addr = gfr_next(addr, size, flags)) {
+ if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) !=
+ REGION_DISJOINT)
continue;
- if (__request_region_locked(res, &iomem_resource, addr, size,
- name, 0))
- break;
+ if (flags & GFR_REQUEST_REGION) {
+ if (__request_region_locked(res, &iomem_resource, addr,
+ size, name, 0))
+ break;
- if (dev) {
- dr->parent = &iomem_resource;
- dr->start = addr;
- dr->n = size;
- devres_add(dev, dr);
- }
+ if (dev) {
+ dr->parent = &iomem_resource;
+ dr->start = addr;
+ dr->n = size;
+ devres_add(dev, dr);
+ }
- res->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
- write_unlock(&resource_lock);
+ res->desc = desc;
+ write_unlock(&resource_lock);
+
+
+ /*
+ * A driver is claiming this region so revoke any
+ * mappings.
+ */
+ revoke_iomem(res);
+ } else {
+ res->start = addr;
+ res->end = addr + size - 1;
+ res->name = name;
+ res->desc = desc;
+ res->flags = IORESOURCE_MEM;
+
+ /*
+ * Only succeed if the resource hosts an exclusive
+ * range after the insert
+ */
+ if (__insert_resource(base, res) || res->child)
+ break;
+
+ write_unlock(&resource_lock);
+ }
- /*
- * A driver is claiming this region so revoke any mappings.
- */
- revoke_iomem(res);
return res;
}
write_unlock(&resource_lock);
- free_resource(res);
- if (dr)
+ if (flags & GFR_REQUEST_REGION) {
+ free_resource(res);
devres_free(dr);
+ } else if (dev)
+ devm_release_action(dev, remove_free_mem_region, res);
return ERR_PTR(-ERANGE);
}
@@ -1847,18 +1932,48 @@ static struct resource *__request_free_mem_region(struct device *dev,
struct resource *devm_request_free_mem_region(struct device *dev,
struct resource *base, unsigned long size)
{
- return __request_free_mem_region(dev, base, size, dev_name(dev));
+ unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;
+
+ return get_free_mem_region(dev, base, size, GFR_DEFAULT_ALIGN,
+ dev_name(dev),
+ IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(devm_request_free_mem_region);
struct resource *request_free_mem_region(struct resource *base,
unsigned long size, const char *name)
{
- return __request_free_mem_region(NULL, base, size, name);
+ unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;
+
+ return get_free_mem_region(NULL, base, size, GFR_DEFAULT_ALIGN, name,
+ IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(request_free_mem_region);
-#endif /* CONFIG_DEVICE_PRIVATE */
+/**
+ * alloc_free_mem_region - find a free region relative to @base
+ * @base: resource that will parent the new resource
+ * @size: size in bytes of memory to allocate from @base
+ * @align: alignment requirements for the allocation
+ * @name: resource name
+ *
+ * Buses like CXL, that can dynamically instantiate new memory regions,
+ * need a method to allocate physical address space for those regions.
+ * Allocate and insert a new resource to cover a free, unclaimed by a
+ * descendant of @base, range in the span of @base.
+ */
+struct resource *alloc_free_mem_region(struct resource *base,
+ unsigned long size, unsigned long align,
+ const char *name)
+{
+ /* Default of ascending direction and insert resource */
+ unsigned long flags = 0;
+
+ return get_free_mem_region(NULL, base, size, align, name,
+ IORES_DESC_NONE, flags);
+}
+EXPORT_SYMBOL_NS_GPL(alloc_free_mem_region, CXL);
+#endif /* CONFIG_GET_FREE_REGION */
static int __init strict_iomem(char *str)
{