Diffstat (limited to 'drivers/of/of_reserved_mem.c')
-rw-r--r--  drivers/of/of_reserved_mem.c | 371
1 file changed, 299 insertions(+), 72 deletions(-)
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 7ec94cfcbddb..ee2e31522d7e 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -12,6 +12,7 @@
#define pr_fmt(fmt) "OF: reserved mem: " fmt
#include <linux/err.h>
+#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
@@ -26,8 +27,9 @@
#include "of_private.h"
-#define MAX_RESERVED_REGIONS 64
-static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
+static struct reserved_mem reserved_mem_array[MAX_RESERVED_REGIONS] __initdata;
+static struct reserved_mem *reserved_mem __refdata = reserved_mem_array;
+static int total_reserved_mem_cnt = MAX_RESERVED_REGIONS;
static int reserved_mem_count;
static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
@@ -50,20 +52,66 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
memblock_phys_free(base, size);
}
- kmemleak_ignore_phys(base);
+ if (!err)
+ kmemleak_ignore_phys(base);
return err;
}
/*
+ * alloc_reserved_mem_array() - allocate memory for the reserved_mem
+ * array using memblock
+ *
+ * This function is used to allocate memory for the reserved_mem
+ * array according to the total number of reserved memory regions
+ * defined in the DT.
+ * After the new array is allocated, the information stored in
+ * the initial static array is copied over to this new array and
+ * the new array is used from this point on.
+ */
+static void __init alloc_reserved_mem_array(void)
+{
+ struct reserved_mem *new_array;
+ size_t alloc_size, copy_size, memset_size;
+
+ alloc_size = array_size(total_reserved_mem_cnt, sizeof(*new_array));
+ if (alloc_size == SIZE_MAX) {
+ pr_err("Failed to allocate memory for reserved_mem array with err: %d", -EOVERFLOW);
+ return;
+ }
+
+ new_array = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
+ if (!new_array) {
+ pr_err("Failed to allocate memory for reserved_mem array with err: %d", -ENOMEM);
+ return;
+ }
+
+ copy_size = array_size(reserved_mem_count, sizeof(*new_array));
+ if (copy_size == SIZE_MAX) {
+ memblock_free(new_array, alloc_size);
+ total_reserved_mem_cnt = MAX_RESERVED_REGIONS;
+ pr_err("Failed to allocate memory for reserved_mem array with err: %d", -EOVERFLOW);
+ return;
+ }
+
+ memset_size = alloc_size - copy_size;
+
+ memcpy(new_array, reserved_mem, copy_size);
+ memset(new_array + reserved_mem_count, 0, memset_size);
+
+ reserved_mem = new_array;
+}
+
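/*
 * Worked example, not part of the patch: array_size() from <linux/overflow.h>
 * saturates to SIZE_MAX when the multiplication would overflow, which is what
 * the SIZE_MAX checks in alloc_reserved_mem_array() rely on.  For sane counts
 * the result is exact, e.g. array_size(128, sizeof(struct reserved_mem)) is
 * simply 128 * sizeof(struct reserved_mem), while
 * array_size(SIZE_MAX, sizeof(struct reserved_mem)) returns SIZE_MAX.
 */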
+static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem);
+/*
* fdt_reserved_mem_save_node() - save fdt node for second pass initialization
*/
-void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
- phys_addr_t base, phys_addr_t size)
+static void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
+ phys_addr_t base, phys_addr_t size)
{
struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];
- if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) {
+ if (reserved_mem_count == total_reserved_mem_cnt) {
pr_err("not enough space for all defined regions.\n");
return;
}
@@ -73,10 +121,223 @@ void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
rmem->base = base;
rmem->size = size;
+ /* Call the region specific initialization function */
+ fdt_init_reserved_mem_node(rmem);
+
reserved_mem_count++;
return;
}
+static int __init early_init_dt_reserve_memory(phys_addr_t base,
+ phys_addr_t size, bool nomap)
+{
+ if (nomap) {
+ /*
+ * If the memory is already reserved (by another region), we
+ * should not allow it to be marked nomap, but don't worry
+ * if the region isn't memory as it won't be mapped.
+ */
+ if (memblock_overlaps_region(&memblock.memory, base, size) &&
+ memblock_is_region_reserved(base, size))
+ return -EBUSY;
+
+ return memblock_mark_nomap(base, size);
+ }
+ return memblock_reserve(base, size);
+}
+
+/*
+ * __reserved_mem_reserve_reg() - reserve all memory described in 'reg' property
+ */
+static int __init __reserved_mem_reserve_reg(unsigned long node,
+ const char *uname)
+{
+ int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
+ phys_addr_t base, size;
+ int len;
+ const __be32 *prop;
+ bool nomap;
+
+ prop = of_get_flat_dt_prop(node, "reg", &len);
+ if (!prop)
+ return -ENOENT;
+
+ if (len && len % t_len != 0) {
+ pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
+ uname);
+ return -EINVAL;
+ }
+
+ nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
+
+ while (len >= t_len) {
+ base = dt_mem_next_cell(dt_root_addr_cells, &prop);
+ size = dt_mem_next_cell(dt_root_size_cells, &prop);
+
+ if (size &&
+ early_init_dt_reserve_memory(base, size, nomap) == 0)
+ pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
+ uname, &base, (unsigned long)(size / SZ_1M));
+ else
+ pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
+ uname, &base, (unsigned long)(size / SZ_1M));
+
+ len -= t_len;
+ }
+ return 0;
+}
+
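/*
 * Worked example, not part of the patch: with the common 64-bit layout of
 * #address-cells = <2> and #size-cells = <2>, each "reg" tuple is
 * (2 + 2) * sizeof(__be32) = 16 bytes, so t_len == 16 and a 32-byte "reg"
 * property describes two regions; the while (len >= t_len) loop above then
 * reserves (or marks nomap) each of them in turn.
 */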
+/*
+ * __reserved_mem_check_root() - check if #size-cells, #address-cells provided
+ * in /reserved-memory matches the values supported by the current implementation,
+ * also check if ranges property has been provided
+ */
+static int __init __reserved_mem_check_root(unsigned long node)
+{
+ const __be32 *prop;
+
+ prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
+ if (!prop || be32_to_cpup(prop) != dt_root_size_cells)
+ return -EINVAL;
+
+ prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
+ if (!prop || be32_to_cpup(prop) != dt_root_addr_cells)
+ return -EINVAL;
+
+ prop = of_get_flat_dt_prop(node, "ranges", NULL);
+ if (!prop)
+ return -EINVAL;
+ return 0;
+}
+
+static void __init __rmem_check_for_overlap(void);
+
+/**
+ * fdt_scan_reserved_mem_reg_nodes() - Store info for the "reg" defined
+ * reserved memory regions.
+ *
+ * This function is used to scan through the DT and store the
+ * information for the reserved memory regions that are defined using
+ * the "reg" property. The region node number, name, base address, and
+ * size are all stored in the reserved_mem array by calling the
+ * fdt_reserved_mem_save_node() function.
+ */
+void __init fdt_scan_reserved_mem_reg_nodes(void)
+{
+ int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
+ const void *fdt = initial_boot_params;
+ phys_addr_t base, size;
+ const __be32 *prop;
+ int node, child;
+ int len;
+
+ if (!fdt)
+ return;
+
+ node = fdt_path_offset(fdt, "/reserved-memory");
+ if (node < 0) {
+ pr_info("Reserved memory: No reserved-memory node in the DT\n");
+ return;
+ }
+
+ /* Attempt dynamic allocation of a new reserved_mem array */
+ alloc_reserved_mem_array();
+
+ if (__reserved_mem_check_root(node)) {
+ pr_err("Reserved memory: unsupported node format, ignoring\n");
+ return;
+ }
+
+ fdt_for_each_subnode(child, fdt, node) {
+ const char *uname;
+
+ prop = of_get_flat_dt_prop(child, "reg", &len);
+ if (!prop)
+ continue;
+ if (!of_fdt_device_is_available(fdt, child))
+ continue;
+
+ uname = fdt_get_name(fdt, child, NULL);
+ if (len && len % t_len != 0) {
+ pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
+ uname);
+ continue;
+ }
+
+ if (len > t_len)
+ pr_warn("%s() ignores %d regions in node '%s'\n",
+ __func__, len / t_len - 1, uname);
+
+ base = dt_mem_next_cell(dt_root_addr_cells, &prop);
+ size = dt_mem_next_cell(dt_root_size_cells, &prop);
+
+ if (size)
+ fdt_reserved_mem_save_node(child, uname, base, size);
+ }
+
+ /* check for overlapping reserved regions */
+ __rmem_check_for_overlap();
+}
+
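/*
 * Illustrative /reserved-memory fragment, not from this patch, showing which
 * nodes each pass records: the statically placed "reg" node is the kind saved
 * by fdt_scan_reserved_mem_reg_nodes() above, while the dynamically sized
 * node has no "reg" and is saved earlier in boot, when fdt_scan_reserved_mem()
 * (defined below) allocates its memory.
 *
 *	reserved-memory {
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *		ranges;
 *
 *		framebuffer@78000000 {
 *			reg = <0x0 0x78000000 0x0 0x800000>;
 *			no-map;
 *		};
 *
 *		linux,cma {
 *			compatible = "shared-dma-pool";
 *			reusable;
 *			size = <0x0 0x4000000>;
 *		};
 *	};
 */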
+static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname);
+
+/*
+ * fdt_scan_reserved_mem() - scan a single FDT node for reserved memory
+ */
+int __init fdt_scan_reserved_mem(void)
+{
+ int node, child;
+ int dynamic_nodes_cnt = 0, count = 0;
+ int dynamic_nodes[MAX_RESERVED_REGIONS];
+ const void *fdt = initial_boot_params;
+
+ node = fdt_path_offset(fdt, "/reserved-memory");
+ if (node < 0)
+ return -ENODEV;
+
+ if (__reserved_mem_check_root(node) != 0) {
+ pr_err("Reserved memory: unsupported node format, ignoring\n");
+ return -EINVAL;
+ }
+
+ fdt_for_each_subnode(child, fdt, node) {
+ const char *uname;
+ int err;
+
+ if (!of_fdt_device_is_available(fdt, child))
+ continue;
+
+ uname = fdt_get_name(fdt, child, NULL);
+
+ err = __reserved_mem_reserve_reg(child, uname);
+ if (!err)
+ count++;
+ /*
+ * Save the nodes for the dynamically-placed regions
+ * into an array which will be used for allocation right
+ * after all the statically-placed regions are reserved
+ * or marked as no-map. This is done to avoid dynamically
+ * allocating from one of the statically-placed regions.
+ */
+ if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL)) {
+ dynamic_nodes[dynamic_nodes_cnt] = child;
+ dynamic_nodes_cnt++;
+ }
+ }
+ for (int i = 0; i < dynamic_nodes_cnt; i++) {
+ const char *uname;
+ int err;
+
+ child = dynamic_nodes[i];
+ uname = fdt_get_name(fdt, child, NULL);
+ err = __reserved_mem_alloc_size(child, uname);
+ if (!err)
+ count++;
+ }
+ total_reserved_mem_cnt = count;
+ return 0;
+}
+
/*
* __reserved_mem_alloc_in_range() - allocate reserved memory described with
* 'alloc-ranges'. Choose bottom-up/top-down depending on nearby existing
@@ -132,8 +393,7 @@ static int __init __reserved_mem_alloc_in_range(phys_addr_t size,
* __reserved_mem_alloc_size() - allocate reserved memory described by
* 'size', 'alignment' and 'alloc-ranges' properties.
*/
-static int __init __reserved_mem_alloc_size(unsigned long node,
- const char *uname, phys_addr_t *res_base, phys_addr_t *res_size)
+static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname)
{
int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
phys_addr_t start = 0, end = 0;
@@ -181,13 +441,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
return -EINVAL;
}
- base = 0;
-
while (len > 0) {
start = dt_mem_next_cell(dt_root_addr_cells, &prop);
end = start + dt_mem_next_cell(dt_root_size_cells,
&prop);
+ base = 0;
ret = __reserved_mem_alloc_in_range(size, align,
start, end, nomap, &base);
if (ret == 0) {
@@ -213,9 +472,8 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
return -ENOMEM;
}
- *res_base = base;
- *res_size = size;
-
+ /* Save region in the reserved_mem array */
+ fdt_reserved_mem_save_node(node, uname, base, size);
return 0;
}
@@ -304,71 +562,40 @@ static void __init __rmem_check_for_overlap(void)
}
/**
- * fdt_init_reserved_mem() - allocate and init all saved reserved memory regions
+ * fdt_init_reserved_mem_node() - Initialize a reserved memory region
+ * @rmem: reserved_mem struct of the memory region to be initialized.
+ *
+ * This function is used to call the region specific initialization
+ * function for a reserved memory region.
*/
-void __init fdt_init_reserved_mem(void)
+static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem)
{
- int i;
-
- /* check for overlapping reserved regions */
- __rmem_check_for_overlap();
+ unsigned long node = rmem->fdt_node;
+ int err = 0;
+ bool nomap;
- for (i = 0; i < reserved_mem_count; i++) {
- struct reserved_mem *rmem = &reserved_mem[i];
- unsigned long node = rmem->fdt_node;
- int len;
- const __be32 *prop;
- int err = 0;
- bool nomap;
-
- nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
- prop = of_get_flat_dt_prop(node, "phandle", &len);
- if (!prop)
- prop = of_get_flat_dt_prop(node, "linux,phandle", &len);
- if (prop)
- rmem->phandle = of_read_number(prop, len/4);
+ nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
- if (rmem->size == 0)
- err = __reserved_mem_alloc_size(node, rmem->name,
- &rmem->base, &rmem->size);
- if (err == 0) {
- err = __reserved_mem_init_node(rmem);
- if (err != 0 && err != -ENOENT) {
- pr_info("node %s compatible matching fail\n",
- rmem->name);
- if (nomap)
- memblock_clear_nomap(rmem->base, rmem->size);
- else
- memblock_phys_free(rmem->base,
- rmem->size);
- } else {
- phys_addr_t end = rmem->base + rmem->size - 1;
- bool reusable =
- (of_get_flat_dt_prop(node, "reusable", NULL)) != NULL;
-
- pr_info("%pa..%pa (%lu KiB) %s %s %s\n",
- &rmem->base, &end, (unsigned long)(rmem->size / SZ_1K),
- nomap ? "nomap" : "map",
- reusable ? "reusable" : "non-reusable",
- rmem->name ? rmem->name : "unknown");
- }
- }
+ err = __reserved_mem_init_node(rmem);
+ if (err != 0 && err != -ENOENT) {
+ pr_info("node %s compatible matching fail\n", rmem->name);
+ if (nomap)
+ memblock_clear_nomap(rmem->base, rmem->size);
+ else
+ memblock_phys_free(rmem->base, rmem->size);
+ } else {
+ phys_addr_t end = rmem->base + rmem->size - 1;
+ bool reusable =
+ (of_get_flat_dt_prop(node, "reusable", NULL)) != NULL;
+
+ pr_info("%pa..%pa (%lu KiB) %s %s %s\n",
+ &rmem->base, &end, (unsigned long)(rmem->size / SZ_1K),
+ nomap ? "nomap" : "map",
+ reusable ? "reusable" : "non-reusable",
+ rmem->name ? rmem->name : "unknown");
}
}
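/*
 * Sketch of the other half of this hand-off, for illustration only: the
 * compatible string and hooks below are hypothetical, not from this patch.
 * __reserved_mem_init_node() matches a region's "compatible" against the
 * RESERVEDMEM_OF_DECLARE() table, so by the time fdt_init_reserved_mem_node()
 * returns, a matching region driver has seen the final base/size.
 */
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of_reserved_mem.h>

static int example_pool_device_init(struct reserved_mem *rmem, struct device *dev)
{
	dev_info(dev, "attached to reserved region %pa, size %pa\n",
		 &rmem->base, &rmem->size);
	return 0;
}

static const struct reserved_mem_ops example_pool_ops = {
	.device_init = example_pool_device_init,
};

static int __init example_pool_setup(struct reserved_mem *rmem)
{
	rmem->ops = &example_pool_ops;
	return 0;
}
RESERVEDMEM_OF_DECLARE(example_pool, "vendor,example-pool", example_pool_setup);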
-static inline struct reserved_mem *__find_rmem(struct device_node *node)
-{
- unsigned int i;
-
- if (!node->phandle)
- return NULL;
-
- for (i = 0; i < reserved_mem_count; i++)
- if (reserved_mem[i].phandle == node->phandle)
- return &reserved_mem[i];
- return NULL;
-}
-
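/*
 * Consumer-side sketch, hypothetical driver, not part of this patch: a device
 * that references a region through a "memory-region" phandle attaches it from
 * probe().  With this change the lookup behind
 * of_reserved_mem_device_init_by_idx() uses of_reserved_mem_lookup() (see the
 * hunk below) instead of the __find_rmem() phandle scan removed above.
 */
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	int ret;

	/* idx 0: first entry of this node's "memory-region" property */
	ret = of_reserved_mem_device_init_by_idx(&pdev->dev, pdev->dev.of_node, 0);
	if (ret)
		return dev_err_probe(&pdev->dev, ret, "no usable reserved region\n");

	return 0;
}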
struct rmem_assigned_device {
struct device *dev;
struct reserved_mem *rmem;
@@ -413,7 +640,7 @@ int of_reserved_mem_device_init_by_idx(struct device *dev,
return 0;
}
- rmem = __find_rmem(target);
+ rmem = of_reserved_mem_lookup(target);
of_node_put(target);
if (!rmem || !rmem->ops || !rmem->ops->device_init)