Diffstat (limited to 'tools/perf/util/maps.c')
 tools/perf/util/maps.c | 1400 ++++++++++++++++++++++++++++++++++--------
 1 file changed, 1133 insertions(+), 267 deletions(-)
diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c
index 233438c95b53..c321d4f4d846 100644
--- a/tools/perf/util/maps.c
+++ b/tools/perf/util/maps.c
@@ -6,158 +6,240 @@
#include "dso.h"
#include "map.h"
#include "maps.h"
+#include "rwsem.h"
#include "thread.h"
#include "ui/ui.h"
#include "unwind.h"
+#include <internal/rc_check.h>
-static void maps__init(struct maps *maps, struct machine *machine)
+/*
+ * Locking/sorting note:
+ *
+ * Sorting is done with the write lock held; iteration and binary searching
+ * happen under the read lock and require the maps to be sorted. There is a
+ * race between sorting releasing the write lock and acquiring the read lock
+ * for iteration/searching, where another thread could insert and break the
+ * sorting of the maps. In practice inserting maps should be rare, meaning
+ * that the race shouldn't lead to livelock. Removal of maps doesn't break
+ * them being sorted.
+ */
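
In code, the note boils down to the following reader-side retry pattern (a
minimal sketch of what maps__find() and maps__for_each_map() later in this
file actually do, using this file's own helpers):

	bool done = false;

	while (!done) {
		down_read(maps__lock(maps));
		if (maps__maps_by_address_sorted(maps)) {
			/* ... iterate or binary search under the read lock ... */
			done = true;
		}
		up_read(maps__lock(maps));
		if (!done)
			maps__sort_by_address(maps); /* takes the write lock */
	}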
+
+DECLARE_RC_STRUCT(maps) {
+ struct rw_semaphore lock;
+ /**
+ * @maps_by_address: array of maps sorted by their starting address if
+ * maps_by_address_sorted is true.
+ */
+ struct map **maps_by_address;
+ /**
+ * @maps_by_name: optional array of maps sorted by their dso name if
+ * maps_by_name_sorted is true.
+ */
+ struct map **maps_by_name;
+ struct machine *machine;
+#ifdef HAVE_LIBUNWIND_SUPPORT
+ void *addr_space;
+ const struct unwind_libunwind_ops *unwind_libunwind_ops;
+#endif
+ refcount_t refcnt;
+ /**
+ * @nr_maps: number of maps_by_address, and possibly maps_by_name,
+ * entries that contain maps.
+ */
+ unsigned int nr_maps;
+ /**
+ * @nr_maps_allocated: number of entries in maps_by_address and possibly
+ * maps_by_name.
+ */
+ unsigned int nr_maps_allocated;
+ /**
+ * @last_search_by_name_idx: cache of last found by name entry's index
+ * as frequent searches for the same dso name are common.
+ */
+ unsigned int last_search_by_name_idx;
+ /** @maps_by_address_sorted: is maps_by_address sorted. */
+ bool maps_by_address_sorted;
+ /** @maps_by_name_sorted: is maps_by_name sorted. */
+ bool maps_by_name_sorted;
+	/** @ends_broken: do the maps contain a map whose end is unset or out of order? */
+ bool ends_broken;
+};
+
+static void check_invariants(const struct maps *maps __maybe_unused)
{
- refcount_set(maps__refcnt(maps), 1);
- init_rwsem(maps__lock(maps));
- RC_CHK_ACCESS(maps)->entries = RB_ROOT;
- RC_CHK_ACCESS(maps)->machine = machine;
- RC_CHK_ACCESS(maps)->last_search_by_name = NULL;
- RC_CHK_ACCESS(maps)->nr_maps = 0;
- RC_CHK_ACCESS(maps)->maps_by_name = NULL;
+#ifndef NDEBUG
+ assert(RC_CHK_ACCESS(maps)->nr_maps <= RC_CHK_ACCESS(maps)->nr_maps_allocated);
+ for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) {
+ struct map *map = RC_CHK_ACCESS(maps)->maps_by_address[i];
+
+ /* Check map is well-formed. */
+ assert(map__end(map) == 0 || map__start(map) <= map__end(map));
+ /* Expect at least 1 reference count. */
+ assert(refcount_read(map__refcnt(map)) > 0);
+
+ if (map__dso(map) && dso__kernel(map__dso(map)))
+ assert(RC_CHK_EQUAL(map__kmap(map)->kmaps, maps));
+
+ if (i > 0) {
+ struct map *prev = RC_CHK_ACCESS(maps)->maps_by_address[i - 1];
+
+ /* If addresses are sorted... */
+ if (RC_CHK_ACCESS(maps)->maps_by_address_sorted) {
+ /* Maps should be in start address order. */
+ assert(map__start(prev) <= map__start(map));
+ /*
+ * If the ends of maps aren't broken (during
+ * construction) then they should be ordered
+ * too.
+ */
+ if (!RC_CHK_ACCESS(maps)->ends_broken) {
+ assert(map__end(prev) <= map__end(map));
+ assert(map__end(prev) <= map__start(map) ||
+ map__start(prev) == map__start(map));
+ }
+ }
+ }
+ }
+ if (RC_CHK_ACCESS(maps)->maps_by_name) {
+ for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) {
+ struct map *map = RC_CHK_ACCESS(maps)->maps_by_name[i];
+
+ /*
+ * Maps by name maps should be in maps_by_address, so
+ * the reference count should be higher.
+ */
+ assert(refcount_read(map__refcnt(map)) > 1);
+ }
+ }
+#endif
}
-static void __maps__free_maps_by_name(struct maps *maps)
+static struct map **maps__maps_by_address(const struct maps *maps)
{
- /*
- * Free everything to try to do it from the rbtree in the next search
- */
- for (unsigned int i = 0; i < maps__nr_maps(maps); i++)
- map__put(maps__maps_by_name(maps)[i]);
-
- zfree(&RC_CHK_ACCESS(maps)->maps_by_name);
- RC_CHK_ACCESS(maps)->nr_maps_allocated = 0;
+ return RC_CHK_ACCESS(maps)->maps_by_address;
}
-static int __maps__insert(struct maps *maps, struct map *map)
+static void maps__set_maps_by_address(struct maps *maps, struct map **new)
{
- struct rb_node **p = &maps__entries(maps)->rb_node;
- struct rb_node *parent = NULL;
- const u64 ip = map__start(map);
- struct map_rb_node *m, *new_rb_node;
-
- new_rb_node = malloc(sizeof(*new_rb_node));
- if (!new_rb_node)
- return -ENOMEM;
+ RC_CHK_ACCESS(maps)->maps_by_address = new;
- RB_CLEAR_NODE(&new_rb_node->rb_node);
- new_rb_node->map = map__get(map);
+}
- while (*p != NULL) {
- parent = *p;
- m = rb_entry(parent, struct map_rb_node, rb_node);
- if (ip < map__start(m->map))
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
+static void maps__set_nr_maps_allocated(struct maps *maps, unsigned int nr_maps_allocated)
+{
+ RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_maps_allocated;
+}
- rb_link_node(&new_rb_node->rb_node, parent, p);
- rb_insert_color(&new_rb_node->rb_node, maps__entries(maps));
- return 0;
+static void maps__set_nr_maps(struct maps *maps, unsigned int nr_maps)
+{
+ RC_CHK_ACCESS(maps)->nr_maps = nr_maps;
}
-int maps__insert(struct maps *maps, struct map *map)
+/* Not in the header, to aid reference counting. */
+static struct map **maps__maps_by_name(const struct maps *maps)
{
- int err;
- const struct dso *dso = map__dso(map);
+ return RC_CHK_ACCESS(maps)->maps_by_name;
- down_write(maps__lock(maps));
- err = __maps__insert(maps, map);
- if (err)
- goto out;
+}
- ++RC_CHK_ACCESS(maps)->nr_maps;
+static void maps__set_maps_by_name(struct maps *maps, struct map **new)
+{
+ RC_CHK_ACCESS(maps)->maps_by_name = new;
- if (dso && dso->kernel) {
- struct kmap *kmap = map__kmap(map);
+}
- if (kmap)
- kmap->kmaps = maps;
- else
- pr_err("Internal error: kernel dso with non kernel map\n");
- }
+static bool maps__maps_by_address_sorted(const struct maps *maps)
+{
+ return RC_CHK_ACCESS(maps)->maps_by_address_sorted;
+}
+static void maps__set_maps_by_address_sorted(struct maps *maps, bool value)
+{
+ RC_CHK_ACCESS(maps)->maps_by_address_sorted = value;
+}
- /*
- * If we already performed some search by name, then we need to add the just
- * inserted map and resort.
- */
- if (maps__maps_by_name(maps)) {
- if (maps__nr_maps(maps) > RC_CHK_ACCESS(maps)->nr_maps_allocated) {
- int nr_allocate = maps__nr_maps(maps) * 2;
- struct map **maps_by_name = realloc(maps__maps_by_name(maps),
- nr_allocate * sizeof(map));
+static bool maps__maps_by_name_sorted(const struct maps *maps)
+{
+ return RC_CHK_ACCESS(maps)->maps_by_name_sorted;
+}
- if (maps_by_name == NULL) {
- __maps__free_maps_by_name(maps);
- err = -ENOMEM;
- goto out;
- }
+static void maps__set_maps_by_name_sorted(struct maps *maps, bool value)
+{
+ RC_CHK_ACCESS(maps)->maps_by_name_sorted = value;
+}
- RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name;
- RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
- }
- maps__maps_by_name(maps)[maps__nr_maps(maps) - 1] = map__get(map);
- __maps__sort_by_name(maps);
- }
- out:
- up_write(maps__lock(maps));
- return err;
+struct machine *maps__machine(const struct maps *maps)
+{
+ return RC_CHK_ACCESS(maps)->machine;
}
-static void __maps__remove(struct maps *maps, struct map_rb_node *rb_node)
+unsigned int maps__nr_maps(const struct maps *maps)
{
- rb_erase_init(&rb_node->rb_node, maps__entries(maps));
- map__put(rb_node->map);
- free(rb_node);
+ return RC_CHK_ACCESS(maps)->nr_maps;
}
-void maps__remove(struct maps *maps, struct map *map)
+refcount_t *maps__refcnt(struct maps *maps)
{
- struct map_rb_node *rb_node;
+ return &RC_CHK_ACCESS(maps)->refcnt;
+}
- down_write(maps__lock(maps));
- if (RC_CHK_ACCESS(maps)->last_search_by_name == map)
- RC_CHK_ACCESS(maps)->last_search_by_name = NULL;
-
- rb_node = maps__find_node(maps, map);
- assert(rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map));
- __maps__remove(maps, rb_node);
- if (maps__maps_by_name(maps))
- __maps__free_maps_by_name(maps);
- --RC_CHK_ACCESS(maps)->nr_maps;
- up_write(maps__lock(maps));
+#ifdef HAVE_LIBUNWIND_SUPPORT
+void *maps__addr_space(const struct maps *maps)
+{
+ return RC_CHK_ACCESS(maps)->addr_space;
+}
+
+void maps__set_addr_space(struct maps *maps, void *addr_space)
+{
+ RC_CHK_ACCESS(maps)->addr_space = addr_space;
}
-static void __maps__purge(struct maps *maps)
+const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps)
{
- struct map_rb_node *pos, *next;
+ return RC_CHK_ACCESS(maps)->unwind_libunwind_ops;
+}
- if (maps__maps_by_name(maps))
- __maps__free_maps_by_name(maps);
+void maps__set_unwind_libunwind_ops(struct maps *maps, const struct unwind_libunwind_ops *ops)
+{
+ RC_CHK_ACCESS(maps)->unwind_libunwind_ops = ops;
+}
+#endif
- maps__for_each_entry_safe(maps, pos, next) {
- rb_erase_init(&pos->rb_node, maps__entries(maps));
- map__put(pos->map);
- free(pos);
- }
+static struct rw_semaphore *maps__lock(struct maps *maps)
+{
+ return &RC_CHK_ACCESS(maps)->lock;
}
-static void maps__exit(struct maps *maps)
+static void maps__init(struct maps *maps, struct machine *machine)
{
- down_write(maps__lock(maps));
- __maps__purge(maps);
- up_write(maps__lock(maps));
+ init_rwsem(maps__lock(maps));
+ RC_CHK_ACCESS(maps)->maps_by_address = NULL;
+ RC_CHK_ACCESS(maps)->maps_by_name = NULL;
+ RC_CHK_ACCESS(maps)->machine = machine;
+#ifdef HAVE_LIBUNWIND_SUPPORT
+ RC_CHK_ACCESS(maps)->addr_space = NULL;
+ RC_CHK_ACCESS(maps)->unwind_libunwind_ops = NULL;
+#endif
+ refcount_set(maps__refcnt(maps), 1);
+ RC_CHK_ACCESS(maps)->nr_maps = 0;
+ RC_CHK_ACCESS(maps)->nr_maps_allocated = 0;
+ RC_CHK_ACCESS(maps)->last_search_by_name_idx = 0;
+ RC_CHK_ACCESS(maps)->maps_by_address_sorted = true;
+ RC_CHK_ACCESS(maps)->maps_by_name_sorted = false;
}
-bool maps__empty(struct maps *maps)
+static void maps__exit(struct maps *maps)
{
- return !maps__first(maps);
+ struct map **maps_by_address = maps__maps_by_address(maps);
+ struct map **maps_by_name = maps__maps_by_name(maps);
+
+ for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
+ map__zput(maps_by_address[i]);
+ if (maps_by_name)
+ map__zput(maps_by_name[i]);
+ }
+ zfree(&maps_by_address);
+ zfree(&maps_by_name);
+ unwind__finish_access(maps);
}
struct maps *maps__new(struct machine *machine)
@@ -174,7 +256,6 @@ struct maps *maps__new(struct machine *machine)
static void maps__delete(struct maps *maps)
{
maps__exit(maps);
- unwind__finish_access(maps);
RC_CHK_FREE(maps);
}
@@ -196,45 +277,398 @@ void maps__put(struct maps *maps)
RC_CHK_PUT(maps);
}
-struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
+static void __maps__free_maps_by_name(struct maps *maps)
{
- struct map *map = maps__find(maps, addr);
+ if (!maps__maps_by_name(maps))
+ return;
- /* Ensure map is loaded before using map->map_ip */
- if (map != NULL && map__load(map) >= 0) {
- if (mapp != NULL)
- *mapp = map;
- return map__find_symbol(map, map__map_ip(map, addr));
+	/*
+	 * Free everything so the array is rebuilt on the next search by name.
+	 */
+ for (unsigned int i = 0; i < maps__nr_maps(maps); i++)
+ map__put(maps__maps_by_name(maps)[i]);
+
+ zfree(&RC_CHK_ACCESS(maps)->maps_by_name);
+
+ /* Consistent with maps__init(). When maps_by_name == NULL, maps_by_name_sorted == false */
+ maps__set_maps_by_name_sorted(maps, false);
+}
+
+static int map__start_cmp(const void *a, const void *b)
+{
+ const struct map *map_a = *(const struct map * const *)a;
+ const struct map *map_b = *(const struct map * const *)b;
+ u64 map_a_start = map__start(map_a);
+ u64 map_b_start = map__start(map_b);
+
+ if (map_a_start == map_b_start) {
+ u64 map_a_end = map__end(map_a);
+ u64 map_b_end = map__end(map_b);
+
+ if (map_a_end == map_b_end) {
+ /* Ensure maps with the same addresses have a fixed order. */
+ if (RC_CHK_ACCESS(map_a) == RC_CHK_ACCESS(map_b))
+ return 0;
+ return (intptr_t)RC_CHK_ACCESS(map_a) > (intptr_t)RC_CHK_ACCESS(map_b)
+ ? 1 : -1;
+ }
+ return map_a_end > map_b_end ? 1 : -1;
}
+ return map_a_start > map_b_start ? 1 : -1;
+}
- return NULL;
+static void __maps__sort_by_address(struct maps *maps)
+{
+ if (maps__maps_by_address_sorted(maps))
+ return;
+
+ qsort(maps__maps_by_address(maps),
+ maps__nr_maps(maps),
+ sizeof(struct map *),
+ map__start_cmp);
+ maps__set_maps_by_address_sorted(maps, true);
}
-struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
+static void maps__sort_by_address(struct maps *maps)
{
- struct symbol *sym;
- struct map_rb_node *pos;
+ down_write(maps__lock(maps));
+ __maps__sort_by_address(maps);
+ up_write(maps__lock(maps));
+}
- down_read(maps__lock(maps));
+static int map__strcmp(const void *a, const void *b)
+{
+ const struct map *map_a = *(const struct map * const *)a;
+ const struct map *map_b = *(const struct map * const *)b;
+ const struct dso *dso_a = map__dso(map_a);
+ const struct dso *dso_b = map__dso(map_b);
+ int ret = strcmp(dso__short_name(dso_a), dso__short_name(dso_b));
+
+ if (ret == 0 && RC_CHK_ACCESS(map_a) != RC_CHK_ACCESS(map_b)) {
+ /* Ensure distinct but name equal maps have an order. */
+ return map__start_cmp(a, b);
+ }
+ return ret;
+}
+
+static int maps__sort_by_name(struct maps *maps)
+{
+ int err = 0;
+
+ down_write(maps__lock(maps));
+ if (!maps__maps_by_name_sorted(maps)) {
+ struct map **maps_by_name = maps__maps_by_name(maps);
+
+ if (!maps_by_name) {
+ maps_by_name = malloc(RC_CHK_ACCESS(maps)->nr_maps_allocated *
+ sizeof(*maps_by_name));
+ if (!maps_by_name)
+ err = -ENOMEM;
+ else {
+ struct map **maps_by_address = maps__maps_by_address(maps);
+ unsigned int n = maps__nr_maps(maps);
+
+ maps__set_maps_by_name(maps, maps_by_name);
+ for (unsigned int i = 0; i < n; i++)
+ maps_by_name[i] = map__get(maps_by_address[i]);
+ }
+ }
+ if (!err) {
+ qsort(maps_by_name,
+ maps__nr_maps(maps),
+ sizeof(struct map *),
+ map__strcmp);
+ maps__set_maps_by_name_sorted(maps, true);
+ }
+ }
+ check_invariants(maps);
+ up_write(maps__lock(maps));
+ return err;
+}
+
+static unsigned int maps__by_address_index(const struct maps *maps, const struct map *map)
+{
+ struct map **maps_by_address = maps__maps_by_address(maps);
+
+ if (maps__maps_by_address_sorted(maps)) {
+ struct map **mapp =
+ bsearch(&map, maps__maps_by_address(maps), maps__nr_maps(maps),
+ sizeof(*mapp), map__start_cmp);
+
+ if (mapp)
+ return mapp - maps_by_address;
+ } else {
+ for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
+ if (RC_CHK_ACCESS(maps_by_address[i]) == RC_CHK_ACCESS(map))
+ return i;
+ }
+ }
+	pr_err("Map missing from maps\n");
+ return -1;
+}
+
+static unsigned int maps__by_name_index(const struct maps *maps, const struct map *map)
+{
+ struct map **maps_by_name = maps__maps_by_name(maps);
+
+ if (maps__maps_by_name_sorted(maps)) {
+ struct map **mapp =
+ bsearch(&map, maps_by_name, maps__nr_maps(maps),
+ sizeof(*mapp), map__strcmp);
+
+ if (mapp)
+ return mapp - maps_by_name;
+ } else {
+ for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
+ if (RC_CHK_ACCESS(maps_by_name[i]) == RC_CHK_ACCESS(map))
+ return i;
+ }
+ }
+	pr_err("Map missing from maps\n");
+ return -1;
+}
- maps__for_each_entry(maps, pos) {
- sym = map__find_symbol_by_name(pos->map, name);
+static void map__set_kmap_maps(struct map *map, struct maps *maps)
+{
+ struct dso *dso;
+
+ if (map == NULL)
+ return;
+
+ dso = map__dso(map);
+
+ if (dso && dso__kernel(dso)) {
+ struct kmap *kmap = map__kmap(map);
+
+ if (kmap)
+ kmap->kmaps = maps;
+ else
+ pr_err("Internal error: kernel dso with non kernel map\n");
+ }
+}
- if (sym == NULL)
- continue;
- if (!map__contains_symbol(pos->map, sym)) {
- sym = NULL;
- continue;
+static int __maps__insert(struct maps *maps, struct map *new)
+{
+ struct map **maps_by_address = maps__maps_by_address(maps);
+ struct map **maps_by_name = maps__maps_by_name(maps);
+ unsigned int nr_maps = maps__nr_maps(maps);
+ unsigned int nr_allocate = RC_CHK_ACCESS(maps)->nr_maps_allocated;
+
+ if (nr_maps + 1 > nr_allocate) {
+ nr_allocate = !nr_allocate ? 32 : nr_allocate * 2;
+
+ maps_by_address = realloc(maps_by_address, nr_allocate * sizeof(new));
+ if (!maps_by_address)
+ return -ENOMEM;
+
+ maps__set_maps_by_address(maps, maps_by_address);
+ if (maps_by_name) {
+ maps_by_name = realloc(maps_by_name, nr_allocate * sizeof(new));
+ if (!maps_by_name) {
+ /*
+ * If by name fails, just disable by name and it will
+ * recompute next time it is required.
+ */
+ __maps__free_maps_by_name(maps);
+ }
+ maps__set_maps_by_name(maps, maps_by_name);
}
- if (mapp != NULL)
- *mapp = pos->map;
- goto out;
+ RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
+ }
+ /* Insert the value at the end. */
+ maps_by_address[nr_maps] = map__get(new);
+ map__set_kmap_maps(new, maps);
+ if (maps_by_name)
+ maps_by_name[nr_maps] = map__get(new);
+
+ nr_maps++;
+ RC_CHK_ACCESS(maps)->nr_maps = nr_maps;
+
+ /*
+ * Recompute if things are sorted. If things are inserted in a sorted
+ * manner, for example by processing /proc/pid/maps, then no
+ * sorting/resorting will be necessary.
+ */
+ if (nr_maps == 1) {
+ /* If there's just 1 entry then maps are sorted. */
+ maps__set_maps_by_address_sorted(maps, true);
+ maps__set_maps_by_name_sorted(maps, maps_by_name != NULL);
+ } else {
+ /* Sorted if maps were already sorted and this map starts after the last one. */
+ maps__set_maps_by_address_sorted(maps,
+ maps__maps_by_address_sorted(maps) &&
+ map__end(maps_by_address[nr_maps - 2]) <= map__start(new));
+ maps__set_maps_by_name_sorted(maps, false);
+ }
+ if (map__end(new) < map__start(new))
+ RC_CHK_ACCESS(maps)->ends_broken = true;
+
+ return 0;
+}
+
+int maps__insert(struct maps *maps, struct map *map)
+{
+ int ret;
+
+ down_write(maps__lock(maps));
+ ret = __maps__insert(maps, map);
+ check_invariants(maps);
+ up_write(maps__lock(maps));
+ return ret;
+}
+
+static void __maps__remove(struct maps *maps, struct map *map)
+{
+ struct map **maps_by_address = maps__maps_by_address(maps);
+ struct map **maps_by_name = maps__maps_by_name(maps);
+ unsigned int nr_maps = maps__nr_maps(maps);
+ unsigned int address_idx;
+
+ /* Slide later mappings over the one to remove */
+ address_idx = maps__by_address_index(maps, map);
+ map__put(maps_by_address[address_idx]);
+ memmove(&maps_by_address[address_idx],
+ &maps_by_address[address_idx + 1],
+ (nr_maps - address_idx - 1) * sizeof(*maps_by_address));
+
+ if (maps_by_name) {
+ unsigned int name_idx = maps__by_name_index(maps, map);
+
+ map__put(maps_by_name[name_idx]);
+ memmove(&maps_by_name[name_idx],
+ &maps_by_name[name_idx + 1],
+ (nr_maps - name_idx - 1) * sizeof(*maps_by_name));
}
- sym = NULL;
-out:
+ --RC_CHK_ACCESS(maps)->nr_maps;
+}
+
+void maps__remove(struct maps *maps, struct map *map)
+{
+ down_write(maps__lock(maps));
+ __maps__remove(maps, map);
+ check_invariants(maps);
+ up_write(maps__lock(maps));
+}
+
+bool maps__empty(struct maps *maps)
+{
+ bool res;
+
+ down_read(maps__lock(maps));
+ res = maps__nr_maps(maps) == 0;
up_read(maps__lock(maps));
- return sym;
+
+ return res;
+}
+
+bool maps__equal(struct maps *a, struct maps *b)
+{
+ return RC_CHK_EQUAL(a, b);
+}
+
+int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data)
+{
+ bool done = false;
+ int ret = 0;
+
+ /* See locking/sorting note. */
+ while (!done) {
+ down_read(maps__lock(maps));
+ if (maps__maps_by_address_sorted(maps)) {
+ /*
+ * maps__for_each_map callbacks may buggily/unsafely
+ * insert into maps_by_address. Deliberately reload
+ * maps__nr_maps and maps_by_address on each iteration
+ * to avoid using memory freed by maps__insert growing
+ * the array - this may cause maps to be skipped or
+ * repeated.
+ */
+ for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
+ struct map **maps_by_address = maps__maps_by_address(maps);
+ struct map *map = maps_by_address[i];
+
+ ret = cb(map, data);
+ if (ret)
+ break;
+ }
+ done = true;
+ }
+ up_read(maps__lock(maps));
+ if (!done)
+ maps__sort_by_address(maps);
+ }
+ return ret;
+}
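
Iteration is now callback-based rather than macro-based. A hypothetical caller
(the callback name and counting logic are illustrative, not part of this
patch) would look like:

	static int count_kernel_maps_cb(struct map *map, void *data)
	{
		unsigned int *count = data;
		struct dso *dso = map__dso(map);

		if (dso && dso__kernel(dso))
			(*count)++;
		return 0;	/* a non-zero return stops the iteration */
	}

	...
		unsigned int count = 0;

		maps__for_each_map(maps, count_kernel_maps_cb, &count);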
+
+void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data)
+{
+ struct map **maps_by_address;
+
+ down_write(maps__lock(maps));
+
+ maps_by_address = maps__maps_by_address(maps);
+ for (unsigned int i = 0; i < maps__nr_maps(maps);) {
+ if (cb(maps_by_address[i], data))
+ __maps__remove(maps, maps_by_address[i]);
+ else
+ i++;
+ }
+ check_invariants(maps);
+ up_write(maps__lock(maps));
+}
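
maps__remove_maps() instead takes a predicate: entries for which the callback
returns true are removed under a single write lock. A hypothetical use
(illustrative only), dropping every map that has no dso:

	static bool remove_dso_less_map_cb(struct map *map, void *data __maybe_unused)
	{
		return map__dso(map) == NULL;
	}

	...
		maps__remove_maps(maps, remove_dso_less_map_cb, NULL);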
+
+struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
+{
+ struct map *map = maps__find(maps, addr);
+ struct symbol *result = NULL;
+
+ /* Ensure map is loaded before using map->map_ip */
+ if (map != NULL && map__load(map) >= 0)
+ result = map__find_symbol(map, map__map_ip(map, addr));
+
+ if (mapp)
+ *mapp = map;
+ else
+ map__put(map);
+
+ return result;
+}
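
Note the ownership contract above: maps__find() returns a reference that is
either handed to the caller through *mapp or dropped with map__put(). A
hypothetical caller (error handling elided; map__put() tolerates NULL) would
pair it like so:

	struct map *map = NULL;
	struct symbol *sym = maps__find_symbol(maps, addr, &map);

	if (sym)
		pr_debug("found %s\n", sym->name);
	map__put(map);	/* release the reference returned via *mapp */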
+
+struct maps__find_symbol_by_name_args {
+ struct map **mapp;
+ const char *name;
+ struct symbol *sym;
+};
+
+static int maps__find_symbol_by_name_cb(struct map *map, void *data)
+{
+ struct maps__find_symbol_by_name_args *args = data;
+
+ args->sym = map__find_symbol_by_name(map, args->name);
+ if (!args->sym)
+ return 0;
+
+ if (!map__contains_symbol(map, args->sym)) {
+ args->sym = NULL;
+ return 0;
+ }
+
+ if (args->mapp != NULL)
+ *args->mapp = map__get(map);
+ return 1;
+}
+
+struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
+{
+ struct maps__find_symbol_by_name_args args = {
+ .mapp = mapp,
+ .name = name,
+ .sym = NULL,
+ };
+
+ maps__for_each_map(maps, maps__find_symbol_by_name_cb, &args);
+ return args.sym;
}
int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
@@ -253,225 +687,657 @@ int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
return ams->ms.sym ? 0 : -1;
}
-size_t maps__fprintf(struct maps *maps, FILE *fp)
-{
- size_t printed = 0;
- struct map_rb_node *pos;
+struct maps__fprintf_args {
+ FILE *fp;
+ size_t printed;
+};
- down_read(maps__lock(maps));
+static int maps__fprintf_cb(struct map *map, void *data)
+{
+ struct maps__fprintf_args *args = data;
- maps__for_each_entry(maps, pos) {
- printed += fprintf(fp, "Map:");
- printed += map__fprintf(pos->map, fp);
- if (verbose > 2) {
- printed += dso__fprintf(map__dso(pos->map), fp);
- printed += fprintf(fp, "--\n");
- }
+ args->printed += fprintf(args->fp, "Map:");
+ args->printed += map__fprintf(map, args->fp);
+ if (verbose > 2) {
+ args->printed += dso__fprintf(map__dso(map), args->fp);
+ args->printed += fprintf(args->fp, "--\n");
}
+ return 0;
+}
- up_read(maps__lock(maps));
+size_t maps__fprintf(struct maps *maps, FILE *fp)
+{
+ struct maps__fprintf_args args = {
+ .fp = fp,
+ .printed = 0,
+ };
- return printed;
+ maps__for_each_map(maps, maps__fprintf_cb, &args);
+
+ return args.printed;
}
-int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
+/*
+ * Find first map where end > map->start.
+ * Same as find_vma() in kernel.
+ */
+static unsigned int first_ending_after(struct maps *maps, const struct map *map)
{
- struct rb_root *root;
- struct rb_node *next, *first;
- int err = 0;
+ struct map **maps_by_address = maps__maps_by_address(maps);
+ int low = 0, high = (int)maps__nr_maps(maps) - 1, first = high + 1;
- down_write(maps__lock(maps));
+ assert(maps__maps_by_address_sorted(maps));
+ if (low <= high && map__end(maps_by_address[0]) > map__start(map))
+ return 0;
- root = maps__entries(maps);
+ while (low <= high) {
+ int mid = (low + high) / 2;
+ struct map *pos = maps_by_address[mid];
- /*
- * Find first map where end > map->start.
- * Same as find_vma() in kernel.
- */
- next = root->rb_node;
- first = NULL;
- while (next) {
- struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);
-
- if (map__end(pos->map) > map__start(map)) {
- first = next;
- if (map__start(pos->map) <= map__start(map))
+ if (map__end(pos) > map__start(map)) {
+ first = mid;
+ if (map__start(pos) <= map__start(map)) {
+ /* Entry overlaps map. */
break;
- next = next->rb_left;
+ }
+ high = mid - 1;
} else
- next = next->rb_right;
+ low = mid + 1;
}
+ return first;
+}
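
A worked example with hypothetical addresses: given maps_by_address spanning
[0x1000,0x2000), [0x2000,0x3000) and [0x5000,0x6000), a new map
[0x2800,0x5800) makes first_ending_after() return index 1, the first entry
whose end (0x3000) is greater than the new map's start (0x2800); the entry at
index 2 also overlaps and is left for the caller's loop to handle.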
+
+static int __maps__insert_sorted(struct maps *maps, unsigned int first_after_index,
+ struct map *new1, struct map *new2)
+{
+ struct map **maps_by_address = maps__maps_by_address(maps);
+ struct map **maps_by_name = maps__maps_by_name(maps);
+ unsigned int nr_maps = maps__nr_maps(maps);
+ unsigned int nr_allocate = RC_CHK_ACCESS(maps)->nr_maps_allocated;
+ unsigned int to_add = new2 ? 2 : 1;
+
+ assert(maps__maps_by_address_sorted(maps));
+ assert(first_after_index == nr_maps ||
+ map__end(new1) <= map__start(maps_by_address[first_after_index]));
+ assert(!new2 || map__end(new1) <= map__start(new2));
+ assert(first_after_index == nr_maps || !new2 ||
+ map__end(new2) <= map__start(maps_by_address[first_after_index]));
+
+ if (nr_maps + to_add > nr_allocate) {
+ nr_allocate = !nr_allocate ? 32 : nr_allocate * 2;
+
+ maps_by_address = realloc(maps_by_address, nr_allocate * sizeof(new1));
+ if (!maps_by_address)
+ return -ENOMEM;
+
+ maps__set_maps_by_address(maps, maps_by_address);
+ if (maps_by_name) {
+ maps_by_name = realloc(maps_by_name, nr_allocate * sizeof(new1));
+ if (!maps_by_name) {
+ /*
+ * If by name fails, just disable by name and it will
+ * recompute next time it is required.
+ */
+ __maps__free_maps_by_name(maps);
+ }
+ maps__set_maps_by_name(maps, maps_by_name);
+ }
+ RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
+ }
+	memmove(&maps_by_address[first_after_index + to_add],
+ &maps_by_address[first_after_index],
+ (nr_maps - first_after_index) * sizeof(new1));
+ maps_by_address[first_after_index] = map__get(new1);
+ if (maps_by_name)
+ maps_by_name[nr_maps] = map__get(new1);
+ if (new2) {
+ maps_by_address[first_after_index + 1] = map__get(new2);
+ if (maps_by_name)
+ maps_by_name[nr_maps + 1] = map__get(new2);
+ }
+ RC_CHK_ACCESS(maps)->nr_maps = nr_maps + to_add;
+ maps__set_maps_by_name_sorted(maps, false);
+ map__set_kmap_maps(new1, maps);
+ map__set_kmap_maps(new2, maps);
+
+ check_invariants(maps);
+ return 0;
+}
+
+/*
+ * Adds new to maps, if new overlaps existing entries then the existing maps are
+ * adjusted or removed so that new fits without overlapping any entries.
+ */
+static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
+{
+ int err = 0;
+ FILE *fp = debug_file();
+	/* ni is only used when maps_by_name is set; initialized to silence GCC. */
+	unsigned int i, ni = INT_MAX;
+
+ if (!maps__maps_by_address_sorted(maps))
+ __maps__sort_by_address(maps);
- next = first;
- while (next && !err) {
- struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);
- next = rb_next(&pos->rb_node);
+ /*
+ * Iterate through entries where the end of the existing entry is
+	 * greater than the new map's start.
+ */
+ for (i = first_ending_after(maps, new); i < maps__nr_maps(maps); ) {
+ struct map **maps_by_address = maps__maps_by_address(maps);
+ struct map **maps_by_name = maps__maps_by_name(maps);
+ struct map *pos = maps_by_address[i];
+ struct map *before = NULL, *after = NULL;
/*
* Stop if current map starts after map->end.
* Maps are ordered by start: next will not overlap for sure.
*/
- if (map__start(pos->map) >= map__end(map))
+ if (map__start(pos) >= map__end(new))
break;
- if (verbose >= 2) {
-
- if (use_browser) {
- pr_debug("overlapping maps in %s (disable tui for more info)\n",
- map__dso(map)->name);
- } else {
- fputs("overlapping maps:\n", fp);
- map__fprintf(map, fp);
- map__fprintf(pos->map, fp);
- }
+ if (use_browser) {
+ pr_debug("overlapping maps in %s (disable tui for more info)\n",
+ dso__name(map__dso(new)));
+ } else if (verbose >= 2) {
+ pr_debug("overlapping maps:\n");
+ map__fprintf(new, fp);
+ map__fprintf(pos, fp);
}
- rb_erase_init(&pos->rb_node, root);
+ if (maps_by_name)
+ ni = maps__by_name_index(maps, pos);
+
/*
* Now check if we need to create new maps for areas not
* overlapped by the new map:
*/
- if (map__start(map) > map__start(pos->map)) {
- struct map *before = map__clone(pos->map);
+ if (map__start(new) > map__start(pos)) {
+ /* Map starts within existing map. Need to shorten the existing map. */
+ before = map__clone(pos);
if (before == NULL) {
err = -ENOMEM;
- goto put_map;
- }
-
- map__set_end(before, map__start(map));
- err = __maps__insert(maps, before);
- if (err) {
- map__put(before);
- goto put_map;
+ goto out_err;
}
+ map__set_end(before, map__start(new));
if (verbose >= 2 && !use_browser)
map__fprintf(before, fp);
- map__put(before);
}
-
- if (map__end(map) < map__end(pos->map)) {
- struct map *after = map__clone(pos->map);
+ if (map__end(new) < map__end(pos)) {
+ /* The new map isn't as long as the existing map. */
+ after = map__clone(pos);
if (after == NULL) {
+ map__zput(before);
err = -ENOMEM;
- goto put_map;
+ goto out_err;
}
- map__set_start(after, map__end(map));
- map__add_pgoff(after, map__end(map) - map__start(pos->map));
- assert(map__map_ip(pos->map, map__end(map)) ==
- map__map_ip(after, map__end(map)));
- err = __maps__insert(maps, after);
- if (err) {
- map__put(after);
- goto put_map;
- }
+ map__set_start(after, map__end(new));
+ map__add_pgoff(after, map__end(new) - map__start(pos));
+ assert(map__map_ip(pos, map__end(new)) ==
+ map__map_ip(after, map__end(new)));
+
if (verbose >= 2 && !use_browser)
map__fprintf(after, fp);
+ }
+	/*
+	 * If adding one entry, for `before` or `after`, we can replace
+	 * the existing entry. If both `before` and `after` are
+	 * necessary then an insert is needed. If the new entry
+	 * entirely overlaps the existing entry, the existing entry can
+	 * just be removed.
+	 */
+ if (before) {
+ map__put(maps_by_address[i]);
+ maps_by_address[i] = before;
+ map__set_kmap_maps(before, maps);
+
+ if (maps_by_name) {
+ map__put(maps_by_name[ni]);
+ maps_by_name[ni] = map__get(before);
+ }
+
+ /* Maps are still ordered, go to next one. */
+ i++;
+ if (after) {
+			/*
+			 * 'before' and 'after' mean 'new' split the
+			 * 'pos' mapping, so no later mapping can
+			 * overlap 'new'.
+			 */
+ err = __maps__insert_sorted(maps, i, new, after);
+ map__put(after);
+ check_invariants(maps);
+ return err;
+ }
+ check_invariants(maps);
+ } else if (after) {
+		/*
+		 * 'after' means 'new' split 'pos', so no later
+		 * mapping can overlap 'new'.
+		 */
+ */
+ map__put(maps_by_address[i]);
+ maps_by_address[i] = map__get(new);
+ map__set_kmap_maps(new, maps);
+
+ if (maps_by_name) {
+ map__put(maps_by_name[ni]);
+ maps_by_name[ni] = map__get(new);
+ }
+
+ err = __maps__insert_sorted(maps, i + 1, after, NULL);
map__put(after);
+ check_invariants(maps);
+ return err;
+ } else {
+ struct map *next = NULL;
+ unsigned int nr_maps = maps__nr_maps(maps);
+
+ if (i + 1 < nr_maps)
+ next = maps_by_address[i + 1];
+
+ if (!next || map__start(next) >= map__end(new)) {
+				/*
+				 * Replace the existing mapping and stop,
+				 * knowing there are no later overlapping
+				 * mappings, or no later mappings at all.
+				 */
+ map__put(maps_by_address[i]);
+ maps_by_address[i] = map__get(new);
+ map__set_kmap_maps(new, maps);
+
+ if (maps_by_name) {
+ map__put(maps_by_name[ni]);
+ maps_by_name[ni] = map__get(new);
+ }
+
+ check_invariants(maps);
+ return err;
+ }
+			/*
+			 * 'new' fully covers 'pos', so remove it. The
+			 * following is an inlined version of
+			 * maps__remove that reuses the already computed
+			 * indices.
+			 */
+ map__put(maps_by_address[i]);
+ memmove(&maps_by_address[i],
+ &maps_by_address[i + 1],
+ (nr_maps - i - 1) * sizeof(*maps_by_address));
+
+ if (maps_by_name) {
+ map__put(maps_by_name[ni]);
+ memmove(&maps_by_name[ni],
+ &maps_by_name[ni + 1],
+ (nr_maps - ni - 1) * sizeof(*maps_by_name));
+ }
+ --RC_CHK_ACCESS(maps)->nr_maps;
+ check_invariants(maps);
+			/*
+			 * Maps remain ordered; no need to increase `i`
+			 * as the later maps were moved down over the
+			 * removed entry.
+			 */
}
-put_map:
- map__put(pos->map);
- free(pos);
}
- up_write(maps__lock(maps));
+ /* Add the map. */
+ err = __maps__insert_sorted(maps, i, new, NULL);
+out_err:
return err;
}
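
Concretely (hypothetical addresses): if maps holds [0x1000,0x3000) and new is
[0x1800,0x2800), the existing map is cloned into before = [0x1000,0x1800) and
after = [0x2800,0x3000) with its pgoff advanced by 0x1800, and the final order
is before, new, after. If instead new were [0x800,0x3800), it would fully
cover the existing map, which would simply be removed before new is inserted.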
-/*
- * XXX This should not really _copy_ te maps, but refcount them.
- */
-int maps__clone(struct thread *thread, struct maps *parent)
+int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
{
- struct maps *maps = thread__maps(thread);
int err;
- struct map_rb_node *rb_node;
+ down_write(maps__lock(maps));
+ err = __maps__fixup_overlap_and_insert(maps, new);
+ up_write(maps__lock(maps));
+ return err;
+}
+
+int maps__copy_from(struct maps *dest, struct maps *parent)
+{
+ /* Note, if struct map were immutable then cloning could use ref counts. */
+ struct map **parent_maps_by_address;
+ int err = 0;
+ unsigned int n;
+
+ down_write(maps__lock(dest));
down_read(maps__lock(parent));
- maps__for_each_entry(parent, rb_node) {
- struct map *new = map__clone(rb_node->map);
+ parent_maps_by_address = maps__maps_by_address(parent);
+ n = maps__nr_maps(parent);
+ if (maps__nr_maps(dest) == 0) {
+ /* No existing mappings so just copy from parent to avoid reallocs in insert. */
+ unsigned int nr_maps_allocated = RC_CHK_ACCESS(parent)->nr_maps_allocated;
+ struct map **dest_maps_by_address =
+ malloc(nr_maps_allocated * sizeof(struct map *));
+ struct map **dest_maps_by_name = NULL;
- if (new == NULL) {
+ if (!dest_maps_by_address)
err = -ENOMEM;
- goto out_unlock;
+ else {
+ if (maps__maps_by_name(parent)) {
+ dest_maps_by_name =
+ malloc(nr_maps_allocated * sizeof(struct map *));
+ }
+
+ RC_CHK_ACCESS(dest)->maps_by_address = dest_maps_by_address;
+ RC_CHK_ACCESS(dest)->maps_by_name = dest_maps_by_name;
+ RC_CHK_ACCESS(dest)->nr_maps_allocated = nr_maps_allocated;
}
- err = unwind__prepare_access(maps, new, NULL);
- if (err)
- goto out_unlock;
+ for (unsigned int i = 0; !err && i < n; i++) {
+ struct map *pos = parent_maps_by_address[i];
+ struct map *new = map__clone(pos);
- err = maps__insert(maps, new);
- if (err)
- goto out_unlock;
+ if (!new)
+ err = -ENOMEM;
+ else {
+ err = unwind__prepare_access(dest, new, NULL);
+ if (!err) {
+ dest_maps_by_address[i] = new;
+ map__set_kmap_maps(new, dest);
+ if (dest_maps_by_name)
+ dest_maps_by_name[i] = map__get(new);
+ RC_CHK_ACCESS(dest)->nr_maps = i + 1;
+ }
+ }
+ if (err)
+ map__put(new);
+ }
+ maps__set_maps_by_address_sorted(dest, maps__maps_by_address_sorted(parent));
+ if (!err) {
+ RC_CHK_ACCESS(dest)->last_search_by_name_idx =
+ RC_CHK_ACCESS(parent)->last_search_by_name_idx;
+ maps__set_maps_by_name_sorted(dest,
+ dest_maps_by_name &&
+ maps__maps_by_name_sorted(parent));
+ } else {
+ RC_CHK_ACCESS(dest)->last_search_by_name_idx = 0;
+ maps__set_maps_by_name_sorted(dest, false);
+ }
+ } else {
+ /* Unexpected copying to a maps containing entries. */
+ for (unsigned int i = 0; !err && i < n; i++) {
+ struct map *pos = parent_maps_by_address[i];
+ struct map *new = map__clone(pos);
- map__put(new);
+ if (!new)
+ err = -ENOMEM;
+ else {
+ err = unwind__prepare_access(dest, new, NULL);
+ if (!err)
+ err = __maps__insert(dest, new);
+ }
+ map__put(new);
+ }
}
+ check_invariants(dest);
- err = 0;
-out_unlock:
up_read(maps__lock(parent));
+ up_write(maps__lock(dest));
return err;
}
-struct map_rb_node *maps__find_node(struct maps *maps, struct map *map)
+static int map__addr_cmp(const void *key, const void *entry)
{
- struct map_rb_node *rb_node;
+ const u64 ip = *(const u64 *)key;
+ const struct map *map = *(const struct map * const *)entry;
- maps__for_each_entry(maps, rb_node) {
- if (rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map))
- return rb_node;
- }
- return NULL;
+ if (ip < map__start(map))
+ return -1;
+ if (ip >= map__end(map))
+ return 1;
+ return 0;
}
struct map *maps__find(struct maps *maps, u64 ip)
{
- struct rb_node *p;
- struct map_rb_node *m;
+ struct map *result = NULL;
+ bool done = false;
+
+ /* See locking/sorting note. */
+ while (!done) {
+ down_read(maps__lock(maps));
+ if (maps__maps_by_address_sorted(maps)) {
+ struct map **mapp = NULL;
+ struct map **maps_by_address = maps__maps_by_address(maps);
+ unsigned int nr_maps = maps__nr_maps(maps);
+
+ if (maps_by_address && nr_maps)
+ mapp = bsearch(&ip, maps_by_address, nr_maps, sizeof(*mapp),
+ map__addr_cmp);
+ if (mapp)
+ result = map__get(*mapp);
+ done = true;
+ }
+ up_read(maps__lock(maps));
+ if (!done)
+ maps__sort_by_address(maps);
+ }
+ return result;
+}
+static int map__strcmp_name(const void *name, const void *b)
+{
+ const struct dso *dso = map__dso(*(const struct map **)b);
- down_read(maps__lock(maps));
+ return strcmp(name, dso__short_name(dso));
+}
- p = maps__entries(maps)->rb_node;
- while (p != NULL) {
- m = rb_entry(p, struct map_rb_node, rb_node);
- if (ip < map__start(m->map))
- p = p->rb_left;
- else if (ip >= map__end(m->map))
- p = p->rb_right;
- else
- goto out;
+struct map *maps__find_by_name(struct maps *maps, const char *name)
+{
+ struct map *result = NULL;
+ bool done = false;
+
+ /* See locking/sorting note. */
+ while (!done) {
+ unsigned int i;
+
+ down_read(maps__lock(maps));
+
+ /* First check last found entry. */
+ i = RC_CHK_ACCESS(maps)->last_search_by_name_idx;
+ if (i < maps__nr_maps(maps) && maps__maps_by_name(maps)) {
+ struct dso *dso = map__dso(maps__maps_by_name(maps)[i]);
+
+ if (dso && strcmp(dso__short_name(dso), name) == 0) {
+ result = map__get(maps__maps_by_name(maps)[i]);
+ done = true;
+ }
+ }
+
+ /* Second search sorted array. */
+ if (!done && maps__maps_by_name_sorted(maps)) {
+ struct map **mapp =
+ bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
+ sizeof(*mapp), map__strcmp_name);
+
+ if (mapp) {
+ result = map__get(*mapp);
+ i = mapp - maps__maps_by_name(maps);
+ RC_CHK_ACCESS(maps)->last_search_by_name_idx = i;
+ }
+ done = true;
+ }
+ up_read(maps__lock(maps));
+ if (!done) {
+ /* Sort and retry binary search. */
+ if (maps__sort_by_name(maps)) {
+				/*
+				 * Memory allocation failed; do a linear
+				 * search through the address-sorted maps.
+				 */
+ struct map **maps_by_address;
+ unsigned int n;
+
+ down_read(maps__lock(maps));
+ maps_by_address = maps__maps_by_address(maps);
+ n = maps__nr_maps(maps);
+ for (i = 0; i < n; i++) {
+ struct map *pos = maps_by_address[i];
+ struct dso *dso = map__dso(pos);
+
+ if (dso && strcmp(dso__short_name(dso), name) == 0) {
+ result = map__get(pos);
+ break;
+ }
+ }
+ up_read(maps__lock(maps));
+ done = true;
+ }
+ }
+ }
+ return result;
+}
+
+struct map *maps__find_next_entry(struct maps *maps, struct map *map)
+{
+ unsigned int i;
+ struct map *result = NULL;
+
+ down_read(maps__lock(maps));
+ while (!maps__maps_by_address_sorted(maps)) {
+ up_read(maps__lock(maps));
+ maps__sort_by_address(maps);
+ down_read(maps__lock(maps));
}
+ i = maps__by_address_index(maps, map);
+ if (++i < maps__nr_maps(maps))
+ result = map__get(maps__maps_by_address(maps)[i]);
- m = NULL;
-out:
up_read(maps__lock(maps));
- return m ? m->map : NULL;
+ return result;
}
-struct map_rb_node *maps__first(struct maps *maps)
+void maps__fixup_end(struct maps *maps)
{
- struct rb_node *first = rb_first(maps__entries(maps));
+ struct map **maps_by_address;
+ unsigned int n;
- if (first)
- return rb_entry(first, struct map_rb_node, rb_node);
- return NULL;
+ down_write(maps__lock(maps));
+ if (!maps__maps_by_address_sorted(maps))
+ __maps__sort_by_address(maps);
+
+ maps_by_address = maps__maps_by_address(maps);
+ n = maps__nr_maps(maps);
+ for (unsigned int i = 1; i < n; i++) {
+ struct map *prev = maps_by_address[i - 1];
+ struct map *curr = maps_by_address[i];
+
+ if (!map__end(prev) || map__end(prev) > map__start(curr))
+ map__set_end(prev, map__start(curr));
+ }
+
+	/*
+	 * We don't have the actual symbols yet, so guess the
+	 * last map's final address.
+	 */
+ if (n > 0 && !map__end(maps_by_address[n - 1]))
+ map__set_end(maps_by_address[n - 1], ~0ULL);
+
+ RC_CHK_ACCESS(maps)->ends_broken = false;
+ check_invariants(maps);
+
+ up_write(maps__lock(maps));
}
-struct map_rb_node *map_rb_node__next(struct map_rb_node *node)
+/*
+ * Merges new_map into kmaps, splitting new_map as needed to fit around the
+ * existing map regions.
+ */
+int maps__merge_in(struct maps *kmaps, struct map *new_map)
{
- struct rb_node *next;
+ unsigned int first_after_, kmaps__nr_maps;
+ struct map **kmaps_maps_by_address;
+ struct map **merged_maps_by_address;
+ unsigned int merged_nr_maps_allocated;
+
+ /* First try under a read lock. */
+ while (true) {
+ down_read(maps__lock(kmaps));
+ if (maps__maps_by_address_sorted(kmaps))
+ break;
+
+ up_read(maps__lock(kmaps));
+
+		/* The first_ending_after() binary search requires sorted maps. Sort and try again. */
+ maps__sort_by_address(kmaps);
+ }
+ first_after_ = first_ending_after(kmaps, new_map);
+ kmaps_maps_by_address = maps__maps_by_address(kmaps);
+
+ if (first_after_ >= maps__nr_maps(kmaps) ||
+ map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) {
+ /* No overlap so regular insert suffices. */
+ up_read(maps__lock(kmaps));
+ return maps__insert(kmaps, new_map);
+ }
+ up_read(maps__lock(kmaps));
- if (!node)
- return NULL;
+ /* Plain insert with a read-lock failed, try again now with the write lock. */
+ down_write(maps__lock(kmaps));
+ if (!maps__maps_by_address_sorted(kmaps))
+ __maps__sort_by_address(kmaps);
+
+ first_after_ = first_ending_after(kmaps, new_map);
+ kmaps_maps_by_address = maps__maps_by_address(kmaps);
+ kmaps__nr_maps = maps__nr_maps(kmaps);
+
+ if (first_after_ >= kmaps__nr_maps ||
+ map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) {
+ /* No overlap so regular insert suffices. */
+ int ret = __maps__insert(kmaps, new_map);
+
+ check_invariants(kmaps);
+ up_write(maps__lock(kmaps));
+ return ret;
+ }
+ /* Array to merge into, possibly 1 more for the sake of new_map. */
+ merged_nr_maps_allocated = RC_CHK_ACCESS(kmaps)->nr_maps_allocated;
+ if (kmaps__nr_maps + 1 == merged_nr_maps_allocated)
+ merged_nr_maps_allocated++;
+
+ merged_maps_by_address = malloc(merged_nr_maps_allocated * sizeof(*merged_maps_by_address));
+ if (!merged_maps_by_address) {
+ up_write(maps__lock(kmaps));
+ return -ENOMEM;
+ }
+ maps__set_maps_by_address(kmaps, merged_maps_by_address);
+ maps__set_maps_by_address_sorted(kmaps, true);
+ __maps__free_maps_by_name(kmaps);
+ maps__set_nr_maps_allocated(kmaps, merged_nr_maps_allocated);
- next = rb_next(&node->rb_node);
+ /* Copy entries before the new_map that can't overlap. */
+ for (unsigned int i = 0; i < first_after_; i++)
+ merged_maps_by_address[i] = map__get(kmaps_maps_by_address[i]);
- if (!next)
- return NULL;
+ maps__set_nr_maps(kmaps, first_after_);
- return rb_entry(next, struct map_rb_node, rb_node);
+	/* Add the new map; it will be split when the later overlapping mappings are added. */
+ __maps__insert(kmaps, new_map);
+
+ /* Insert mappings after new_map, splitting new_map in the process. */
+ for (unsigned int i = first_after_; i < kmaps__nr_maps; i++)
+ __maps__fixup_overlap_and_insert(kmaps, kmaps_maps_by_address[i]);
+
+	/* Drop the old array's references now that its entries have been merged into kmaps. */
+ for (unsigned int i = 0; i < kmaps__nr_maps; i++)
+ map__zput(kmaps_maps_by_address[i]);
+
+ free(kmaps_maps_by_address);
+ check_invariants(kmaps);
+ up_write(maps__lock(kmaps));
+ return 0;
+}
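
Concretely (hypothetical addresses): if kmaps already holds a module map
[0x3000,0x4000) and new_map is a kcore text map [0x1000,0x8000), new_map is
inserted first and the module map is then re-added through
__maps__fixup_overlap_and_insert(), which splits new_map into [0x1000,0x3000)
and [0x4000,0x8000) around the module.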
+
+void maps__load_first(struct maps *maps)
+{
+ down_read(maps__lock(maps));
+
+ if (maps__nr_maps(maps) > 0)
+ map__load(maps__maps_by_address(maps)[0]);
+
+ up_read(maps__lock(maps));
}