path: root/tools/perf/util/annotate.c
author		Ian Rogers <irogers@google.com>	2023-06-14 21:07:15 -0700
committer	Namhyung Kim <namhyung@kernel.org>	2023-06-20 17:04:21 -0700
commit		2e9f9d4a729f12b4bc3fa60406374327b1809abe (patch)
tree		62b67e9e0f97568569251c7f72f25c40c65d5634 /tools/perf/util/annotate.c
parent		0650b2b2e62edfa9510ba0c80f42d98c4a748b12 (diff)
perf annotation: Switch lock from a mutex to a sharded_mutex
Remove the "struct mutex lock" variable from annotation that is allocated per symbol. This removes in the region of 40 bytes per symbol allocation. Use a sharded mutex where the number of shards is set to the number of CPUs. Assuming good hashing of the annotation (done based on the pointer), this means in order to contend there needs to be more threads than CPUs, which is not currently true in any perf command. Were contention an issue it is straightforward to increase the number of shards in the mutex. On my Debian/glibc based machine, this reduces the size of struct annotation from 136 bytes to 96 bytes, or nearly 30%. Signed-off-by: Ian Rogers <irogers@google.com> Acked-by: Namhyung Kim <namhyung@kernel.org> Cc: Andres Freund <andres@anarazel.de> Cc: Mark Rutland <mark.rutland@arm.com> Cc: Yuan Can <yuancan@huawei.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Adrian Hunter <adrian.hunter@intel.com> Cc: Arnaldo Carvalho de Melo <acme@kernel.org> Cc: Huacai Chen <chenhuacai@kernel.org> Cc: Jiri Olsa <jolsa@kernel.org> Cc: Masami Hiramatsu <mhiramat@kernel.org> Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Kan Liang <kan.liang@linux.intel.com> Cc: Ingo Molnar <mingo@redhat.com> Link: https://lore.kernel.org/r/20230615040715.2064350-2-irogers@google.com Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Diffstat (limited to 'tools/perf/util/annotate.c')
-rw-r--r--	tools/perf/util/annotate.c	66
1 file changed, 57 insertions(+), 9 deletions(-)
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 43865601f96c..77c816400719 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -32,6 +32,7 @@
#include "block-range.h"
#include "string2.h"
#include "util/event.h"
+#include "util/sharded_mutex.h"
#include "arch/common.h"
#include "namespaces.h"
#include <regex.h>
@@ -856,7 +857,7 @@ void symbol__annotate_zero_histograms(struct symbol *sym)
{
struct annotation *notes = symbol__annotation(sym);
- mutex_lock(&notes->lock);
+ annotation__lock(notes);
if (notes->src != NULL) {
memset(notes->src->histograms, 0,
notes->src->nr_histograms * notes->src->sizeof_sym_hist);
@@ -864,7 +865,7 @@ void symbol__annotate_zero_histograms(struct symbol *sym)
memset(notes->src->cycles_hist, 0,
symbol__size(sym) * sizeof(struct cyc_hist));
}
- mutex_unlock(&notes->lock);
+ annotation__unlock(notes);
}
static int __symbol__account_cycles(struct cyc_hist *ch,
@@ -1121,7 +1122,7 @@ void annotation__compute_ipc(struct annotation *notes, size_t size)
notes->hit_insn = 0;
notes->cover_insn = 0;
- mutex_lock(&notes->lock);
+ annotation__lock(notes);
for (offset = size - 1; offset >= 0; --offset) {
struct cyc_hist *ch;
@@ -1140,7 +1141,7 @@ void annotation__compute_ipc(struct annotation *notes, size_t size)
notes->have_cycles = true;
}
}
- mutex_unlock(&notes->lock);
+ annotation__unlock(notes);
}
int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
@@ -1291,17 +1292,64 @@ int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool r
return ins__scnprintf(&dl->ins, bf, size, &dl->ops, max_ins_name);
}
-void annotation__init(struct annotation *notes)
+void annotation__exit(struct annotation *notes)
{
- mutex_init(&notes->lock);
+ annotated_source__delete(notes->src);
}
-void annotation__exit(struct annotation *notes)
+static struct sharded_mutex *sharded_mutex;
+
+static void annotation__init_sharded_mutex(void)
{
- annotated_source__delete(notes->src);
- mutex_destroy(&notes->lock);
+ /* As many mutexes as there are CPUs. */
+ sharded_mutex = sharded_mutex__new(cpu__max_present_cpu().cpu);
+}
+
+static size_t annotation__hash(const struct annotation *notes)
+{
+ return (size_t)notes;
}
+static struct mutex *annotation__get_mutex(const struct annotation *notes)
+{
+ static pthread_once_t once = PTHREAD_ONCE_INIT;
+
+ pthread_once(&once, annotation__init_sharded_mutex);
+ if (!sharded_mutex)
+ return NULL;
+
+ return sharded_mutex__get_mutex(sharded_mutex, annotation__hash(notes));
+}
+
+void annotation__lock(struct annotation *notes)
+ NO_THREAD_SAFETY_ANALYSIS
+{
+ struct mutex *mutex = annotation__get_mutex(notes);
+
+ if (mutex)
+ mutex_lock(mutex);
+}
+
+void annotation__unlock(struct annotation *notes)
+ NO_THREAD_SAFETY_ANALYSIS
+{
+ struct mutex *mutex = annotation__get_mutex(notes);
+
+ if (mutex)
+ mutex_unlock(mutex);
+}
+
+bool annotation__trylock(struct annotation *notes)
+{
+ struct mutex *mutex = annotation__get_mutex(notes);
+
+ if (!mutex)
+ return false;
+
+ return mutex_trylock(mutex);
+}
+
+
static void annotation_line__add(struct annotation_line *al, struct list_head *head)
{
list_add_tail(&al->node, head);