Diffstat (limited to 'tools/perf/builtin-lock.c')
-rw-r--r--   tools/perf/builtin-lock.c   582
1 file changed, 368 insertions(+), 214 deletions(-)
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index c15386cb1033..e8962c985d34 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -10,6 +10,7 @@
#include "util/thread.h"
#include "util/header.h"
#include "util/target.h"
+#include "util/cgroup.h"
#include "util/callchain.h"
#include "util/lock-contention.h"
#include "util/bpf_skel/lock_data.h"
@@ -45,44 +46,27 @@
static struct perf_session *session;
static struct target target;
-/* based on kernel/lockdep.c */
-#define LOCKHASH_BITS 12
-#define LOCKHASH_SIZE (1UL << LOCKHASH_BITS)
-
-static struct hlist_head *lockhash_table;
-
-#define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS)
-#define lockhashentry(key) (lockhash_table + __lockhashfn((key)))
-
static struct rb_root thread_stats;
static bool combine_locks;
static bool show_thread_stats;
static bool show_lock_addrs;
static bool show_lock_owner;
+static bool show_lock_cgroups;
static bool use_bpf;
static unsigned long bpf_map_entries = MAX_ENTRIES;
static int max_stack_depth = CONTENTION_STACK_DEPTH;
static int stack_skip = CONTENTION_STACK_SKIP;
static int print_nr_entries = INT_MAX / 2;
-static LIST_HEAD(callstack_filters);
static const char *output_name = NULL;
static FILE *lock_output;
-struct callstack_filter {
- struct list_head list;
- char name[];
-};
-
static struct lock_filter filters;
+static struct lock_delay *delays;
+static int nr_delays;
static enum lock_aggr_mode aggr_mode = LOCK_AGGR_ADDR;
-static bool needs_callstack(void)
-{
- return !list_empty(&callstack_filters);
-}
-
static struct thread_stat *thread_stat_find(u32 tid)
{
struct rb_node *node;
@@ -436,16 +420,13 @@ static void combine_lock_stats(struct lock_stat *st)
rb_insert_color(&st->rb, &sorted);
}
-static void insert_to_result(struct lock_stat *st,
- int (*bigger)(struct lock_stat *, struct lock_stat *))
+static void insert_to(struct rb_root *rr, struct lock_stat *st,
+ int (*bigger)(struct lock_stat *, struct lock_stat *))
{
- struct rb_node **rb = &result.rb_node;
+ struct rb_node **rb = &rr->rb_node;
struct rb_node *parent = NULL;
struct lock_stat *p;
- if (combine_locks && st->combined)
- return;
-
while (*rb) {
p = container_of(*rb, struct lock_stat, rb);
parent = *rb;
@@ -457,13 +438,21 @@ static void insert_to_result(struct lock_stat *st,
}
rb_link_node(&st->rb, parent, rb);
- rb_insert_color(&st->rb, &result);
+ rb_insert_color(&st->rb, rr);
}
-/* returns left most element of result, and erase it */
-static struct lock_stat *pop_from_result(void)
+static inline void insert_to_result(struct lock_stat *st,
+ int (*bigger)(struct lock_stat *,
+ struct lock_stat *))
+{
+ if (combine_locks && st->combined)
+ return;
+ insert_to(&result, st, bigger);
+}
+
+static inline struct lock_stat *pop_from(struct rb_root *rr)
{
- struct rb_node *node = result.rb_node;
+ struct rb_node *node = rr->rb_node;
if (!node)
return NULL;
@@ -471,80 +460,15 @@ static struct lock_stat *pop_from_result(void)
while (node->rb_left)
node = node->rb_left;
- rb_erase(node, &result);
+ rb_erase(node, rr);
return container_of(node, struct lock_stat, rb);
-}
-struct lock_stat *lock_stat_find(u64 addr)
-{
- struct hlist_head *entry = lockhashentry(addr);
- struct lock_stat *ret;
-
- hlist_for_each_entry(ret, entry, hash_entry) {
- if (ret->addr == addr)
- return ret;
- }
- return NULL;
}
-struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags)
-{
- struct hlist_head *entry = lockhashentry(addr);
- struct lock_stat *ret, *new;
-
- hlist_for_each_entry(ret, entry, hash_entry) {
- if (ret->addr == addr)
- return ret;
- }
-
- new = zalloc(sizeof(struct lock_stat));
- if (!new)
- goto alloc_failed;
-
- new->addr = addr;
- new->name = strdup(name);
- if (!new->name) {
- free(new);
- goto alloc_failed;
- }
-
- new->flags = flags;
- new->wait_time_min = ULLONG_MAX;
-
- hlist_add_head(&new->hash_entry, entry);
- return new;
-
-alloc_failed:
- pr_err("memory allocation failed\n");
- return NULL;
-}
-
-bool match_callstack_filter(struct machine *machine, u64 *callstack)
+/* returns left most element of result, and erase it */
+static struct lock_stat *pop_from_result(void)
{
- struct map *kmap;
- struct symbol *sym;
- u64 ip;
-
- if (list_empty(&callstack_filters))
- return true;
-
- for (int i = 0; i < max_stack_depth; i++) {
- struct callstack_filter *filter;
-
- if (!callstack || !callstack[i])
- break;
-
- ip = callstack[i];
- sym = machine__find_kernel_symbol(machine, ip, &kmap);
- if (sym == NULL)
- continue;
-
- list_for_each_entry(filter, &callstack_filters, list) {
- if (strstr(sym->name, filter->name))
- return true;
- }
- }
- return false;
+ return pop_from(&result);
}
struct trace_lock_handler {
@@ -619,6 +543,7 @@ static int get_key_by_aggr_mode_simple(u64 *key, u64 addr, u32 tid)
*key = tid;
break;
case LOCK_AGGR_CALLER:
+ case LOCK_AGGR_CGROUP:
default:
pr_err("Invalid aggregation mode: %d\n", aggr_mode);
return -EINVAL;
@@ -1103,6 +1028,7 @@ static int report_lock_contention_begin_event(struct evsel *evsel,
if (lock_contention_caller(evsel, sample, buf, sizeof(buf)) < 0)
name = "Unknown";
break;
+ case LOCK_AGGR_CGROUP:
case LOCK_AGGR_TASK:
default:
break;
@@ -1146,7 +1072,7 @@ static int report_lock_contention_begin_event(struct evsel *evsel,
if (callstack == NULL)
return -ENOMEM;
- if (!match_callstack_filter(machine, callstack)) {
+ if (!match_callstack_filter(machine, callstack, max_stack_depth)) {
free(callstack);
return 0;
}
@@ -1458,20 +1384,16 @@ static void dump_map(void)
fprintf(lock_output, " %#llx: %s\n", (unsigned long long)st->addr, st->name);
}
-static int dump_info(void)
+static void dump_info(void)
{
- int rc = 0;
-
if (info_threads)
dump_threads();
- else if (info_map)
+
+ if (info_map) {
+ if (info_threads)
+ fputc('\n', lock_output);
dump_map();
- else {
- rc = -1;
- pr_err("Unknown type of information\n");
}
-
- return rc;
}
static const struct evsel_str_handler lock_tracepoints[] = {
@@ -1486,7 +1408,7 @@ static const struct evsel_str_handler contention_tracepoints[] = {
{ "lock:contention_end", evsel__process_contention_end, },
};
-static int process_event_update(struct perf_tool *tool,
+static int process_event_update(const struct perf_tool *tool,
union perf_event *event,
struct evlist **pevlist)
{
@@ -1505,7 +1427,7 @@ static int process_event_update(struct perf_tool *tool,
typedef int (*tracepoint_handler)(struct evsel *evsel,
struct perf_sample *sample);
-static int process_sample_event(struct perf_tool *tool __maybe_unused,
+static int process_sample_event(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
@@ -1560,8 +1482,13 @@ static void sort_result(void)
static const struct {
unsigned int flags;
- const char *str;
- const char *name;
+ /*
+ * Name of the lock flags (access), with delimiter ':'.
+ * For example, rwsem:R or rwsem:W.
+ */
+ const char *flags_name;
+ /* Name of the lock (type), for example, rwlock or rwsem. */
+ const char *lock_name;
} lock_type_table[] = {
{ 0, "semaphore", "semaphore" },
{ LCB_F_SPIN, "spinlock", "spinlock" },
@@ -1576,45 +1503,32 @@ static const struct {
{ LCB_F_PERCPU | LCB_F_WRITE, "pcpu-sem:W", "percpu-rwsem" },
{ LCB_F_MUTEX, "mutex", "mutex" },
{ LCB_F_MUTEX | LCB_F_SPIN, "mutex", "mutex" },
- /* alias for get_type_flag() */
- { LCB_F_MUTEX | LCB_F_SPIN, "mutex-spin", "mutex" },
+ /* alias for optimistic spinning only */
+ { LCB_F_MUTEX | LCB_F_SPIN, "mutex:spin", "mutex-spin" },
};
-static const char *get_type_str(unsigned int flags)
+static const char *get_type_flags_name(unsigned int flags)
{
- flags &= LCB_F_MAX_FLAGS - 1;
+ flags &= LCB_F_TYPE_MASK;
for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
if (lock_type_table[i].flags == flags)
- return lock_type_table[i].str;
+ return lock_type_table[i].flags_name;
}
return "unknown";
}
-static const char *get_type_name(unsigned int flags)
+static const char *get_type_lock_name(unsigned int flags)
{
- flags &= LCB_F_MAX_FLAGS - 1;
+ flags &= LCB_F_TYPE_MASK;
for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
if (lock_type_table[i].flags == flags)
- return lock_type_table[i].name;
+ return lock_type_table[i].lock_name;
}
return "unknown";
}
-static unsigned int get_type_flag(const char *str)
-{
- for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
- if (!strcmp(lock_type_table[i].name, str))
- return lock_type_table[i].flags;
- }
- for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
- if (!strcmp(lock_type_table[i].str, str))
- return lock_type_table[i].flags;
- }
- return UINT_MAX;
-}
-
static void lock_filter_finish(void)
{
zfree(&filters.types);
@@ -1628,6 +1542,15 @@ static void lock_filter_finish(void)
zfree(&filters.syms);
filters.nr_syms = 0;
+
+ zfree(&filters.cgrps);
+ filters.nr_cgrps = 0;
+
+ for (int i = 0; i < filters.nr_slabs; i++)
+ free(filters.slabs[i]);
+
+ zfree(&filters.slabs);
+ filters.nr_slabs = 0;
}
static void sort_contention_result(void)
@@ -1653,6 +1576,9 @@ static void print_header_stdio(void)
case LOCK_AGGR_ADDR:
fprintf(lock_output, " %16s %s\n\n", "address", "symbol");
break;
+ case LOCK_AGGR_CGROUP:
+ fprintf(lock_output, " %s\n\n", "cgroup");
+ break;
default:
break;
}
@@ -1680,6 +1606,9 @@ static void print_header_csv(const char *sep)
case LOCK_AGGR_ADDR:
fprintf(lock_output, "%s%s %s%s %s\n", "address", sep, "symbol", sep, "type");
break;
+ case LOCK_AGGR_CGROUP:
+ fprintf(lock_output, "%s\n", "cgroup");
+ break;
default:
break;
}
@@ -1708,7 +1637,7 @@ static void print_lock_stat_stdio(struct lock_contention *con, struct lock_stat
switch (aggr_mode) {
case LOCK_AGGR_CALLER:
- fprintf(lock_output, " %10s %s\n", get_type_str(st->flags), st->name);
+ fprintf(lock_output, " %10s %s\n", get_type_flags_name(st->flags), st->name);
break;
case LOCK_AGGR_TASK:
pid = st->addr;
@@ -1718,7 +1647,10 @@ static void print_lock_stat_stdio(struct lock_contention *con, struct lock_stat
break;
case LOCK_AGGR_ADDR:
fprintf(lock_output, " %016llx %s (%s)\n", (unsigned long long)st->addr,
- st->name, get_type_name(st->flags));
+ st->name, get_type_lock_name(st->flags));
+ break;
+ case LOCK_AGGR_CGROUP:
+ fprintf(lock_output, " %s\n", st->name);
break;
default:
break;
@@ -1756,7 +1688,7 @@ static void print_lock_stat_csv(struct lock_contention *con, struct lock_stat *s
switch (aggr_mode) {
case LOCK_AGGR_CALLER:
- fprintf(lock_output, "%s%s %s", get_type_str(st->flags), sep, st->name);
+ fprintf(lock_output, "%s%s %s", get_type_flags_name(st->flags), sep, st->name);
if (verbose <= 0)
fprintf(lock_output, "\n");
break;
@@ -1768,7 +1700,10 @@ static void print_lock_stat_csv(struct lock_contention *con, struct lock_stat *s
break;
case LOCK_AGGR_ADDR:
fprintf(lock_output, "%llx%s %s%s %s\n", (unsigned long long)st->addr, sep,
- st->name, sep, get_type_name(st->flags));
+ st->name, sep, get_type_lock_name(st->flags));
+ break;
+ case LOCK_AGGR_CGROUP:
+ fprintf(lock_output, "%s\n",st->name);
break;
default:
break;
@@ -1884,6 +1819,22 @@ static void print_contention_result(struct lock_contention *con)
break;
}
+ if (con->owner && con->save_callstack && verbose > 0) {
+ struct rb_root root = RB_ROOT;
+
+ if (symbol_conf.field_sep)
+ fprintf(lock_output, "# owner stack trace:\n");
+ else
+ fprintf(lock_output, "\n=== owner stack trace ===\n\n");
+ while ((st = pop_owner_stack_trace(con)))
+ insert_to(&root, st, compare);
+
+ while ((st = pop_from(&root))) {
+ print_lock_stat(con, st);
+ free(st);
+ }
+ }
+
if (print_nr_entries) {
/* update the total/bad stats */
while ((st = pop_from_result())) {
@@ -1903,22 +1854,22 @@ static bool force;
static int __cmd_report(bool display_info)
{
int err = -EINVAL;
- struct perf_tool eops = {
- .attr = perf_event__process_attr,
- .event_update = process_event_update,
- .sample = process_sample_event,
- .comm = perf_event__process_comm,
- .mmap = perf_event__process_mmap,
- .namespaces = perf_event__process_namespaces,
- .tracing_data = perf_event__process_tracing_data,
- .ordered_events = true,
- };
+ struct perf_tool eops;
struct perf_data data = {
.path = input_name,
.mode = PERF_DATA_MODE_READ,
.force = force,
};
+ perf_tool__init(&eops, /*ordered_events=*/true);
+ eops.attr = perf_event__process_attr;
+ eops.event_update = process_event_update;
+ eops.sample = process_sample_event;
+ eops.comm = perf_event__process_comm;
+ eops.mmap = perf_event__process_mmap;
+ eops.mmap2 = perf_event__process_mmap2;
+ eops.namespaces = perf_event__process_namespaces;
+ eops.tracing_data = perf_event__process_tracing_data;
session = perf_session__new(&data, &eops);
if (IS_ERR(session)) {
pr_err("Initializing perf session failed\n");
@@ -1926,7 +1877,7 @@ static int __cmd_report(bool display_info)
}
symbol_conf.allow_aliases = true;
- symbol__init(&session->header.env);
+ symbol__init(perf_session__env(session));
if (!data.is_pipe) {
if (!perf_session__has_traces(session, "lock record"))
@@ -1958,7 +1909,7 @@ static int __cmd_report(bool display_info)
setup_pager();
if (display_info) /* used for info subcommand */
- err = dump_info();
+ dump_info();
else {
combine_result();
sort_result();
@@ -1999,6 +1950,27 @@ static int check_lock_contention_options(const struct option *options,
return -1;
}
+ if (show_lock_cgroups && !use_bpf) {
+ pr_err("Cgroups are available only with BPF\n");
+ parse_options_usage(usage, options, "lock-cgroup", 0);
+ parse_options_usage(NULL, options, "use-bpf", 0);
+ return -1;
+ }
+
+ if (show_lock_cgroups && show_lock_addrs) {
+ pr_err("Cannot use cgroup and addr mode together\n");
+ parse_options_usage(usage, options, "lock-cgroup", 0);
+ parse_options_usage(NULL, options, "lock-addr", 0);
+ return -1;
+ }
+
+ if (show_lock_cgroups && show_thread_stats) {
+ pr_err("Cannot use cgroup and thread mode together\n");
+ parse_options_usage(usage, options, "lock-cgroup", 0);
+ parse_options_usage(NULL, options, "threads", 0);
+ return -1;
+ }
+
if (symbol_conf.field_sep) {
if (strstr(symbol_conf.field_sep, ":") || /* part of type flags */
strstr(symbol_conf.field_sep, "+") || /* part of caller offset */
@@ -2009,8 +1981,10 @@ static int check_lock_contention_options(const struct option *options,
}
}
- if (show_lock_owner)
- show_thread_stats = true;
+ if (show_lock_owner && !show_thread_stats) {
+ pr_warning("Now -o try to show owner's callstack instead of pid and comm.\n");
+ pr_warning("Please use -t option too to keep the old behavior.\n");
+ }
return 0;
}
@@ -2018,15 +1992,7 @@ static int check_lock_contention_options(const struct option *options,
static int __cmd_contention(int argc, const char **argv)
{
int err = -EINVAL;
- struct perf_tool eops = {
- .attr = perf_event__process_attr,
- .event_update = process_event_update,
- .sample = process_sample_event,
- .comm = perf_event__process_comm,
- .mmap = perf_event__process_mmap,
- .tracing_data = perf_event__process_tracing_data,
- .ordered_events = true,
- };
+ struct perf_tool eops;
struct perf_data data = {
.path = input_name,
.mode = PERF_DATA_MODE_READ,
@@ -2038,9 +2004,13 @@ static int __cmd_contention(int argc, const char **argv)
.max_stack = max_stack_depth,
.stack_skip = stack_skip,
.filters = &filters,
+ .delays = delays,
+ .nr_delays = nr_delays,
.save_callstack = needs_callstack(),
.owner = show_lock_owner,
+ .cgroups = RB_ROOT,
};
+ struct perf_env host_env;
lockhash_table = calloc(LOCKHASH_SIZE, sizeof(*lockhash_table));
if (!lockhash_table)
@@ -2048,23 +2018,37 @@ static int __cmd_contention(int argc, const char **argv)
con.result = &lockhash_table[0];
- session = perf_session__new(use_bpf ? NULL : &data, &eops);
+ perf_tool__init(&eops, /*ordered_events=*/true);
+ eops.attr = perf_event__process_attr;
+ eops.event_update = process_event_update;
+ eops.sample = process_sample_event;
+ eops.comm = perf_event__process_comm;
+ eops.mmap = perf_event__process_mmap;
+ eops.mmap2 = perf_event__process_mmap2;
+ eops.tracing_data = perf_event__process_tracing_data;
+
+ perf_env__init(&host_env);
+ session = __perf_session__new(use_bpf ? NULL : &data, &eops,
+ /*trace_event_repipe=*/false, &host_env);
+
if (IS_ERR(session)) {
pr_err("Initializing perf session failed\n");
err = PTR_ERR(session);
+ session = NULL;
goto out_delete;
}
con.machine = &session->machines.host;
con.aggr_mode = aggr_mode = show_thread_stats ? LOCK_AGGR_TASK :
- show_lock_addrs ? LOCK_AGGR_ADDR : LOCK_AGGR_CALLER;
+ show_lock_addrs ? LOCK_AGGR_ADDR :
+ show_lock_cgroups ? LOCK_AGGR_CGROUP : LOCK_AGGR_CALLER;
if (con.aggr_mode == LOCK_AGGR_CALLER)
con.save_callstack = true;
symbol_conf.allow_aliases = true;
- symbol__init(&session->header.env);
+ symbol__init(perf_session__env(session));
if (use_bpf) {
err = target__validate(&target);
@@ -2097,7 +2081,8 @@ static int __cmd_contention(int argc, const char **argv)
goto out_delete;
}
- if (lock_contention_prepare(&con) < 0) {
+ err = lock_contention_prepare(&con);
+ if (err < 0) {
pr_err("lock contention BPF setup failed\n");
goto out_delete;
}
@@ -2118,10 +2103,14 @@ static int __cmd_contention(int argc, const char **argv)
}
}
- if (setup_output_field(true, output_fields))
+ err = setup_output_field(true, output_fields);
+ if (err) {
+ pr_err("Failed to setup output field\n");
goto out_delete;
+ }
- if (select_key(true))
+ err = select_key(true);
+ if (err)
goto out_delete;
if (symbol_conf.field_sep) {
@@ -2157,8 +2146,9 @@ static int __cmd_contention(int argc, const char **argv)
out_delete:
lock_filter_finish();
evlist__delete(con.evlist);
- lock_contention_finish();
+ lock_contention_finish(&con);
perf_session__delete(session);
+ perf_env__exit(&host_env);
zfree(&lockhash_table);
return err;
}
@@ -2217,21 +2207,13 @@ setup_args:
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(record_args); i++)
- rec_argv[i] = strdup(record_args[i]);
+ rec_argv[i] = record_args[i];
for (j = 0; j < nr_tracepoints; j++) {
- const char *ev_name;
-
- if (has_lock_stat)
- ev_name = strdup(lock_tracepoints[j].name);
- else
- ev_name = strdup(contention_tracepoints[j].name);
-
- if (!ev_name)
- return -ENOMEM;
-
rec_argv[i++] = "-e";
- rec_argv[i++] = ev_name;
+ rec_argv[i++] = has_lock_stat
+ ? lock_tracepoints[j].name
+ : contention_tracepoints[j].name;
}
for (j = 0; j < nr_callgraph_args; j++, i++)
@@ -2305,29 +2287,61 @@ static int parse_lock_type(const struct option *opt __maybe_unused, const char *
int unset __maybe_unused)
{
char *s, *tmp, *tok;
- int ret = 0;
s = strdup(str);
if (s == NULL)
return -1;
for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
- unsigned int flags = get_type_flag(tok);
+ bool found = false;
- if (flags == -1U) {
- pr_err("Unknown lock flags: %s\n", tok);
- ret = -1;
- break;
+ /* `tok` is a flags name if it contains ':'. */
+ if (strchr(tok, ':')) {
+ for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
+ if (!strcmp(lock_type_table[i].flags_name, tok) &&
+ add_lock_type(lock_type_table[i].flags)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ pr_err("Unknown lock flags name: %s\n", tok);
+ free(s);
+ return -1;
+ }
+
+ continue;
}
- if (!add_lock_type(flags)) {
- ret = -1;
- break;
+ /*
+ * Otherwise `tok` is a lock name.
+ * A single lock name can match multiple flag combinations.
+ * Replace the alias `pcpu-sem` with the actual name `percpu-rwsem`.
+ */
+ if (!strcmp(tok, "pcpu-sem"))
+ tok = (char *)"percpu-rwsem";
+ for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
+ if (!strcmp(lock_type_table[i].lock_name, tok)) {
+ if (add_lock_type(lock_type_table[i].flags)) {
+ found = true;
+ } else {
+ free(s);
+ return -1;
+ }
+ }
+ }
+
+ if (!found) {
+ pr_err("Unknown lock name: %s\n", tok);
+ free(s);
+ return -1;
}
+
}
free(s);
- return ret;
+ return 0;
}
static bool add_lock_addr(unsigned long addr)
@@ -2367,6 +2381,27 @@ static bool add_lock_sym(char *name)
return true;
}
+static bool add_lock_slab(char *name)
+{
+ char **tmp;
+ char *sym = strdup(name);
+
+ if (sym == NULL) {
+ pr_err("Memory allocation failure\n");
+ return false;
+ }
+
+ tmp = realloc(filters.slabs, (filters.nr_slabs + 1) * sizeof(*filters.slabs));
+ if (tmp == NULL) {
+ pr_err("Memory allocation failure\n");
+ return false;
+ }
+
+ tmp[filters.nr_slabs++] = sym;
+ filters.slabs = tmp;
+ return true;
+}
+
static int parse_lock_addr(const struct option *opt __maybe_unused, const char *str,
int unset __maybe_unused)
{
@@ -2390,6 +2425,14 @@ static int parse_lock_addr(const struct option *opt __maybe_unused, const char *
continue;
}
+ if (*tok == '&') {
+ if (!add_lock_slab(tok + 1)) {
+ ret = -1;
+ break;
+ }
+ continue;
+ }
+
/*
* At this moment, we don't have kernel symbols. Save the symbols
* in a separate list and resolve them to addresses later.
@@ -2404,8 +2447,55 @@ static int parse_lock_addr(const struct option *opt __maybe_unused, const char *
return ret;
}
-static int parse_call_stack(const struct option *opt __maybe_unused, const char *str,
- int unset __maybe_unused)
+static int parse_output(const struct option *opt __maybe_unused, const char *str,
+ int unset __maybe_unused)
+{
+ const char **name = (const char **)opt->value;
+
+ if (str == NULL)
+ return -1;
+
+ lock_output = fopen(str, "w");
+ if (lock_output == NULL) {
+ pr_err("Cannot open %s\n", str);
+ return -1;
+ }
+
+ *name = str;
+ return 0;
+}
+
+static bool add_lock_cgroup(char *name)
+{
+ u64 *tmp;
+ struct cgroup *cgrp;
+
+ cgrp = cgroup__new(name, /*do_open=*/false);
+ if (cgrp == NULL) {
+ pr_err("Failed to create cgroup: %s\n", name);
+ return false;
+ }
+
+ if (read_cgroup_id(cgrp) < 0) {
+ pr_err("Failed to read cgroup id for %s\n", name);
+ cgroup__put(cgrp);
+ return false;
+ }
+
+ tmp = realloc(filters.cgrps, (filters.nr_cgrps + 1) * sizeof(*filters.cgrps));
+ if (tmp == NULL) {
+ pr_err("Memory allocation failure\n");
+ return false;
+ }
+
+ tmp[filters.nr_cgrps++] = cgrp->id;
+ filters.cgrps = tmp;
+ cgroup__put(cgrp);
+ return true;
+}
+
+static int parse_cgroup_filter(const struct option *opt __maybe_unused, const char *str,
+ int unset __maybe_unused)
{
char *s, *tmp, *tok;
int ret = 0;
@@ -2415,38 +2505,87 @@ static int parse_call_stack(const struct option *opt __maybe_unused, const char
return -1;
for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
- struct callstack_filter *entry;
-
- entry = malloc(sizeof(*entry) + strlen(tok) + 1);
- if (entry == NULL) {
- pr_err("Memory allocation failure\n");
- return -1;
+ if (!add_lock_cgroup(tok)) {
+ ret = -1;
+ break;
}
-
- strcpy(entry->name, tok);
- list_add_tail(&entry->list, &callstack_filters);
}
free(s);
return ret;
}
-static int parse_output(const struct option *opt __maybe_unused, const char *str,
- int unset __maybe_unused)
+static bool add_lock_delay(char *spec)
{
- const char **name = (const char **)opt->value;
+ char *at, *pos;
+ struct lock_delay *tmp;
+ unsigned long duration;
- if (str == NULL)
- return -1;
+ at = strchr(spec, '@');
+ if (at == NULL) {
+ pr_err("lock delay should have '@' sign: %s\n", spec);
+ return false;
+ }
+ if (at == spec) {
+ pr_err("lock delay should have time before '@': %s\n", spec);
+ return false;
+ }
- lock_output = fopen(str, "w");
- if (lock_output == NULL) {
- pr_err("Cannot open %s\n", str);
+ *at = '\0';
+ duration = strtoul(spec, &pos, 0);
+ if (!strcmp(pos, "ns"))
+ duration *= 1;
+ else if (!strcmp(pos, "us"))
+ duration *= 1000;
+ else if (!strcmp(pos, "ms"))
+ duration *= 1000 * 1000;
+ else if (*pos) {
+ pr_err("invalid delay time: %s@%s\n", spec, at + 1);
+ return false;
+ }
+
+ if (duration > 10 * 1000 * 1000) {
+ pr_err("lock delay is too long: %s (> 10ms)\n", spec);
+ return false;
+ }
+
+ tmp = realloc(delays, (nr_delays + 1) * sizeof(*delays));
+ if (tmp == NULL) {
+ pr_err("Memory allocation failure\n");
+ return false;
+ }
+ delays = tmp;
+
+ delays[nr_delays].sym = strdup(at + 1);
+ if (delays[nr_delays].sym == NULL) {
+ pr_err("Memory allocation failure\n");
+ return false;
+ }
+ delays[nr_delays].time = duration;
+
+ nr_delays++;
+ return true;
+}
+
+static int parse_lock_delay(const struct option *opt __maybe_unused, const char *str,
+ int unset __maybe_unused)
+{
+ char *s, *tmp, *tok;
+ int ret = 0;
+
+ s = strdup(str);
+ if (s == NULL)
return -1;
+
+ for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
+ if (!add_lock_delay(tok)) {
+ ret = -1;
+ break;
+ }
}
- *name = str;
- return 0;
+ free(s);
+ return ret;
}
int cmd_lock(int argc, const char **argv)
@@ -2467,9 +2606,9 @@ int cmd_lock(int argc, const char **argv)
const struct option info_options[] = {
OPT_BOOLEAN('t', "threads", &info_threads,
- "dump thread list in perf.data"),
+ "dump the thread list in perf.data"),
OPT_BOOLEAN('m', "map", &info_map,
- "map of lock instances (address:name table)"),
+ "dump the map of lock instances (address:name table)"),
OPT_PARENT(lock_options)
};
@@ -2506,7 +2645,7 @@ int cmd_lock(int argc, const char **argv)
OPT_CALLBACK('M', "map-nr-entries", &bpf_map_entries, "num",
"Max number of BPF map entries", parse_map_entry),
OPT_CALLBACK(0, "max-stack", &max_stack_depth, "num",
- "Set the maximum stack depth when collecting lopck contention, "
+ "Set the maximum stack depth when collecting lock contention, "
"Default: " __stringify(CONTENTION_STACK_DEPTH), parse_max_stack),
OPT_INTEGER(0, "stack-skip", &stack_skip,
"Set the number of stack depth to skip when finding a lock caller, "
@@ -2522,6 +2661,11 @@ int cmd_lock(int argc, const char **argv)
OPT_BOOLEAN('o', "lock-owner", &show_lock_owner, "show lock owners instead of waiters"),
OPT_STRING_NOEMPTY('x', "field-separator", &symbol_conf.field_sep, "separator",
"print result in CSV format with custom separator"),
+ OPT_BOOLEAN(0, "lock-cgroup", &show_lock_cgroups, "show lock stats by cgroup"),
+ OPT_CALLBACK('G', "cgroup-filter", NULL, "CGROUPS",
+ "Filter specific cgroups", parse_cgroup_filter),
+ OPT_CALLBACK('J', "inject-delay", NULL, "TIME@FUNC",
+ "Inject delays to specific locks", parse_lock_delay),
OPT_PARENT(lock_options)
};
@@ -2580,6 +2724,13 @@ int cmd_lock(int argc, const char **argv)
if (argc)
usage_with_options(info_usage, info_options);
}
+
+ /* If neither threads nor map requested, display both */
+ if (!info_threads && !info_map) {
+ info_threads = true;
+ info_map = true;
+ }
+
/* recycling report_lock_ops */
trace_handler = &report_lock_ops;
rc = __cmd_report(true);
@@ -2606,6 +2757,9 @@ int cmd_lock(int argc, const char **argv)
usage_with_options(lock_usage, lock_options);
}
+ /* free usage string allocated by parse_options_subcommand */
+ free((void *)lock_usage[0]);
+
zfree(&lockhash_table);
return rc;
}