Diffstat (limited to 'tools/perf/util/synthetic-events.c')
-rw-r--r--  tools/perf/util/synthetic-events.c  794
1 file changed, 529 insertions(+), 265 deletions(-)
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index 70f095624a0b..2ba9fa25e00a 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0-only
#include "util/cgroup.h"
#include "util/data.h"
@@ -38,6 +38,7 @@
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <api/fs/fs.h>
#include <api/io.h>
+#include <api/io_dir.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
@@ -47,7 +48,7 @@
unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
-int perf_tool__process_synth_event(struct perf_tool *tool,
+int perf_tool__process_synth_event(const struct perf_tool *tool,
union perf_event *event,
struct machine *machine,
perf_event__handler_t process)
@@ -187,7 +188,7 @@ static int perf_event__prepare_comm(union perf_event *event, pid_t pid, pid_t ti
return 0;
}
-pid_t perf_event__synthesize_comm(struct perf_tool *tool,
+pid_t perf_event__synthesize_comm(const struct perf_tool *tool,
union perf_event *event, pid_t pid,
perf_event__handler_t process,
struct machine *machine)
@@ -218,7 +219,7 @@ static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
}
}
-int perf_event__synthesize_namespaces(struct perf_tool *tool,
+int perf_event__synthesize_namespaces(const struct perf_tool *tool,
union perf_event *event,
pid_t pid, pid_t tgid,
perf_event__handler_t process,
@@ -257,7 +258,7 @@ int perf_event__synthesize_namespaces(struct perf_tool *tool,
return 0;
}
-static int perf_event__synthesize_fork(struct perf_tool *tool,
+static int perf_event__synthesize_fork(const struct perf_tool *tool,
union perf_event *event,
pid_t pid, pid_t tgid, pid_t ppid,
perf_event__handler_t process,
@@ -364,31 +365,67 @@ static bool read_proc_maps_line(struct io *io, __u64 *start, __u64 *end,
}
static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
+ struct machine *machine,
bool is_kernel)
{
- struct build_id bid;
+ struct build_id bid = { .size = 0, };
+ struct nsinfo *nsi;
+ struct nscookie nc;
+ struct dso *dso = NULL;
+ struct dso_id dso_id = dso_id_empty;
int rc;
- if (is_kernel)
+ if (is_kernel) {
rc = sysfs__read_build_id("/sys/kernel/notes", &bid);
- else
- rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
+ goto out;
+ }
+
+ if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
+ build_id__init(&dso_id.build_id, event->build_id, event->build_id_size);
+ } else {
+ dso_id.maj = event->maj;
+ dso_id.min = event->min;
+ dso_id.ino = event->ino;
+ dso_id.ino_generation = event->ino_generation;
+ dso_id.mmap2_valid = true;
+ dso_id.mmap2_ino_generation_valid = true;
+ }
+
+ dso = dsos__findnew_id(&machine->dsos, event->filename, &dso_id);
+ if (dso && dso__has_build_id(dso)) {
+ bid = *dso__bid(dso);
+ rc = 0;
+ goto out;
+ }
+
+ nsi = nsinfo__new(event->pid);
+ nsinfo__mountns_enter(nsi, &nc);
+ rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
+
+ nsinfo__mountns_exit(&nc);
+ nsinfo__put(nsi);
+
+out:
if (rc == 0) {
memcpy(event->build_id, bid.data, sizeof(bid.data));
event->build_id_size = (u8) bid.size;
event->header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID;
event->__reserved_1 = 0;
event->__reserved_2 = 0;
+
+ if (dso && !dso__has_build_id(dso))
+ dso__set_build_id(dso, &bid);
} else {
if (event->filename[0] == '/') {
pr_debug2("Failed to read build ID for %s\n",
event->filename);
}
}
+ dso__put(dso);
}
-int perf_event__synthesize_mmap_events(struct perf_tool *tool,
+int perf_event__synthesize_mmap_events(const struct perf_tool *tool,
union perf_event *event,
pid_t pid, pid_t tgid,
perf_event__handler_t process,
@@ -495,8 +532,8 @@ out:
event->mmap2.pid = tgid;
event->mmap2.tid = pid;
- if (symbol_conf.buildid_mmap2)
- perf_record_mmap2__read_build_id(&event->mmap2, false);
+ if (!symbol_conf.no_buildid_mmap2)
+ perf_record_mmap2__read_build_id(&event->mmap2, machine, false);
if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
rc = -1;
@@ -512,7 +549,7 @@ out:
}
#ifdef HAVE_FILE_HANDLE
-static int perf_event__synthesize_cgroup(struct perf_tool *tool,
+static int perf_event__synthesize_cgroup(const struct perf_tool *tool,
union perf_event *event,
char *path, size_t mount_len,
perf_event__handler_t process,
@@ -552,7 +589,7 @@ static int perf_event__synthesize_cgroup(struct perf_tool *tool,
return 0;
}
-static int perf_event__walk_cgroup_tree(struct perf_tool *tool,
+static int perf_event__walk_cgroup_tree(const struct perf_tool *tool,
union perf_event *event,
char *path, size_t mount_len,
perf_event__handler_t process,
@@ -600,7 +637,7 @@ static int perf_event__walk_cgroup_tree(struct perf_tool *tool,
return ret;
}
-int perf_event__synthesize_cgroups(struct perf_tool *tool,
+int perf_event__synthesize_cgroups(const struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine)
{
@@ -627,7 +664,7 @@ int perf_event__synthesize_cgroups(struct perf_tool *tool,
return 0;
}
#else
-int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused,
+int perf_event__synthesize_cgroups(const struct perf_tool *tool __maybe_unused,
perf_event__handler_t process __maybe_unused,
struct machine *machine __maybe_unused)
{
@@ -635,18 +672,74 @@ int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused,
}
#endif
-int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process,
+struct perf_event__synthesize_modules_maps_cb_args {
+ const struct perf_tool *tool;
+ perf_event__handler_t process;
+ struct machine *machine;
+ union perf_event *event;
+};
+
+static int perf_event__synthesize_modules_maps_cb(struct map *map, void *data)
+{
+ struct perf_event__synthesize_modules_maps_cb_args *args = data;
+ union perf_event *event = args->event;
+ struct dso *dso;
+ size_t size;
+
+ if (!__map__is_kmodule(map))
+ return 0;
+
+ dso = map__dso(map);
+ if (!symbol_conf.no_buildid_mmap2) {
+ size = PERF_ALIGN(dso__long_name_len(dso) + 1, sizeof(u64));
+ event->mmap2.header.type = PERF_RECORD_MMAP2;
+ event->mmap2.header.size = (sizeof(event->mmap2) -
+ (sizeof(event->mmap2.filename) - size));
+ memset(event->mmap2.filename + size, 0, args->machine->id_hdr_size);
+ event->mmap2.header.size += args->machine->id_hdr_size;
+ event->mmap2.start = map__start(map);
+ event->mmap2.len = map__size(map);
+ event->mmap2.pid = args->machine->pid;
+
+ memcpy(event->mmap2.filename, dso__long_name(dso), dso__long_name_len(dso) + 1);
+
+ perf_record_mmap2__read_build_id(&event->mmap2, args->machine, false);
+ } else {
+ size = PERF_ALIGN(dso__long_name_len(dso) + 1, sizeof(u64));
+ event->mmap.header.type = PERF_RECORD_MMAP;
+ event->mmap.header.size = (sizeof(event->mmap) -
+ (sizeof(event->mmap.filename) - size));
+ memset(event->mmap.filename + size, 0, args->machine->id_hdr_size);
+ event->mmap.header.size += args->machine->id_hdr_size;
+ event->mmap.start = map__start(map);
+ event->mmap.len = map__size(map);
+ event->mmap.pid = args->machine->pid;
+
+ memcpy(event->mmap.filename, dso__long_name(dso), dso__long_name_len(dso) + 1);
+ }
+
+ if (perf_tool__process_synth_event(args->tool, event, args->machine, args->process) != 0)
+ return -1;
+
+ return 0;
+}
+
+int perf_event__synthesize_modules(const struct perf_tool *tool, perf_event__handler_t process,
struct machine *machine)
{
- int rc = 0;
- struct map *pos;
+ int rc;
struct maps *maps = machine__kernel_maps(machine);
- union perf_event *event;
- size_t size = symbol_conf.buildid_mmap2 ?
- sizeof(event->mmap2) : sizeof(event->mmap);
+ struct perf_event__synthesize_modules_maps_cb_args args = {
+ .tool = tool,
+ .process = process,
+ .machine = machine,
+ };
+ size_t size = symbol_conf.no_buildid_mmap2
+ ? sizeof(args.event->mmap)
+ : sizeof(args.event->mmap2);
- event = zalloc(size + machine->id_hdr_size);
- if (event == NULL) {
+ args.event = zalloc(size + machine->id_hdr_size);
+ if (args.event == NULL) {
pr_debug("Not enough memory synthesizing mmap event "
"for kernel modules\n");
return -1;
@@ -657,51 +750,13 @@ int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t
* __perf_event_mmap
*/
if (machine__is_host(machine))
- event->header.misc = PERF_RECORD_MISC_KERNEL;
+ args.event->header.misc = PERF_RECORD_MISC_KERNEL;
else
- event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
+ args.event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
- maps__for_each_entry(maps, pos) {
- if (!__map__is_kmodule(pos))
- continue;
+ rc = maps__for_each_map(maps, perf_event__synthesize_modules_maps_cb, &args);
- if (symbol_conf.buildid_mmap2) {
- size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
- event->mmap2.header.type = PERF_RECORD_MMAP2;
- event->mmap2.header.size = (sizeof(event->mmap2) -
- (sizeof(event->mmap2.filename) - size));
- memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
- event->mmap2.header.size += machine->id_hdr_size;
- event->mmap2.start = pos->start;
- event->mmap2.len = pos->end - pos->start;
- event->mmap2.pid = machine->pid;
-
- memcpy(event->mmap2.filename, pos->dso->long_name,
- pos->dso->long_name_len + 1);
-
- perf_record_mmap2__read_build_id(&event->mmap2, false);
- } else {
- size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
- event->mmap.header.type = PERF_RECORD_MMAP;
- event->mmap.header.size = (sizeof(event->mmap) -
- (sizeof(event->mmap.filename) - size));
- memset(event->mmap.filename + size, 0, machine->id_hdr_size);
- event->mmap.header.size += machine->id_hdr_size;
- event->mmap.start = pos->start;
- event->mmap.len = pos->end - pos->start;
- event->mmap.pid = machine->pid;
-
- memcpy(event->mmap.filename, pos->dso->long_name,
- pos->dso->long_name_len + 1);
- }
-
- if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
- rc = -1;
- break;
- }
- }
-
- free(event);
+ free(args.event);
return rc;
}
@@ -715,14 +770,14 @@ static int __event__synthesize_thread(union perf_event *comm_event,
union perf_event *fork_event,
union perf_event *namespaces_event,
pid_t pid, int full, perf_event__handler_t process,
- struct perf_tool *tool, struct machine *machine,
+ const struct perf_tool *tool, struct machine *machine,
bool needs_mmap, bool mmap_data)
{
char filename[PATH_MAX];
- struct dirent **dirent;
+ struct io_dir iod;
+ struct io_dirent64 *dent;
pid_t tgid, ppid;
int rc = 0;
- int i, n;
/* special case: only send one comm event using passed in pid */
if (!full) {
@@ -754,24 +809,28 @@ static int __event__synthesize_thread(union perf_event *comm_event,
snprintf(filename, sizeof(filename), "%s/proc/%d/task",
machine->root_dir, pid);
- n = scandir(filename, &dirent, filter_task, alphasort);
- if (n < 0)
- return n;
+ io_dir__init(&iod, open(filename, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
+ if (iod.dirfd < 0)
+ return -1;
- for (i = 0; i < n; i++) {
+ while ((dent = io_dir__readdir(&iod)) != NULL) {
char *end;
pid_t _pid;
bool kernel_thread = false;
- _pid = strtol(dirent[i]->d_name, &end, 10);
+ if (!isdigit(dent->d_name[0]))
+ continue;
+
+ _pid = strtol(dent->d_name, &end, 10);
if (*end)
continue;
- rc = -1;
+ /* some threads may exit just after scan, ignore it */
if (perf_event__prepare_comm(comm_event, pid, _pid, machine,
&tgid, &ppid, &kernel_thread) != 0)
- break;
+ continue;
+ rc = -1;
if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
ppid, process, machine) < 0)
break;
@@ -796,14 +855,12 @@ static int __event__synthesize_thread(union perf_event *comm_event,
}
}
- for (i = 0; i < n; i++)
- zfree(&dirent[i]);
- free(dirent);
+ close(iod.dirfd);
return rc;
}
-int perf_event__synthesize_thread_map(struct perf_tool *tool,
+int perf_event__synthesize_thread_map(const struct perf_tool *tool,
struct perf_thread_map *threads,
perf_event__handler_t process,
struct machine *machine,
@@ -880,7 +937,7 @@ out:
return err;
}
-static int __perf_event__synthesize_threads(struct perf_tool *tool,
+static int __perf_event__synthesize_threads(const struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine,
bool needs_mmap,
@@ -944,7 +1001,7 @@ out:
}
struct synthesize_threads_arg {
- struct perf_tool *tool;
+ const struct perf_tool *tool;
perf_event__handler_t process;
struct machine *machine;
bool needs_mmap;
@@ -966,7 +1023,7 @@ static void *synthesize_threads_worker(void *arg)
return NULL;
}
-int perf_event__synthesize_threads(struct perf_tool *tool,
+int perf_event__synthesize_threads(const struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine,
bool needs_mmap, bool mmap_data,
@@ -987,7 +1044,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
return 0;
snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
- n = scandir(proc_path, &dirent, filter_task, alphasort);
+ n = scandir(proc_path, &dirent, filter_task, NULL);
if (n < 0)
return err;
@@ -1006,11 +1063,11 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
if (thread_nr > n)
thread_nr = n;
- synthesize_threads = calloc(sizeof(pthread_t), thread_nr);
+ synthesize_threads = calloc(thread_nr, sizeof(pthread_t));
if (synthesize_threads == NULL)
goto free_dirent;
- args = calloc(sizeof(*args), thread_nr);
+ args = calloc(thread_nr, sizeof(*args));
if (args == NULL)
goto free_threads;
@@ -1055,20 +1112,20 @@ free_dirent:
return err;
}
-int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
+int __weak perf_event__synthesize_extra_kmaps(const struct perf_tool *tool __maybe_unused,
perf_event__handler_t process __maybe_unused,
struct machine *machine __maybe_unused)
{
return 0;
}
-static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
+static int __perf_event__synthesize_kernel_mmap(const struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine)
{
union perf_event *event;
- size_t size = symbol_conf.buildid_mmap2 ?
- sizeof(event->mmap2) : sizeof(event->mmap);
+ size_t size = symbol_conf.no_buildid_mmap2 ?
+ sizeof(event->mmap) : sizeof(event->mmap2);
struct map *map = machine__kernel_map(machine);
struct kmap *kmap;
int err;
@@ -1102,7 +1159,7 @@ static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
}
- if (symbol_conf.buildid_mmap2) {
+ if (!symbol_conf.no_buildid_mmap2) {
size = snprintf(event->mmap2.filename, sizeof(event->mmap2.filename),
"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
size = PERF_ALIGN(size, sizeof(u64));
@@ -1110,11 +1167,11 @@ static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
event->mmap2.header.size = (sizeof(event->mmap2) -
(sizeof(event->mmap2.filename) - size) + machine->id_hdr_size);
event->mmap2.pgoff = kmap->ref_reloc_sym->addr;
- event->mmap2.start = map->start;
- event->mmap2.len = map->end - event->mmap.start;
+ event->mmap2.start = map__start(map);
+ event->mmap2.len = map__end(map) - event->mmap.start;
event->mmap2.pid = machine->pid;
- perf_record_mmap2__read_build_id(&event->mmap2, true);
+ perf_record_mmap2__read_build_id(&event->mmap2, machine, true);
} else {
size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
@@ -1123,8 +1180,8 @@ static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
event->mmap.header.size = (sizeof(event->mmap) -
(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
event->mmap.pgoff = kmap->ref_reloc_sym->addr;
- event->mmap.start = map->start;
- event->mmap.len = map->end - event->mmap.start;
+ event->mmap.start = map__start(map);
+ event->mmap.len = map__end(map) - event->mmap.start;
event->mmap.pid = machine->pid;
}
@@ -1134,7 +1191,7 @@ static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
return err;
}
-int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
+int perf_event__synthesize_kernel_mmap(const struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine)
{
@@ -1147,7 +1204,7 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
return perf_event__synthesize_extra_kmaps(tool, process, machine);
}
-int perf_event__synthesize_thread_map2(struct perf_tool *tool,
+int perf_event__synthesize_thread_map2(const struct perf_tool *tool,
struct perf_thread_map *threads,
perf_event__handler_t process,
struct machine *machine)
@@ -1183,122 +1240,122 @@ int perf_event__synthesize_thread_map2(struct perf_tool *tool,
return err;
}
-static void synthesize_cpus(struct cpu_map_entries *cpus,
- struct perf_cpu_map *map)
-{
- int i, map_nr = perf_cpu_map__nr(map);
-
- cpus->nr = map_nr;
-
- for (i = 0; i < map_nr; i++)
- cpus->cpu[i] = perf_cpu_map__cpu(map, i).cpu;
-}
-
-static void synthesize_mask(struct perf_record_record_cpu_map *mask,
- struct perf_cpu_map *map, int max)
-{
- int i;
-
- mask->nr = BITS_TO_LONGS(max);
- mask->long_size = sizeof(long);
-
- for (i = 0; i < perf_cpu_map__nr(map); i++)
- set_bit(perf_cpu_map__cpu(map, i).cpu, mask->mask);
-}
+struct synthesize_cpu_map_data {
+ const struct perf_cpu_map *map;
+ int nr;
+ int min_cpu;
+ int max_cpu;
+ int has_any_cpu;
+ int type;
+ size_t size;
+ struct perf_record_cpu_map_data *data;
+};
-static size_t cpus_size(struct perf_cpu_map *map)
+static void synthesize_cpus(struct synthesize_cpu_map_data *data)
{
- return sizeof(struct cpu_map_entries) + perf_cpu_map__nr(map) * sizeof(u16);
+ data->data->type = PERF_CPU_MAP__CPUS;
+ data->data->cpus_data.nr = data->nr;
+ for (int i = 0; i < data->nr; i++)
+ data->data->cpus_data.cpu[i] = perf_cpu_map__cpu(data->map, i).cpu;
}
-static size_t mask_size(struct perf_cpu_map *map, int *max)
+static void synthesize_mask(struct synthesize_cpu_map_data *data)
{
- int i;
+ int idx;
+ struct perf_cpu cpu;
- *max = 0;
+ /* Due to padding, the 4bytes per entry mask variant is always smaller. */
+ data->data->type = PERF_CPU_MAP__MASK;
+ data->data->mask32_data.nr = BITS_TO_U32(data->max_cpu);
+ data->data->mask32_data.long_size = 4;
- for (i = 0; i < perf_cpu_map__nr(map); i++) {
- /* bit position of the cpu is + 1 */
- int bit = perf_cpu_map__cpu(map, i).cpu + 1;
+ perf_cpu_map__for_each_cpu(cpu, idx, data->map) {
+ int bit_word = cpu.cpu / 32;
+ u32 bit_mask = 1U << (cpu.cpu & 31);
- if (bit > *max)
- *max = bit;
+ data->data->mask32_data.mask[bit_word] |= bit_mask;
}
+}
- return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long);
+static void synthesize_range_cpus(struct synthesize_cpu_map_data *data)
+{
+ data->data->type = PERF_CPU_MAP__RANGE_CPUS;
+ data->data->range_cpu_data.any_cpu = data->has_any_cpu;
+ data->data->range_cpu_data.start_cpu = data->min_cpu;
+ data->data->range_cpu_data.end_cpu = data->max_cpu;
}
-void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max)
+static void *cpu_map_data__alloc(struct synthesize_cpu_map_data *syn_data,
+ size_t header_size)
{
size_t size_cpus, size_mask;
- bool is_dummy = perf_cpu_map__empty(map);
-
- /*
- * Both array and mask data have variable size based
- * on the number of cpus and their actual values.
- * The size of the 'struct perf_record_cpu_map_data' is:
- *
- * array = size of 'struct cpu_map_entries' +
- * number of cpus * sizeof(u64)
- *
- * mask = size of 'struct perf_record_record_cpu_map' +
- * maximum cpu bit converted to size of longs
- *
- * and finally + the size of 'struct perf_record_cpu_map_data'.
- */
- size_cpus = cpus_size(map);
- size_mask = mask_size(map, max);
- if (is_dummy || (size_cpus < size_mask)) {
- *size += size_cpus;
- *type = PERF_CPU_MAP__CPUS;
- } else {
- *size += size_mask;
- *type = PERF_CPU_MAP__MASK;
- }
-
- *size += sizeof(struct perf_record_cpu_map_data);
- *size = PERF_ALIGN(*size, sizeof(u64));
- return zalloc(*size);
+ syn_data->nr = perf_cpu_map__nr(syn_data->map);
+ syn_data->has_any_cpu = (perf_cpu_map__cpu(syn_data->map, 0).cpu == -1) ? 1 : 0;
+
+ syn_data->min_cpu = perf_cpu_map__cpu(syn_data->map, syn_data->has_any_cpu).cpu;
+ syn_data->max_cpu = perf_cpu_map__max(syn_data->map).cpu;
+ if (syn_data->max_cpu - syn_data->min_cpu + 1 == syn_data->nr - syn_data->has_any_cpu) {
+ /* A consecutive range of CPUs can be encoded using a range. */
+ assert(sizeof(u16) + sizeof(struct perf_record_range_cpu_map) == sizeof(u64));
+ syn_data->type = PERF_CPU_MAP__RANGE_CPUS;
+ syn_data->size = header_size + sizeof(u64);
+ return zalloc(syn_data->size);
+ }
+
+ size_cpus = sizeof(u16) + sizeof(struct cpu_map_entries) + syn_data->nr * sizeof(u16);
+ /* Due to padding, the 4bytes per entry mask variant is always smaller. */
+ size_mask = sizeof(u16) + sizeof(struct perf_record_mask_cpu_map32) +
+ BITS_TO_U32(syn_data->max_cpu) * sizeof(__u32);
+ if (syn_data->has_any_cpu || size_cpus < size_mask) {
+ /* Follow the CPU map encoding. */
+ syn_data->type = PERF_CPU_MAP__CPUS;
+ syn_data->size = header_size + PERF_ALIGN(size_cpus, sizeof(u64));
+ return zalloc(syn_data->size);
+ }
+ /* Encode using a bitmask. */
+ syn_data->type = PERF_CPU_MAP__MASK;
+ syn_data->size = header_size + PERF_ALIGN(size_mask, sizeof(u64));
+ return zalloc(syn_data->size);
}
-void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
- u16 type, int max)
+static void cpu_map_data__synthesize(struct synthesize_cpu_map_data *data)
{
- data->type = type;
-
- switch (type) {
+ switch (data->type) {
case PERF_CPU_MAP__CPUS:
- synthesize_cpus((struct cpu_map_entries *) data->data, map);
+ synthesize_cpus(data);
break;
case PERF_CPU_MAP__MASK:
- synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max);
+ synthesize_mask(data);
+ break;
+ case PERF_CPU_MAP__RANGE_CPUS:
+ synthesize_range_cpus(data);
+ break;
default:
break;
}
}
-static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
+static struct perf_record_cpu_map *cpu_map_event__new(const struct perf_cpu_map *map)
{
- size_t size = sizeof(struct perf_record_cpu_map);
+ struct synthesize_cpu_map_data syn_data = { .map = map };
struct perf_record_cpu_map *event;
- int max;
- u16 type;
- event = cpu_map_data__alloc(map, &size, &type, &max);
+
+ event = cpu_map_data__alloc(&syn_data, sizeof(struct perf_event_header));
if (!event)
return NULL;
+ syn_data.data = &event->data;
event->header.type = PERF_RECORD_CPU_MAP;
- event->header.size = size;
- event->data.type = type;
-
- cpu_map_data__synthesize(&event->data, map, type, max);
+ event->header.size = syn_data.size;
+ cpu_map_data__synthesize(&syn_data);
return event;
}
-int perf_event__synthesize_cpu_map(struct perf_tool *tool,
- struct perf_cpu_map *map,
+
+int perf_event__synthesize_cpu_map(const struct perf_tool *tool,
+ const struct perf_cpu_map *map,
perf_event__handler_t process,
struct machine *machine)
{
@@ -1315,7 +1372,7 @@ int perf_event__synthesize_cpu_map(struct perf_tool *tool,
return err;
}
-int perf_event__synthesize_stat_config(struct perf_tool *tool,
+int perf_event__synthesize_stat_config(const struct perf_tool *tool,
struct perf_stat_config *config,
perf_event__handler_t process,
struct machine *machine)
@@ -1342,6 +1399,7 @@ int perf_event__synthesize_stat_config(struct perf_tool *tool,
ADD(AGGR_MODE, config->aggr_mode)
ADD(INTERVAL, config->interval)
ADD(SCALE, config->scale)
+ ADD(AGGR_LEVEL, config->aggr_level)
WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
"stat config terms unbalanced\n");
@@ -1353,7 +1411,7 @@ int perf_event__synthesize_stat_config(struct perf_tool *tool,
return err;
}
-int perf_event__synthesize_stat(struct perf_tool *tool,
+int perf_event__synthesize_stat(const struct perf_tool *tool,
struct perf_cpu cpu, u32 thread, u64 id,
struct perf_counts_values *count,
perf_event__handler_t process,
@@ -1375,7 +1433,7 @@ int perf_event__synthesize_stat(struct perf_tool *tool,
return process(tool, (union perf_event *) &event, NULL, machine);
}
-int perf_event__synthesize_stat_round(struct perf_tool *tool,
+int perf_event__synthesize_stat_round(const struct perf_tool *tool,
u64 evtime, u64 type,
perf_event__handler_t process,
struct machine *machine)
@@ -1431,11 +1489,12 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
result += sizeof(u64);
/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
if (read_format & PERF_FORMAT_GROUP) {
- sz = sample->read.group.nr *
- sizeof(struct sample_read_value);
- result += sz;
+ sz = sample_read_value_size(read_format);
+ result += sz * sample->read.group.nr;
} else {
result += sizeof(u64);
+ if (read_format & PERF_FORMAT_LOST)
+ result += sizeof(u64);
}
}
@@ -1457,9 +1516,9 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
}
if (type & PERF_SAMPLE_REGS_USER) {
- if (sample->user_regs.abi) {
+ if (sample->user_regs && sample->user_regs->abi) {
result += sizeof(u64);
- sz = hweight64(sample->user_regs.mask) * sizeof(u64);
+ sz = hweight64(sample->user_regs->mask) * sizeof(u64);
result += sz;
} else {
result += sizeof(u64);
@@ -1485,9 +1544,9 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
result += sizeof(u64);
if (type & PERF_SAMPLE_REGS_INTR) {
- if (sample->intr_regs.abi) {
+ if (sample->intr_regs && sample->intr_regs->abi) {
result += sizeof(u64);
- sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
+ sz = hweight64(sample->intr_regs->mask) * sizeof(u64);
result += sz;
} else {
result += sizeof(u64);
@@ -1514,10 +1573,30 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
return result;
}
-void __weak arch_perf_synthesize_sample_weight(const struct perf_sample *data,
+static void perf_synthesize_sample_weight(const struct perf_sample *data,
__u64 *array, u64 type __maybe_unused)
{
*array = data->weight;
+
+ if (type & PERF_SAMPLE_WEIGHT_STRUCT) {
+ *array &= 0xffffffff;
+ *array |= ((u64)data->ins_lat << 32);
+ *array |= ((u64)data->weight3 << 48);
+ }
+}
+
+static __u64 *copy_read_group_values(__u64 *array, __u64 read_format,
+ const struct perf_sample *sample)
+{
+ size_t sz = sample_read_value_size(read_format);
+ struct sample_read_value *v = sample->read.group.values;
+
+ sample_read_group__for_each(v, sample->read.group.nr, read_format) {
+ /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+ memcpy(array, v, sz);
+ array = (void *)array + sz;
+ }
+ return array;
}
int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
@@ -1601,13 +1680,16 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo
/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
if (read_format & PERF_FORMAT_GROUP) {
- sz = sample->read.group.nr *
- sizeof(struct sample_read_value);
- memcpy(array, sample->read.group.values, sz);
- array = (void *)array + sz;
+ array = copy_read_group_values(array, read_format,
+ sample);
} else {
*array = sample->read.one.id;
array++;
+
+ if (read_format & PERF_FORMAT_LOST) {
+ *array = sample->read.one.lost;
+ array++;
+ }
}
}
@@ -1618,12 +1700,16 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo
}
if (type & PERF_SAMPLE_RAW) {
- u.val32[0] = sample->raw_size;
- *array = u.val64;
- array = (void *)array + sizeof(u32);
+ u32 *array32 = (void *)array;
- memcpy(array, sample->raw_data, sample->raw_size);
- array = (void *)array + sample->raw_size;
+ *array32 = sample->raw_size;
+ array32++;
+
+ memcpy(array32, sample->raw_data, sample->raw_size);
+ array = (void *)(array32 + (sample->raw_size / sizeof(u32)));
+
+ /* make sure the array is 64-bit aligned */
+ BUG_ON(((long)array) % sizeof(u64));
}
if (type & PERF_SAMPLE_BRANCH_STACK) {
@@ -1635,10 +1721,10 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo
}
if (type & PERF_SAMPLE_REGS_USER) {
- if (sample->user_regs.abi) {
- *array++ = sample->user_regs.abi;
- sz = hweight64(sample->user_regs.mask) * sizeof(u64);
- memcpy(array, sample->user_regs.regs, sz);
+ if (sample->user_regs && sample->user_regs->abi) {
+ *array++ = sample->user_regs->abi;
+ sz = hweight64(sample->user_regs->mask) * sizeof(u64);
+ memcpy(array, sample->user_regs->regs, sz);
array = (void *)array + sz;
} else {
*array++ = 0;
@@ -1656,7 +1742,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo
}
if (type & PERF_SAMPLE_WEIGHT_TYPE) {
- arch_perf_synthesize_sample_weight(sample, array, type);
+ perf_synthesize_sample_weight(sample, array, type);
array++;
}
@@ -1671,10 +1757,10 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo
}
if (type & PERF_SAMPLE_REGS_INTR) {
- if (sample->intr_regs.abi) {
- *array++ = sample->intr_regs.abi;
- sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
- memcpy(array, sample->intr_regs.regs, sz);
+ if (sample->intr_regs && sample->intr_regs->abi) {
+ *array++ = sample->intr_regs->abi;
+ sz = hweight64(sample->intr_regs->mask) * sizeof(u64);
+ memcpy(array, sample->intr_regs->regs, sz);
array = (void *)array + sz;
} else {
*array++ = 0;
@@ -1711,48 +1797,112 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo
return 0;
}
-int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
- struct evlist *evlist, struct machine *machine)
+int perf_event__synthesize_id_sample(__u64 *array, u64 type, const struct perf_sample *sample)
+{
+ __u64 *start = array;
+
+ /*
+ * used for cross-endian analysis. See git commit 65014ab3
+ * for why this goofiness is needed.
+ */
+ union u64_swap u;
+
+ if (type & PERF_SAMPLE_TID) {
+ u.val32[0] = sample->pid;
+ u.val32[1] = sample->tid;
+ *array = u.val64;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TIME) {
+ *array = sample->time;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_ID) {
+ *array = sample->id;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_STREAM_ID) {
+ *array = sample->stream_id;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_CPU) {
+ u.val32[0] = sample->cpu;
+ u.val32[1] = 0;
+ *array = u.val64;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_IDENTIFIER) {
+ *array = sample->id;
+ array++;
+ }
+
+ return (void *)array - (void *)start;
+}
+
+int __perf_event__synthesize_id_index(const struct perf_tool *tool, perf_event__handler_t process,
+ struct evlist *evlist, struct machine *machine, size_t from)
{
union perf_event *ev;
struct evsel *evsel;
- size_t nr = 0, i = 0, sz, max_nr, n;
+ size_t nr = 0, i = 0, sz, max_nr, n, pos;
+ size_t e1_sz = sizeof(struct id_index_entry);
+ size_t e2_sz = sizeof(struct id_index_entry_2);
+ size_t etot_sz = e1_sz + e2_sz;
+ bool e2_needed = false;
int err;
- pr_debug2("Synthesizing id index\n");
+ max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) / etot_sz;
- max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) /
- sizeof(struct id_index_entry);
-
- evlist__for_each_entry(evlist, evsel)
+ pos = 0;
+ evlist__for_each_entry(evlist, evsel) {
+ if (pos++ < from)
+ continue;
nr += evsel->core.ids;
+ }
+
+ if (!nr)
+ return 0;
+
+ pr_debug2("Synthesizing id index\n");
n = nr > max_nr ? max_nr : nr;
- sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry);
+ sz = sizeof(struct perf_record_id_index) + n * etot_sz;
ev = zalloc(sz);
if (!ev)
return -ENOMEM;
+ sz = sizeof(struct perf_record_id_index) + n * e1_sz;
+
ev->id_index.header.type = PERF_RECORD_ID_INDEX;
- ev->id_index.header.size = sz;
ev->id_index.nr = n;
+ pos = 0;
evlist__for_each_entry(evlist, evsel) {
u32 j;
- for (j = 0; j < evsel->core.ids; j++) {
+ if (pos++ < from)
+ continue;
+ for (j = 0; j < evsel->core.ids; j++, i++) {
struct id_index_entry *e;
+ struct id_index_entry_2 *e2;
struct perf_sample_id *sid;
if (i >= n) {
+ ev->id_index.header.size = sz + (e2_needed ? n * e2_sz : 0);
err = process(tool, ev, NULL, machine);
if (err)
goto out_err;
nr -= n;
i = 0;
+ e2_needed = false;
}
- e = &ev->id_index.entries[i++];
+ e = &ev->id_index.entries[i];
e->id = evsel->core.id[j];
@@ -1765,11 +1915,18 @@ int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_
e->idx = sid->idx;
e->cpu = sid->cpu.cpu;
e->tid = sid->tid;
+
+ if (sid->machine_pid)
+ e2_needed = true;
+
+ e2 = (void *)ev + sz;
+ e2[i].machine_pid = sid->machine_pid;
+ e2[i].vcpu = sid->vcpu.cpu;
}
}
- sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry);
- ev->id_index.header.size = sz;
+ sz = sizeof(struct perf_record_id_index) + nr * e1_sz;
+ ev->id_index.header.size = sz + (e2_needed ? nr * e2_sz : 0);
ev->id_index.nr = nr;
err = process(tool, ev, NULL, machine);
@@ -1779,11 +1936,36 @@ out_err:
return err;
}
-int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
+int perf_event__synthesize_id_index(const struct perf_tool *tool, perf_event__handler_t process,
+ struct evlist *evlist, struct machine *machine)
+{
+ return __perf_event__synthesize_id_index(tool, process, evlist, machine, 0);
+}
+
+int __machine__synthesize_threads(struct machine *machine, const struct perf_tool *tool,
struct target *target, struct perf_thread_map *threads,
perf_event__handler_t process, bool needs_mmap,
bool data_mmap, unsigned int nr_threads_synthesize)
{
+ /*
+ * When perf runs in non-root PID namespace, and the namespace's proc FS
+ * is not mounted, nsinfo__is_in_root_namespace() returns false.
+ * In this case, the proc FS is coming for the parent namespace, thus
+ * perf tool will wrongly gather process info from its parent PID
+ * namespace.
+ *
+ * To avoid the confusion that the perf tool runs in a child PID
+ * namespace but it synthesizes thread info from its parent PID
+ * namespace, returns failure with warning.
+ */
+ if (!nsinfo__is_in_root_namespace()) {
+ pr_err("Perf runs in non-root PID namespace but it tries to ");
+ pr_err("gather process info from its parent PID namespace.\n");
+ pr_err("Please mount the proc file system properly, e.g. ");
+ pr_err("add the option '--mount-proc' for unshare command.\n");
+ return -EPERM;
+ }
+
if (target__has_task(target))
return perf_event__synthesize_thread_map(tool, threads, process, machine,
needs_mmap, data_mmap);
@@ -1821,7 +2003,7 @@ static struct perf_record_event_update *event_update_event__new(size_t size, u64
return ev;
}
-int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel,
+int perf_event__synthesize_event_update_unit(const struct perf_tool *tool, struct evsel *evsel,
perf_event__handler_t process)
{
size_t size = strlen(evsel->unit);
@@ -1832,13 +2014,13 @@ int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evse
if (ev == NULL)
return -ENOMEM;
- strlcpy(ev->data, evsel->unit, size + 1);
+ strlcpy(ev->unit, evsel->unit, size + 1);
err = process(tool, (union perf_event *)ev, NULL, NULL);
free(ev);
return err;
}
-int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel,
+int perf_event__synthesize_event_update_scale(const struct perf_tool *tool, struct evsel *evsel,
perf_event__handler_t process)
{
struct perf_record_event_update *ev;
@@ -1849,59 +2031,53 @@ int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evs
if (ev == NULL)
return -ENOMEM;
- ev_data = (struct perf_record_event_update_scale *)ev->data;
- ev_data->scale = evsel->scale;
+ ev->scale.scale = evsel->scale;
err = process(tool, (union perf_event *)ev, NULL, NULL);
free(ev);
return err;
}
-int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel,
+int perf_event__synthesize_event_update_name(const struct perf_tool *tool, struct evsel *evsel,
perf_event__handler_t process)
{
struct perf_record_event_update *ev;
- size_t len = strlen(evsel->name);
+ size_t len = strlen(evsel__name(evsel));
int err;
ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]);
if (ev == NULL)
return -ENOMEM;
- strlcpy(ev->data, evsel->name, len + 1);
+ strlcpy(ev->name, evsel->name, len + 1);
err = process(tool, (union perf_event *)ev, NULL, NULL);
free(ev);
return err;
}
-int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel,
+int perf_event__synthesize_event_update_cpus(const struct perf_tool *tool, struct evsel *evsel,
perf_event__handler_t process)
{
- size_t size = sizeof(struct perf_record_event_update);
+ struct synthesize_cpu_map_data syn_data = { .map = evsel->core.pmu_cpus };
struct perf_record_event_update *ev;
- int max, err;
- u16 type;
-
- if (!evsel->core.own_cpus)
- return 0;
+ int err;
- ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
+ ev = cpu_map_data__alloc(&syn_data, sizeof(struct perf_event_header) + 2 * sizeof(u64));
if (!ev)
return -ENOMEM;
+ syn_data.data = &ev->cpus.cpus;
ev->header.type = PERF_RECORD_EVENT_UPDATE;
- ev->header.size = (u16)size;
+ ev->header.size = (u16)syn_data.size;
ev->type = PERF_EVENT_UPDATE__CPUS;
ev->id = evsel->core.id[0];
-
- cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data,
- evsel->core.own_cpus, type, max);
+ cpu_map_data__synthesize(&syn_data);
err = process(tool, (union perf_event *)ev, NULL, NULL);
free(ev);
return err;
}
-int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist,
+int perf_event__synthesize_attrs(const struct perf_tool *tool, struct evlist *evlist,
perf_event__handler_t process)
{
struct evsel *evsel;
@@ -1929,7 +2105,7 @@ static bool has_scale(struct evsel *evsel)
return evsel->scale != 1;
}
-int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list,
+int perf_event__synthesize_extra_attr(const struct perf_tool *tool, struct evlist *evsel_list,
perf_event__handler_t process, bool is_pipe)
{
struct evsel *evsel;
@@ -1962,7 +2138,7 @@ int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evs
}
}
- if (evsel->core.own_cpus) {
+ if (evsel->core.pmu_cpus) {
err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
if (err < 0) {
pr_err("Couldn't synthesize evsel cpus.\n");
@@ -1985,7 +2161,7 @@ int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evs
return 0;
}
-int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr,
+int perf_event__synthesize_attr(const struct perf_tool *tool, struct perf_event_attr *attr,
u32 ids, u64 *id, perf_event__handler_t process)
{
union perf_event *ev;
@@ -2003,7 +2179,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *
return -ENOMEM;
ev->attr.attr = *attr;
- memcpy(ev->attr.id, id, ids * sizeof(u64));
+ memcpy(perf_record_header_attr_id(ev), id, ids * sizeof(u64));
ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
ev->attr.header.size = (u16)size;
@@ -2018,7 +2194,8 @@ int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *
return err;
}
-int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist,
+#ifdef HAVE_LIBTRACEEVENT
+int perf_event__synthesize_tracing_data(const struct perf_tool *tool, int fd, struct evlist *evlist,
perf_event__handler_t process)
{
union perf_event ev;
@@ -2041,7 +2218,7 @@ int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct e
if (!tdata)
return -1;
- memset(&ev, 0, sizeof(ev));
+ memset(&ev, 0, sizeof(ev.tracing_data));
ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
size = tdata->size;
@@ -2064,31 +2241,114 @@ int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct e
return aligned_size;
}
+#endif
-int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc,
- perf_event__handler_t process, struct machine *machine)
+int perf_event__synthesize_build_id(const struct perf_tool *tool,
+ struct perf_sample *sample,
+ struct machine *machine,
+ perf_event__handler_t process,
+ const struct evsel *evsel,
+ __u16 misc,
+ const struct build_id *bid,
+ const char *filename)
{
union perf_event ev;
size_t len;
- if (!pos->hit)
- return 0;
+ len = sizeof(ev.build_id) + strlen(filename) + 1;
+ len = PERF_ALIGN(len, sizeof(u64));
- memset(&ev, 0, sizeof(ev));
+ memset(&ev, 0, len);
- len = pos->long_name_len + 1;
- len = PERF_ALIGN(len, NAME_ALIGN);
- memcpy(&ev.build_id.build_id, pos->bid.data, sizeof(pos->bid.data));
+ ev.build_id.size = bid->size;
+ if (ev.build_id.size > sizeof(ev.build_id.build_id))
+ ev.build_id.size = sizeof(ev.build_id.build_id);
+ memcpy(ev.build_id.build_id, bid->data, ev.build_id.size);
ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
- ev.build_id.header.misc = misc;
+ ev.build_id.header.misc = misc | PERF_RECORD_MISC_BUILD_ID_SIZE;
ev.build_id.pid = machine->pid;
- ev.build_id.header.size = sizeof(ev.build_id) + len;
- memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
+ ev.build_id.header.size = len;
+ strcpy(ev.build_id.filename, filename);
+
+ if (evsel) {
+ void *array = &ev;
+ int ret;
+
+ array += ev.header.size;
+ ret = perf_event__synthesize_id_sample(array, evsel->core.attr.sample_type, sample);
+ if (ret < 0)
+ return ret;
+
+ if (ret & 7) {
+ pr_err("Bad id sample size %d\n", ret);
+ return -EINVAL;
+ }
- return process(tool, &ev, NULL, machine);
+ ev.header.size += ret;
+ }
+
+ return process(tool, &ev, sample, machine);
}
-int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool,
+int perf_event__synthesize_mmap2_build_id(const struct perf_tool *tool,
+ struct perf_sample *sample,
+ struct machine *machine,
+ perf_event__handler_t process,
+ const struct evsel *evsel,
+ __u16 misc,
+ __u32 pid, __u32 tid,
+ __u64 start, __u64 len, __u64 pgoff,
+ const struct build_id *bid,
+ __u32 prot, __u32 flags,
+ const char *filename)
+{
+ union perf_event ev;
+ size_t ev_len;
+ void *array;
+ int ret;
+
+ ev_len = sizeof(ev.mmap2) - sizeof(ev.mmap2.filename) + strlen(filename) + 1;
+ ev_len = PERF_ALIGN(ev_len, sizeof(u64));
+
+ memset(&ev, 0, ev_len);
+
+ ev.mmap2.header.type = PERF_RECORD_MMAP2;
+ ev.mmap2.header.misc = misc | PERF_RECORD_MISC_MMAP_BUILD_ID;
+ ev.mmap2.header.size = ev_len;
+
+ ev.mmap2.pid = pid;
+ ev.mmap2.tid = tid;
+ ev.mmap2.start = start;
+ ev.mmap2.len = len;
+ ev.mmap2.pgoff = pgoff;
+
+ ev.mmap2.build_id_size = bid->size;
+ if (ev.mmap2.build_id_size > sizeof(ev.mmap2.build_id))
+ ev.mmap2.build_id_size = sizeof(ev.mmap2.build_id);
+ memcpy(ev.mmap2.build_id, bid->data, ev.mmap2.build_id_size);
+
+ ev.mmap2.prot = prot;
+ ev.mmap2.flags = flags;
+
+ memcpy(ev.mmap2.filename, filename, min(strlen(filename), sizeof(ev.mmap2.filename)));
+
+ array = &ev;
+ array += ev.header.size;
+ ret = perf_event__synthesize_id_sample(array, evsel->core.attr.sample_type, sample);
+ if (ret < 0)
+ return ret;
+
+ if (ret & 7) {
+ pr_err("Bad id sample size %d\n", ret);
+ return -EINVAL;
+ }
+
+ ev.header.size += ret;
+
+ return process(tool, &ev, sample, machine);
+}
+
+int perf_event__synthesize_stat_events(struct perf_stat_config *config, const struct perf_tool *tool,
struct evlist *evlist, perf_event__handler_t process, bool attrs)
{
int err;
@@ -2108,7 +2368,7 @@ int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct p
return err;
}
- err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus, process, NULL);
+ err = perf_event__synthesize_cpu_map(tool, evlist->core.user_requested_cpus, process, NULL);
if (err < 0) {
pr_err("Couldn't synthesize thread map.\n");
return err;
@@ -2125,7 +2385,7 @@ int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct p
extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
-int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session,
+int perf_event__synthesize_features(const struct perf_tool *tool, struct perf_session *session,
struct evlist *evlist, perf_event__handler_t process)
{
struct perf_header *header = &session->header;
@@ -2188,7 +2448,7 @@ int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session
return ret;
}
-int perf_event__synthesize_for_pipe(struct perf_tool *tool,
+int perf_event__synthesize_for_pipe(const struct perf_tool *tool,
struct perf_session *session,
struct perf_data *data,
perf_event__handler_t process)
@@ -2215,6 +2475,7 @@ int perf_event__synthesize_for_pipe(struct perf_tool *tool,
}
ret += err;
+#ifdef HAVE_LIBTRACEEVENT
if (have_tracepoints(&evlist->core.entries)) {
int fd = perf_data__fd(data);
@@ -2234,6 +2495,9 @@ int perf_event__synthesize_for_pipe(struct perf_tool *tool,
}
ret += err;
}
+#else
+ (void)data;
+#endif
return ret;
}
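
A minimal standalone sketch of the new /proc/<pid>/task scan above, which replaces scandir() with the perf-internal io_dir API and tolerates threads that exit mid-scan. Plain POSIX opendir()/readdir() stands in for io_dir here, so this approximates the loop's logic only; scan_tasks() and its output are illustrative, not part of perf.

#include <ctype.h>
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

/* Walk /proc/<pid>/task the way the patched loop does: skip entries
 * whose name is not purely numeric, and treat a thread that vanished
 * between the scan and its use as non-fatal. */
static int scan_tasks(pid_t pid)
{
	char path[64];
	struct dirent *dent;
	DIR *dir;

	snprintf(path, sizeof(path), "/proc/%d/task", (int)pid);
	dir = opendir(path);
	if (dir == NULL)
		return -1;

	while ((dent = readdir(dir)) != NULL) {
		char *end;
		long tid;

		if (!isdigit((unsigned char)dent->d_name[0]))
			continue;	/* ".", "..", or junk */
		tid = strtol(dent->d_name, &end, 10);
		if (*end)
			continue;	/* trailing non-digits */
		/* A tid may exit right after the scan; a real consumer
		 * would skip it rather than abort, as the patch does. */
		printf("tid %ld\n", tid);
	}
	closedir(dir);
	return 0;
}

int main(void)
{
	return scan_tasks(getpid()) ? EXIT_FAILURE : EXIT_SUCCESS;
}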
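
The reworked cpu_map_data__alloc() above picks the smallest of three encodings: a fixed-size range for consecutive CPUs, a u16 list, or a 32-bit-word bitmask. The sketch below reproduces that size comparison with simplified stand-in layouts (the 2- and 4-byte header constants approximate, rather than quote, the perf record structs), assuming a sorted CPU list with no "any CPU" (-1) entry.

#include <stddef.h>
#include <stdio.h>

enum { CPU_MAP_CPUS, CPU_MAP_MASK, CPU_MAP_RANGE };

/* Round up to a multiple of 8, like PERF_ALIGN(x, sizeof(u64)). */
static size_t align8(size_t x)
{
	return (x + 7) & ~(size_t)7;
}

/* Pick an encoding for a sorted CPU list with no -1 entry; the header
 * sizes below are simplified stand-ins for the perf record layouts. */
static int choose_encoding(const int *cpus, int nr, size_t *payload)
{
	int min_cpu = cpus[0], max_cpu = cpus[nr - 1];
	size_t size_cpus, size_mask;

	/* Consecutive CPUs fit a fixed {any,start,end} range record. */
	if (max_cpu - min_cpu + 1 == nr) {
		*payload = sizeof(unsigned long long);
		return CPU_MAP_RANGE;
	}
	/* u16 type + u16 nr + one u16 per CPU. */
	size_cpus = 2 + 2 + (size_t)nr * 2;
	/* u16 type + u16 nr + u16 long_size + one u32 word per 32 CPUs;
	 * with 4-byte words this is never larger than the 8-byte-word
	 * variant once padding is counted, as the patch's comment notes. */
	size_mask = 2 + 4 + ((size_t)max_cpu / 32 + 1) * 4;
	if (size_cpus < size_mask) {
		*payload = align8(size_cpus);
		return CPU_MAP_CPUS;
	}
	*payload = align8(size_mask);
	return CPU_MAP_MASK;
}

int main(void)
{
	int a[] = { 0, 1, 2, 3 };	/* consecutive: range */
	int b[] = { 0, 4, 97 };		/* sparse, high max: u16 list */
	size_t sz;

	printf("a: type %d\n", choose_encoding(a, 4, &sz));
	printf("b: type %d\n", choose_encoding(b, 3, &sz));
	return 0;
}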
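
perf_synthesize_sample_weight() above packs the PERF_SAMPLE_WEIGHT_STRUCT fields into one u64: weight in bits 0-31, ins_lat in bits 32-47, weight3 in bits 48-63. A small sketch of just that packing, with stand-in scalar parameters instead of struct perf_sample:

#include <stdint.h>
#include <stdio.h>

/* Pack the three weight fields the way the patched helper does:
 * low 32 bits weight, next 16 bits ins_lat, top 16 bits weight3. */
static uint64_t pack_weight_struct(uint32_t weight, uint16_t ins_lat,
				   uint16_t weight3)
{
	uint64_t v = weight;

	v |= (uint64_t)ins_lat << 32;
	v |= (uint64_t)weight3 << 48;
	return v;
}

int main(void)
{
	/* Expect 0x3000700000064: weight3=3, ins_lat=7, weight=100. */
	printf("%#llx\n",
	       (unsigned long long)pack_weight_struct(100, 7, 3));
	return 0;
}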