Diffstat (limited to 'tools/perf/util/synthetic-events.c')
-rw-r--r--  tools/perf/util/synthetic-events.c  |  99
 1 file changed, 57 insertions(+), 42 deletions(-)
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index 2ae59c03ae77..812424dbf2d5 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -1184,52 +1184,48 @@ int perf_event__synthesize_thread_map2(struct perf_tool *tool,
return err;
}
-static void synthesize_cpus(struct cpu_map_entries *cpus,
- struct perf_cpu_map *map)
+static void synthesize_cpus(struct perf_record_cpu_map_data *data,
+ const struct perf_cpu_map *map)
{
int i, map_nr = perf_cpu_map__nr(map);
- cpus->nr = map_nr;
+ data->cpus_data.nr = map_nr;
for (i = 0; i < map_nr; i++)
- cpus->cpu[i] = perf_cpu_map__cpu(map, i).cpu;
+ data->cpus_data.cpu[i] = perf_cpu_map__cpu(map, i).cpu;
}
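synthesize_cpus() now writes through the event payload directly rather than via a cast to a standalone struct. For orientation, a rough sketch of the perf_record_cpu_map_data union it fills (field names taken from the patch; the exact libperf declaration in tools/lib/perf/include/perf/event.h may differ):

    /* Sketch of the assumed payload shape, not a verbatim header copy. */
    struct perf_record_cpu_map_data {
            __u16 type;
            union {
                    /* Used when type == PERF_CPU_MAP__CPUS. */
                    struct cpu_map_entries cpus_data;
                    /* Used when type == PERF_CPU_MAP__MASK and long_size == 4. */
                    struct perf_record_mask_cpu_map32 mask32_data;
                    /* Used when type == PERF_CPU_MAP__MASK and long_size == 8. */
                    struct perf_record_mask_cpu_map64 mask64_data;
            };
    };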
-static void synthesize_mask(struct perf_record_record_cpu_map *mask,
- struct perf_cpu_map *map, int max)
+static void synthesize_mask(struct perf_record_cpu_map_data *data,
+ const struct perf_cpu_map *map, int max)
{
- int i;
+ int idx;
+ struct perf_cpu cpu;
+
+ /* Due to padding, the 4-byte-per-entry mask variant is always smaller. */
+ data->mask32_data.nr = BITS_TO_U32(max);
+ data->mask32_data.long_size = 4;
- mask->nr = BITS_TO_LONGS(max);
- mask->long_size = sizeof(long);
+ perf_cpu_map__for_each_cpu(cpu, idx, map) {
+ int bit_word = cpu.cpu / 32;
+ __u32 bit_mask = 1U << (cpu.cpu & 31);
- for (i = 0; i < perf_cpu_map__nr(map); i++)
- set_bit(perf_cpu_map__cpu(map, i).cpu, mask->mask);
+ data->mask32_data.mask[bit_word] |= bit_mask;
+ }
}
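The word/bit split is plain modular arithmetic: cpu / 32 selects the 32-bit mask word and cpu & 31 the bit inside it. A standalone illustration (not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            int cpu = 37;
            int bit_word = cpu / 32;              /* 37 / 32 == 1 -> mask[1] */
            unsigned bit_mask = 1U << (cpu & 31); /* 1U << 5   == 0x20       */

            /* synthesize_mask() would therefore set mask[1] |= 0x20. */
            printf("word %d, mask %#x\n", bit_word, bit_mask);
            return 0;
    }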
-static size_t cpus_size(struct perf_cpu_map *map)
+static size_t cpus_size(const struct perf_cpu_map *map)
{
return sizeof(struct cpu_map_entries) + perf_cpu_map__nr(map) * sizeof(u16);
}
-static size_t mask_size(struct perf_cpu_map *map, int *max)
+static size_t mask_size(const struct perf_cpu_map *map, int *max)
{
- int i;
-
- *max = 0;
-
- for (i = 0; i < perf_cpu_map__nr(map); i++) {
- /* bit position of the cpu is + 1 */
- int bit = perf_cpu_map__cpu(map, i).cpu + 1;
-
- if (bit > *max)
- *max = bit;
- }
-
- return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long);
+ *max = perf_cpu_map__max(map).cpu;
+ return sizeof(struct perf_record_mask_cpu_map32) + BITS_TO_U32(*max) * sizeof(__u32);
}
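perf_cpu_map__max() returns the highest CPU number directly, replacing the old per-entry scan, and BITS_TO_U32() rounds a bit count up to whole 32-bit words. Assuming the usual shape of the shared tools helpers:

    /* Assumed helper definitions, tools/include style. */
    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
    #define BITS_TO_U32(nr)     DIV_ROUND_UP(nr, 32)

    /* e.g. a top bit position of 65 needs DIV_ROUND_UP(65, 32) == 3 words. */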
-void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max)
+static void *cpu_map_data__alloc(const struct perf_cpu_map *map, size_t *size,
+ u16 *type, int *max)
{
size_t size_cpus, size_mask;
bool is_dummy = perf_cpu_map__empty(map);
@@ -1258,30 +1254,31 @@ void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int
*type = PERF_CPU_MAP__MASK;
}
- *size += sizeof(struct perf_record_cpu_map_data);
+ *size += sizeof(__u16); /* For perf_record_cpu_map_data.type. */
*size = PERF_ALIGN(*size, sizeof(u64));
return zalloc(*size);
}
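Only the leading type field is added here because cpus_size()/mask_size() already account for the variant payload; adding sizeof(struct perf_record_cpu_map_data) as before would count the full union a second time. A worked size, assuming cpu_map_entries is { __u16 nr; __u16 cpu[]; }:

    /* Illustrative: 8-CPU map, PERF_CPU_MAP__CPUS variant.
     *   size  = 2 + 8 * 2;           cpus_size(): nr plus 8 u16 entries -> 18
     *   size += sizeof(__u16);       perf_record_cpu_map_data.type      -> 20
     *   size  = PERF_ALIGN(size, 8); pad the record to a u64 boundary   -> 24
     */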
-void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
- u16 type, int max)
+static void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data,
+ const struct perf_cpu_map *map,
+ u16 type, int max)
{
data->type = type;
switch (type) {
case PERF_CPU_MAP__CPUS:
- synthesize_cpus((struct cpu_map_entries *) data->data, map);
+ synthesize_cpus(data, map);
break;
case PERF_CPU_MAP__MASK:
- synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max);
+ synthesize_mask(data, map, max);
default:
break;
}
}
-static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
+static struct perf_record_cpu_map *cpu_map_event__new(const struct perf_cpu_map *map)
{
- size_t size = sizeof(struct perf_record_cpu_map);
+ size_t size = sizeof(struct perf_event_header);
struct perf_record_cpu_map *event;
int max;
u16 type;
@@ -1299,7 +1296,7 @@ static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
}
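Since cpu_map_data__alloc() now accounts for the type field and the chosen variant itself, cpu_map_event__new() seeds size with just the event header, so nothing is counted twice. A condensed sketch of the construction sequence (error handling elided):

    size_t size = sizeof(struct perf_event_header);
    struct perf_record_cpu_map *event;
    u16 type;
    int max;

    /* Adds the u16 type, the variant payload, and u64 alignment padding. */
    event = cpu_map_data__alloc(map, &size, &type, &max);
    if (event)
            cpu_map_data__synthesize(&event->data, map, type, max);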
int perf_event__synthesize_cpu_map(struct perf_tool *tool,
- struct perf_cpu_map *map,
+ const struct perf_cpu_map *map,
perf_event__handler_t process,
struct machine *machine)
{
@@ -1432,11 +1429,12 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
result += sizeof(u64);
/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
if (read_format & PERF_FORMAT_GROUP) {
- sz = sample->read.group.nr *
- sizeof(struct sample_read_value);
- result += sz;
+ sz = sample_read_value_size(read_format);
+ result += sz * sample->read.group.nr;
} else {
result += sizeof(u64);
+ if (read_format & PERF_FORMAT_LOST)
+ result += sizeof(u64);
}
}
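The sizing now defers to sample_read_value_size(), so each read value grows by one u64 exactly when PERF_FORMAT_LOST is set. A sketch of that helper, under the assumption that struct sample_read_value gained a trailing lost field in this series:

    #include <stddef.h>

    struct sample_read_value {
            __u64 value;
            __u64 id;   /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ. */
            __u64 lost; /* Only present when PERF_FORMAT_LOST is set.     */
    };

    static inline size_t sample_read_value_size(__u64 read_format)
    {
            return (read_format & PERF_FORMAT_LOST) ?
                    sizeof(struct sample_read_value) :
                    offsetof(struct sample_read_value, lost);
    }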
@@ -1521,6 +1519,20 @@ void __weak arch_perf_synthesize_sample_weight(const struct perf_sample *data,
*array = data->weight;
}
+static __u64 *copy_read_group_values(__u64 *array, __u64 read_format,
+ const struct perf_sample *sample)
+{
+ size_t sz = sample_read_value_size(read_format);
+ struct sample_read_value *v = sample->read.group.values;
+
+ sample_read_group__for_each(v, sample->read.group.nr, read_format) {
+ /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+ memcpy(array, v, sz);
+ array = (void *)array + sz;
+ }
+ return array;
+}
+
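copy_read_group_values() cannot index a plain array because each entry is only as wide as read_format requires; sample_read_group__for_each() has to advance by that variable stride. A plausible shape for the iterator (names assumed from context, not a verbatim header copy):

    static inline struct sample_read_value *
    next_sample_read_value(struct sample_read_value *v, __u64 read_format)
    {
            return (void *)v + sample_read_value_size(read_format);
    }

    #define sample_read_group__for_each(v, nr, rf)                  \
            for (int __i = 0; __i < (int)(nr);                      \
                 v = next_sample_read_value(v, rf), __i++)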
int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
const struct perf_sample *sample)
{
@@ -1602,13 +1614,16 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
if (read_format & PERF_FORMAT_GROUP) {
- sz = sample->read.group.nr *
- sizeof(struct sample_read_value);
- memcpy(array, sample->read.group.values, sz);
- array = (void *)array + sz;
+ array = copy_read_group_values(array, read_format,
+ sample);
} else {
*array = sample->read.one.id;
array++;
+
+ if (read_format & PERF_FORMAT_LOST) {
+ *array = sample->read.one.lost;
+ array++;
+ }
}
}
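For the non-group case the new lost count simply follows the id, matching the kernel's read_format ABI for a single event:

    /* Non-group PERF_SAMPLE_READ payload, per the perf_event_open(2) ABI:
     *   u64 value;
     *   u64 time_enabled;   if PERF_FORMAT_TOTAL_TIME_ENABLED
     *   u64 time_running;   if PERF_FORMAT_TOTAL_TIME_RUNNING
     *   u64 id;             PERF_FORMAT_ID (forced for PERF_SAMPLE_READ)
     *   u64 lost;           if PERF_FORMAT_LOST
     */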