Diffstat (limited to 'tools/perf/util/mmap.c')
-rw-r--r--  tools/perf/util/mmap.c  260
1 file changed, 15 insertions, 245 deletions
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index a35dc57d5995..063d1b93c53d 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -13,6 +13,7 @@
#include <stdlib.h>
#include <string.h>
#include <unistd.h> // sysconf()
+#include <perf/mmap.h>
#ifdef HAVE_LIBNUMA_SUPPORT
#include <numaif.h>
#endif
@@ -23,116 +24,9 @@
#include "../perf.h"
#include <internal/lib.h> /* page_size */
-size_t perf_mmap__mmap_len(struct mmap *map)
+size_t mmap__mmap_len(struct mmap *map)
{
- return map->core.mask + 1 + page_size;
-}
-
-/* When check_messup is true, 'end' must point to a good entry */
-static union perf_event *perf_mmap__read(struct mmap *map,
- u64 *startp, u64 end)
-{
- unsigned char *data = map->core.base + page_size;
- union perf_event *event = NULL;
- int diff = end - *startp;
-
- if (diff >= (int)sizeof(event->header)) {
- size_t size;
-
- event = (union perf_event *)&data[*startp & map->core.mask];
- size = event->header.size;
-
- if (size < sizeof(event->header) || diff < (int)size)
- return NULL;
-
- /*
- * Event straddles the mmap boundary -- header should always
- * be inside due to u64 alignment of output.
- */
- if ((*startp & map->core.mask) + size != ((*startp + size) & map->core.mask)) {
- unsigned int offset = *startp;
- unsigned int len = min(sizeof(*event), size), cpy;
- void *dst = map->core.event_copy;
-
- do {
- cpy = min(map->core.mask + 1 - (offset & map->core.mask), len);
- memcpy(dst, &data[offset & map->core.mask], cpy);
- offset += cpy;
- dst += cpy;
- len -= cpy;
- } while (len);
-
- event = (union perf_event *)map->core.event_copy;
- }
-
- *startp += size;
- }
-
- return event;
-}
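
The straddle handling above depends on the buffer length (`mask + 1`) being a power of two, so `offset & mask` turns an ever-growing logical offset into a physical index and the copy completes in at most two passes. A standalone sketch of just that copy loop, with hypothetical names (`ring_copy` is not a perf or libperf function):

```c
/* Sketch of the wrap-around copy used by perf_mmap__read(); all names
 * here are illustrative, not part of perf or libperf. */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void ring_copy(void *dst, const unsigned char *data,
		      uint64_t offset, size_t len, uint64_t mask)
{
	while (len) {
		/* bytes left before the physical end of the ring */
		size_t cpy = mask + 1 - (offset & mask);

		if (cpy > len)
			cpy = len;
		memcpy(dst, &data[offset & mask], cpy);
		offset += cpy;
		dst = (unsigned char *)dst + cpy;
		len -= cpy;
	}
}
```

For a record that fits without wrapping, the first iteration copies everything; only an event crossing the end of the ring needs the second pass into `event_copy`.
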
-
-/*
- * Read event from ring buffer one by one.
- * Return one event for each call.
- *
- * Usage:
- * perf_mmap__read_init()
- * while(event = perf_mmap__read_event()) {
- * //process the event
- * perf_mmap__consume()
- * }
- * perf_mmap__read_done()
- */
-union perf_event *perf_mmap__read_event(struct mmap *map)
-{
- union perf_event *event;
-
- /*
- * Check if event was unmapped due to a POLLHUP/POLLERR.
- */
- if (!refcount_read(&map->core.refcnt))
- return NULL;
-
-	/* non-overwrite doesn't pause the ringbuffer */
- if (!map->core.overwrite)
- map->core.end = perf_mmap__read_head(map);
-
- event = perf_mmap__read(map, &map->core.start, map->core.end);
-
- if (!map->core.overwrite)
- map->core.prev = map->core.start;
-
- return event;
-}
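
The usage comment deleted above still describes the read protocol accurately; only the receiver type changes. After this series the four calls live in libperf and take the embedded `struct perf_mmap`, so a consumer loop looks roughly like this (a sketch: `drain_one_mmap()` and `process_event()` are hypothetical, error handling trimmed):

```c
#include <perf/mmap.h>	/* libperf: perf_mmap__read_init() and friends */

/* Hypothetical helper draining one ring; 'struct mmap' is the perf
 * tool's wrapper type with the libperf ring embedded as ->core. */
static void drain_one_mmap(struct mmap *map)
{
	union perf_event *event;

	if (perf_mmap__read_init(&map->core) < 0)
		return;		/* e.g. -EAGAIN: nothing to read yet */

	while ((event = perf_mmap__read_event(&map->core)) != NULL) {
		process_event(event);		/* hypothetical consumer */
		perf_mmap__consume(&map->core);
	}

	perf_mmap__read_done(&map->core);	/* mandatory in overwrite mode */
}
```
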
-
-static bool perf_mmap__empty(struct mmap *map)
-{
- return perf_mmap__read_head(map) == map->core.prev && !map->auxtrace_mmap.base;
-}
-
-void perf_mmap__get(struct mmap *map)
-{
- refcount_inc(&map->core.refcnt);
-}
-
-void perf_mmap__put(struct mmap *map)
-{
- BUG_ON(map->core.base && refcount_read(&map->core.refcnt) == 0);
-
- if (refcount_dec_and_test(&map->core.refcnt))
- perf_mmap__munmap(map);
-}
-
-void perf_mmap__consume(struct mmap *map)
-{
- if (!map->core.overwrite) {
- u64 old = map->core.prev;
-
- perf_mmap__write_tail(map, old);
- }
-
- if (refcount_read(&map->core.refcnt) == 1 && perf_mmap__empty(map))
- perf_mmap__put(map);
+ return perf_mmap__mmap_len(&map->core);
}
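
This first hunk sets the pattern for the whole commit: the generic ring-buffer state and helpers move into libperf's `struct perf_mmap`, which the tool's `struct mmap` embeds as `.core`, and the remaining tool-side entry points become thin wrappers. Schematically (fields elided; the real definitions live in perf's internal headers and are not reproduced here):

```c
/* Simplified shape of the split, for orientation only. */
struct perf_mmap {			/* libperf: generic ring buffer  */
	void		*base;		/* the mmap()ed area             */
	int		 mask;		/* size - 1, size a power of two */
	int		 fd;
	/* ... refcnt, prev, start, end, overwrite, event_copy ... */
};

struct mmap {				/* perf tool: tool-only extras   */
	struct perf_mmap	core;	/* kept as the first member      */
	struct auxtrace_mmap	auxtrace_mmap;
	/* ... AIO state, 'data' compression buffer, affinity mask ... */
};

size_t mmap__mmap_len(struct mmap *map)
{
	return perf_mmap__mmap_len(&map->core);	/* pure delegation */
}
```
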
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
@@ -170,7 +64,7 @@ static int perf_mmap__aio_enabled(struct mmap *map)
#ifdef HAVE_LIBNUMA_SUPPORT
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
- map->aio.data[idx] = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
+ map->aio.data[idx] = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
if (map->aio.data[idx] == MAP_FAILED) {
map->aio.data[idx] = NULL;
@@ -183,7 +77,7 @@ static int perf_mmap__aio_alloc(struct mmap *map, int idx)
static void perf_mmap__aio_free(struct mmap *map, int idx)
{
if (map->aio.data[idx]) {
- munmap(map->aio.data[idx], perf_mmap__mmap_len(map));
+ munmap(map->aio.data[idx], mmap__mmap_len(map));
map->aio.data[idx] = NULL;
}
}
@@ -196,7 +90,7 @@ static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
data = map->aio.data[idx];
- mmap_len = perf_mmap__mmap_len(map);
+ mmap_len = mmap__mmap_len(map);
node_mask = 1UL << cpu__get_node(cpu);
if (mbind(data, mmap_len, MPOL_BIND, &node_mask, 1, 0)) {
pr_err("Failed to bind [%p-%p] AIO buffer to node %d: error %m\n",
@@ -210,7 +104,7 @@ static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
#else /* !HAVE_LIBNUMA_SUPPORT */
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
- map->aio.data[idx] = malloc(perf_mmap__mmap_len(map));
+ map->aio.data[idx] = malloc(mmap__mmap_len(map));
if (map->aio.data[idx] == NULL)
return -1;
@@ -311,19 +205,13 @@ static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
}
#endif
-void perf_mmap__munmap(struct mmap *map)
+void mmap__munmap(struct mmap *map)
{
perf_mmap__aio_munmap(map);
if (map->data != NULL) {
- munmap(map->data, perf_mmap__mmap_len(map));
+ munmap(map->data, mmap__mmap_len(map));
map->data = NULL;
}
- if (map->core.base != NULL) {
- munmap(map->core.base, perf_mmap__mmap_len(map));
- map->core.base = NULL;
- map->core.fd = -1;
- refcount_set(&map->core.refcnt, 0);
- }
auxtrace_mmap__munmap(&map->auxtrace_mmap);
}
@@ -353,34 +241,13 @@ static void perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params
CPU_SET(map->core.cpu, &map->affinity_mask);
}
-int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
+int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
{
- /*
- * The last one will be done at perf_mmap__consume(), so that we
- * make sure we don't prevent tools from consuming every last event in
- * the ring buffer.
- *
- * I.e. we can get the POLLHUP meaning that the fd doesn't exist
- * anymore, but the last events for it are still in the ring buffer,
- * waiting to be consumed.
- *
-	 * Tools can choose to ignore this at their own discretion, but the
- * evlist layer can't just drop it when filtering events in
- * perf_evlist__filter_pollfd().
- */
- refcount_set(&map->core.refcnt, 2);
- map->core.prev = 0;
- map->core.mask = mp->mask;
- map->core.base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
- MAP_SHARED, fd, 0);
- if (map->core.base == MAP_FAILED) {
+ if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
pr_debug2("failed to mmap perf event ring buffer, error %d\n",
errno);
- map->core.base = NULL;
return -1;
}
- map->core.fd = fd;
- map->core.cpu = cpu;
perf_mmap__setup_affinity_mask(map, mp);
@@ -389,7 +256,7 @@ int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
map->comp_level = mp->comp_level;
if (map->comp_level && !perf_mmap__aio_enabled(map)) {
- map->data = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
+ map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
if (map->data == MAP_FAILED) {
pr_debug2("failed to mmap data buffer, error %d\n",
@@ -406,96 +273,16 @@ int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
return perf_mmap__aio_mmap(map, mp);
}
-static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
-{
- struct perf_event_header *pheader;
- u64 evt_head = *start;
- int size = mask + 1;
-
- pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);
- pheader = (struct perf_event_header *)(buf + (*start & mask));
- while (true) {
- if (evt_head - *start >= (unsigned int)size) {
- pr_debug("Finished reading overwrite ring buffer: rewind\n");
- if (evt_head - *start > (unsigned int)size)
- evt_head -= pheader->size;
- *end = evt_head;
- return 0;
- }
-
- pheader = (struct perf_event_header *)(buf + (evt_head & mask));
-
- if (pheader->size == 0) {
- pr_debug("Finished reading overwrite ring buffer: get start\n");
- *end = evt_head;
- return 0;
- }
-
- evt_head += pheader->size;
- pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
- }
- WARN_ONCE(1, "Shouldn't get here\n");
- return -1;
-}
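
A worked pass through the loop above may help. The scan starts at `*start` (the paused head of the backward ring) and walks forward one record header at a time; it stops either at a zero-sized header (the writer never got that far) or after covering the full buffer (the ring wrapped):

```c
/* Worked example, illustrative numbers: buffer size 16 (mask 0xf),
 * paused with *start = 0x20, every record 8 bytes:
 *
 *   evt_head = 0x20: header says size 8 -> evt_head = 0x28
 *   evt_head = 0x28: header says size 8 -> evt_head = 0x30
 *   evt_head = 0x30: evt_head - *start == 16 == size
 *                    -> full wrap, *end = 0x30, return 0
 *
 * Had the loop overshot (evt_head - *start > size), the last record
 * would straddle already-overwritten bytes, and the
 * 'evt_head -= pheader->size' rewind drops it before setting *end. */
```
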
-
-/*
- * Report the start and end of the available data in ringbuffer
- */
-static int __perf_mmap__read_init(struct mmap *md)
-{
- u64 head = perf_mmap__read_head(md);
- u64 old = md->core.prev;
- unsigned char *data = md->core.base + page_size;
- unsigned long size;
-
- md->core.start = md->core.overwrite ? head : old;
- md->core.end = md->core.overwrite ? old : head;
-
- if ((md->core.end - md->core.start) < md->core.flush)
- return -EAGAIN;
-
- size = md->core.end - md->core.start;
- if (size > (unsigned long)(md->core.mask) + 1) {
- if (!md->core.overwrite) {
- WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
-
- md->core.prev = head;
- perf_mmap__consume(md);
- return -EAGAIN;
- }
-
- /*
- * Backward ring buffer is full. We still have a chance to read
- * most of data from it.
- */
- if (overwrite_rb_find_range(data, md->core.mask, &md->core.start, &md->core.end))
- return -EINVAL;
- }
-
- return 0;
-}
-
-int perf_mmap__read_init(struct mmap *map)
-{
- /*
- * Check if event was unmapped due to a POLLHUP/POLLERR.
- */
- if (!refcount_read(&map->core.refcnt))
- return -ENOENT;
-
- return __perf_mmap__read_init(map);
-}
-
int perf_mmap__push(struct mmap *md, void *to,
int push(struct mmap *map, void *to, void *buf, size_t size))
{
- u64 head = perf_mmap__read_head(md);
+ u64 head = perf_mmap__read_head(&md->core);
unsigned char *data = md->core.base + page_size;
unsigned long size;
void *buf;
int rc = 0;
- rc = perf_mmap__read_init(md);
+ rc = perf_mmap__read_init(&md->core);
if (rc < 0)
return (rc == -EAGAIN) ? 1 : -1;
@@ -522,24 +309,7 @@ int perf_mmap__push(struct mmap *md, void *to,
}
md->core.prev = head;
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
out:
return rc;
}
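
perf_mmap__push() is the bulk path that perf record uses: rather than decoding events one by one, it hands contiguous chunks of the ring to a caller-supplied callback. A sketch of such a callback, loosely modeled on perf record's record__pushfn() (the `struct record` type and record__write() helper are assumed from builtin-record.c):

```c
/* Sketch of a push() callback in the perf-record style; 'to' is the
 * opaque cookie the caller passed to perf_mmap__push(). */
static int push_to_file(struct mmap *map, void *to, void *bf, size_t size)
{
	struct record *rec = to;

	/* append this chunk of ring-buffer bytes to the perf.data file */
	return record__write(rec, map, bf, size);
}
```

Each wakeup, the record loop would then call `perf_mmap__push(map, rec, push_to_file)` once per mapped ring; a return value of 1 simply maps `-EAGAIN` (not enough data buffered to flush yet).
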
-
-/*
- * Mandatory for overwrite mode
- * The direction of overwrite mode is backward.
- * The last perf_mmap__read() will set tail to map->core.prev.
- * Need to correct the map->core.prev to head which is the end of next read.
- */
-void perf_mmap__read_done(struct mmap *map)
-{
- /*
- * Check if event was unmapped due to a POLLHUP/POLLERR.
- */
- if (!refcount_read(&map->core.refcnt))
- return;
-
- map->core.prev = perf_mmap__read_head(map);
-}