Diffstat (limited to 'tools/lib/perf/mmap.c')
-rw-r--r--  tools/lib/perf/mmap.c  90
1 file changed, 85 insertions(+), 5 deletions(-)
diff --git a/tools/lib/perf/mmap.c b/tools/lib/perf/mmap.c
index 0d1634cedf44..ec124eb0ec0a 100644
--- a/tools/lib/perf/mmap.c
+++ b/tools/lib/perf/mmap.c
@@ -19,6 +19,7 @@
void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
bool overwrite, libperf_unmap_cb_t unmap_cb)
{
+ /* Assume fields were zero initialized. */
map->fd = -1;
map->overwrite = overwrite;
map->unmap_cb = unmap_cb;
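The new comment is load-bearing for the lazy event copy introduced below: perf_mmap structures come from zeroed memory, so event_copy starts out NULL and event_copy_sz starts out 0, and the realloc() in perf_mmap__read() degenerates to malloc() on first use. A minimal sketch of the idiom (the helper name is illustrative, not from the patch):

#include <stdlib.h>

/* Grow-on-demand buffer: on first use *bufp == NULL and *szp == 0, so
 * realloc(NULL, need) is defined to behave exactly like malloc(need). */
static void *grow(void **bufp, size_t *szp, size_t need)
{
	if (need > *szp) {
		void *tmp = realloc(*bufp, need);

		if (!tmp)
			return NULL;
		*bufp = tmp;
		*szp = need;
	}
	return *bufp;
}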
@@ -51,13 +52,18 @@ int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
void perf_mmap__munmap(struct perf_mmap *map)
{
- if (map && map->base != NULL) {
+ if (!map)
+ return;
+
+ zfree(&map->event_copy);
+ map->event_copy_sz = 0;
+ if (map->base) {
munmap(map->base, perf_mmap__mmap_len(map));
map->base = NULL;
map->fd = -1;
refcount_set(&map->refcnt, 0);
}
- if (map && map->unmap_cb)
+ if (map->unmap_cb)
map->unmap_cb(map);
}
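Note the ordering: event_copy is released before the map->base check, so the lazily allocated copy buffer is freed even when the ring itself was never mapped. zfree() (from tools/include/linux/zalloc.h) frees the buffer and NULLs the pointer, roughly:

#include <stdlib.h>

/* Rough equivalent of zfree(&map->event_copy): free the buffer and
 * poison the pointer so a second perf_mmap__munmap() cannot double-free. */
static inline void zfree_sketch(void **pp)
{
	free(*pp);
	*pp = NULL;
}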
@@ -223,9 +229,17 @@ static union perf_event *perf_mmap__read(struct perf_mmap *map,
*/
if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
unsigned int offset = *startp;
- unsigned int len = min(sizeof(*event), size), cpy;
+ unsigned int len = size, cpy;
void *dst = map->event_copy;
+ if (size > map->event_copy_sz) {
+ dst = realloc(map->event_copy, size);
+ if (!dst)
+ return NULL;
+ map->event_copy = dst;
+ map->event_copy_sz = size;
+ }
+
do {
cpy = min(map->mask + 1 - (offset & map->mask), len);
memcpy(dst, &data[offset & map->mask], cpy);
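The do/while handles an event that straddles the end of the power-of-two ring: copy up to the buffer end, then wrap around to offset zero. The same loop as a standalone sketch (assuming mask == ring_size - 1):

#include <string.h>

/* Copy len bytes starting at ring offset 'offset' into dst, wrapping at
 * the end of a ring of mask + 1 bytes (a power of two). */
static void copy_wrapped(void *dst, const unsigned char *data,
			 unsigned int offset, unsigned int len,
			 unsigned int mask)
{
	unsigned char *out = dst;

	while (len) {
		unsigned int cpy = mask + 1 - (offset & mask);

		if (cpy > len)
			cpy = len;
		memcpy(out, &data[offset & mask], cpy);
		offset += cpy;
		out += cpy;
		len -= cpy;
	}
}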
@@ -265,7 +279,7 @@ union perf_event *perf_mmap__read_event(struct perf_mmap *map)
if (!refcount_read(&map->refcnt))
return NULL;
- /* non-overwirte doesn't pause the ringbuffer */
+ /* non-overwrite doesn't pause the ringbuffer */
if (!map->overwrite)
map->end = perf_mmap__read_head(map);
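For context, perf_mmap__read_event() is normally driven by libperf's read loop; a sketch of the usual pattern with the public perf/mmap.h API (error handling trimmed):

#include <perf/mmap.h>
#include <perf/event.h>

static void drain(struct perf_mmap *map)
{
	union perf_event *event;

	if (perf_mmap__read_init(map) < 0)
		return;		/* nothing new in the ring */

	while ((event = perf_mmap__read_event(map)) != NULL) {
		/* ... dispatch on event->header.type ... */
		perf_mmap__consume(map);	/* release this event */
	}
	perf_mmap__read_done(map);
}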
@@ -392,6 +406,72 @@ static u64 read_perf_counter(unsigned int counter)
static u64 read_timestamp(void) { return read_sysreg(cntvct_el0); }
+/* __riscv_xlen contains the width of the native base integer, here 64-bit */
+#elif defined(__riscv) && __riscv_xlen == 64
+
+/* TODO: implement rv32 support */
+
+#define CSR_CYCLE 0xc00
+#define CSR_TIME 0xc01
+
+#define csr_read(csr) \
+({ \
+ register unsigned long __v; \
+ __asm__ __volatile__ ("csrr %0, %1" \
+ : "=r" (__v) \
+ : "i" (csr) : ); \
+ __v; \
+})
+
+static unsigned long csr_read_num(int csr_num)
+{
+#define switchcase_csr_read(__csr_num, __val) {\
+ case __csr_num: \
+ __val = csr_read(__csr_num); \
+ break; }
+#define switchcase_csr_read_2(__csr_num, __val) {\
+ switchcase_csr_read(__csr_num + 0, __val) \
+ switchcase_csr_read(__csr_num + 1, __val)}
+#define switchcase_csr_read_4(__csr_num, __val) {\
+ switchcase_csr_read_2(__csr_num + 0, __val) \
+ switchcase_csr_read_2(__csr_num + 2, __val)}
+#define switchcase_csr_read_8(__csr_num, __val) {\
+ switchcase_csr_read_4(__csr_num + 0, __val) \
+ switchcase_csr_read_4(__csr_num + 4, __val)}
+#define switchcase_csr_read_16(__csr_num, __val) {\
+ switchcase_csr_read_8(__csr_num + 0, __val) \
+ switchcase_csr_read_8(__csr_num + 8, __val)}
+#define switchcase_csr_read_32(__csr_num, __val) {\
+ switchcase_csr_read_16(__csr_num + 0, __val) \
+ switchcase_csr_read_16(__csr_num + 16, __val)}
+
+ unsigned long ret = 0;
+
+ switch (csr_num) {
+ switchcase_csr_read_32(CSR_CYCLE, ret)
+ default:
+ break;
+ }
+
+ return ret;
+#undef switchcase_csr_read_32
+#undef switchcase_csr_read_16
+#undef switchcase_csr_read_8
+#undef switchcase_csr_read_4
+#undef switchcase_csr_read_2
+#undef switchcase_csr_read
+}
+
+static u64 read_perf_counter(unsigned int counter)
+{
+ return csr_read_num(CSR_CYCLE + counter);
+}
+
+static u64 read_timestamp(void)
+{
+ return csr_read_num(CSR_TIME);
+}
+
#else
static u64 read_perf_counter(unsigned int counter __maybe_unused) { return 0; }
static u64 read_timestamp(void) { return 0; }
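Two notes on the RISC-V block above. The csrr instruction encodes the CSR number as an immediate, which the "i" asm constraint enforces at compile time, so csr_read_num() cannot simply hand a runtime value to csr_read(); hence the case ladder. Fully expanded, switchcase_csr_read_32(CSR_CYCLE, ret) emits 32 literal cases covering CSR_CYCLE through CSR_CYCLE + 31 (cycle, time, instret and hpmcounter3..31), in effect:

switch (csr_num) {
case CSR_CYCLE + 0: ret = csr_read(CSR_CYCLE + 0); break;
case CSR_CYCLE + 1: ret = csr_read(CSR_CYCLE + 1); break;
	/* ... 30 more cases, through CSR_CYCLE + 31 ... */
default:
	break;
}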
@@ -428,7 +508,7 @@ int perf_mmap__read_self(struct perf_mmap *map, struct perf_counts_values *count
idx = READ_ONCE(pc->index);
cnt = READ_ONCE(pc->offset);
if (pc->cap_user_rdpmc && idx) {
- s64 evcnt = read_perf_counter(idx - 1);
+ u64 evcnt = read_perf_counter(idx - 1);
u16 width = READ_ONCE(pc->pmc_width);
evcnt <<= 64 - width;
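The shift pair that follows masks the raw counter down to its pmc_width hardware bits. With u64 the right shift is logical rather than arithmetic, so the pair acts as a plain mask, and the left shift can no longer run into signed-overflow undefined behaviour. An illustrative helper (not from the patch; uint64_t stands in for the kernel's u64):

#include <stdint.h>

/* For width < 64 this equals raw & ((1ULL << width) - 1): e.g. with
 * width == 48, bits [63:48] are discarded and bits [47:0] are kept. */
static uint64_t mask_to_width(uint64_t raw, unsigned int width)
{
	raw <<= 64 - width;
	raw >>= 64 - width;
	return raw;
}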