Diffstat (limited to 'tools/perf/bench/epoll-ctl.c')
-rw-r--r--	tools/perf/bench/epoll-ctl.c	90
1 file changed, 55 insertions(+), 35 deletions(-)
diff --git a/tools/perf/bench/epoll-ctl.c b/tools/perf/bench/epoll-ctl.c
index 0c0a6e824934..d66d852b90e4 100644
--- a/tools/perf/bench/epoll-ctl.c
+++ b/tools/perf/bench/epoll-ctl.c
@@ -5,7 +5,7 @@
  * Benchmark the various operations allowed for epoll_ctl(2).
  * The idea is to concurrently stress a single epoll instance
  */
-#ifdef HAVE_EVENTFD
+#ifdef HAVE_EVENTFD_SUPPORT
 /* For the CLR_() macros */
 #include <string.h>
 #include <pthread.h>
@@ -14,17 +14,19 @@
 #include <inttypes.h>
 #include <signal.h>
 #include <stdlib.h>
+#include <unistd.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <sys/time.h>
 #include <sys/resource.h>
 #include <sys/epoll.h>
 #include <sys/eventfd.h>
+#include <perf/cpumap.h>
 
+#include "../util/mutex.h"
 #include "../util/stat.h"
 #include <subcmd/parse-options.h>
 #include "bench.h"
-#include "cpumap.h"
 
 #include <err.h>
 
@@ -33,7 +35,6 @@
 
 static unsigned int nthreads = 0;
 static unsigned int nsecs = 8;
-struct timeval start, end, runtime;
 static bool done, __verbose, randomize;
 
 /*
@@ -58,10 +59,10 @@ static unsigned int nested = 0;
 /* amount of fds to monitor, per thread */
 static unsigned int nfds = 64;
 
-static pthread_mutex_t thread_lock;
+static struct mutex thread_lock;
 static unsigned int threads_starting;
 static struct stats all_stats[EPOLL_NR_OPS];
-static pthread_cond_t thread_parent, thread_worker;
+static struct cond thread_parent, thread_worker;
 
 struct worker {
 	int tid;
@@ -92,8 +93,8 @@ static void toggle_done(int sig __maybe_unused,
 {
 	/* inform all threads that we're done for the day */
 	done = true;
-	gettimeofday(&end, NULL);
-	timersub(&end, &start, &runtime);
+	gettimeofday(&bench__end, NULL);
+	timersub(&bench__end, &bench__start, &bench__runtime);
 }
 
 static void nest_epollfd(void)
@@ -106,7 +107,7 @@ static void nest_epollfd(void)
 	printinfo("Nesting level(s): %d\n", nested);
 
 	epollfdp = calloc(nested, sizeof(int));
-	if (!epollfd)
+	if (!epollfdp)
 		err(EXIT_FAILURE, "calloc");
 
 	for (i = 0; i < nested; i++) {
@@ -174,12 +175,12 @@ static void *workerfn(void *arg)
 	struct timespec ts = { .tv_sec = 0,
 			       .tv_nsec = 250 };
 
-	pthread_mutex_lock(&thread_lock);
+	mutex_lock(&thread_lock);
 	threads_starting--;
 	if (!threads_starting)
-		pthread_cond_signal(&thread_parent);
-	pthread_cond_wait(&thread_worker, &thread_lock);
-	pthread_mutex_unlock(&thread_lock);
+		cond_signal(&thread_parent);
+	cond_wait(&thread_worker, &thread_lock);
+	mutex_unlock(&thread_lock);
 
 	/* Let 'em loose */
 	do {
@@ -219,16 +220,23 @@
 	}
 }
 
-static int do_threads(struct worker *worker, struct cpu_map *cpu)
+static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
 {
 	pthread_attr_t thread_attr, *attrp = NULL;
-	cpu_set_t cpuset;
+	cpu_set_t *cpuset;
 	unsigned int i, j;
-	int ret;
+	int ret = 0;
+	int nrcpus;
+	size_t size;
 
 	if (!noaffinity)
 		pthread_attr_init(&thread_attr);
 
+	nrcpus = cpu__max_cpu().cpu;
+	cpuset = CPU_ALLOC(nrcpus);
+	BUG_ON(!cpuset);
+	size = CPU_ALLOC_SIZE(nrcpus);
+
 	for (i = 0; i < nthreads; i++) {
 		struct worker *w = &worker[i];
 
@@ -252,22 +260,28 @@ static int do_threads(struct worker *worker, struct cpu_map *cpu)
 			init_fdmaps(w, 50);
 
 		if (!noaffinity) {
-			CPU_ZERO(&cpuset);
-			CPU_SET(cpu->map[i % cpu->nr], &cpuset);
+			CPU_ZERO_S(size, cpuset);
+			CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu,
+					size, cpuset);
 
-			ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
-			if (ret)
+			ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset);
+			if (ret) {
+				CPU_FREE(cpuset);
 				err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+			}
 
 			attrp = &thread_attr;
 		}
 
 		ret = pthread_create(&w->thread, attrp, workerfn,
				     (void *)(struct worker *) w);
-		if (ret)
+		if (ret) {
+			CPU_FREE(cpuset);
 			err(EXIT_FAILURE, "pthread_create");
+		}
 	}
 
+	CPU_FREE(cpuset);
 	if (!noaffinity)
 		pthread_attr_destroy(&thread_attr);
 
@@ -301,7 +315,7 @@ int bench_epoll_ctl(int argc, const char **argv)
 	int j, ret = 0;
 	struct sigaction act;
 	struct worker *worker = NULL;
-	struct cpu_map *cpu;
+	struct perf_cpu_map *cpu;
 	struct rlimit rl, prevrl;
 	unsigned int i;
 
@@ -311,11 +325,12 @@
 		exit(EXIT_FAILURE);
 	}
 
+	memset(&act, 0, sizeof(act));
 	sigfillset(&act.sa_mask);
 	act.sa_sigaction = toggle_done;
 	sigaction(SIGINT, &act, NULL);
 
-	cpu = cpu_map__new(NULL);
+	cpu = perf_cpu_map__new_online_cpus();
 	if (!cpu)
 		goto errmem;
 
@@ -332,7 +347,7 @@
 
 	/* default to the number of CPUs */
 	if (!nthreads)
-		nthreads = cpu->nr;
+		nthreads = perf_cpu_map__nr(cpu);
 
 	worker = calloc(nthreads, sizeof(*worker));
 	if (!worker)
@@ -353,21 +368,21 @@
 	for (i = 0; i < EPOLL_NR_OPS; i++)
 		init_stats(&all_stats[i]);
 
-	pthread_mutex_init(&thread_lock, NULL);
-	pthread_cond_init(&thread_parent, NULL);
-	pthread_cond_init(&thread_worker, NULL);
+	mutex_init(&thread_lock);
+	cond_init(&thread_parent);
+	cond_init(&thread_worker);
 
 	threads_starting = nthreads;
 
-	gettimeofday(&start, NULL);
+	gettimeofday(&bench__start, NULL);
 
 	do_threads(worker, cpu);
 
-	pthread_mutex_lock(&thread_lock);
+	mutex_lock(&thread_lock);
 	while (threads_starting)
-		pthread_cond_wait(&thread_parent, &thread_lock);
-	pthread_cond_broadcast(&thread_worker);
-	pthread_mutex_unlock(&thread_lock);
+		cond_wait(&thread_parent, &thread_lock);
+	cond_broadcast(&thread_worker);
+	mutex_unlock(&thread_lock);
 
 	sleep(nsecs);
 	toggle_done(0, NULL, NULL);
@@ -380,9 +395,9 @@
 	}
 
 	/* cleanup & report results */
-	pthread_cond_destroy(&thread_parent);
-	pthread_cond_destroy(&thread_worker);
-	pthread_mutex_destroy(&thread_lock);
+	cond_destroy(&thread_parent);
+	cond_destroy(&thread_worker);
+	mutex_destroy(&thread_lock);
 
 	for (i = 0; i < nthreads; i++) {
 		unsigned long t[EPOLL_NR_OPS];
@@ -406,8 +421,13 @@
 	print_summary();
 
 	close(epollfd);
+	perf_cpu_map__put(cpu);
+	for (i = 0; i < nthreads; i++)
+		free(worker[i].fdmap);
+
+	free(worker);
 	return ret;
 errmem:
 	err(EXIT_FAILURE, "calloc");
 }
-#endif // HAVE_EVENTFD
+#endif // HAVE_EVENTFD_SUPPORT
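The main functional change in do_threads() above is replacing the fixed-size cpu_set_t on the stack with a dynamically sized set, so the affinity code keeps working on machines with more CPUs than CPU_SETSIZE. Below is a minimal standalone sketch of that glibc pattern (CPU_ALLOC/CPU_ALLOC_SIZE/CPU_SET_S plus pthread_attr_setaffinity_np), written outside the perf tree; the worker() function, the hard-coded thread count and the use of sysconf() in place of perf's cpu__max_cpu() are illustrative assumptions, not part of the patch.

/* Sketch only: mirrors the dynamically sized cpu_set_t pattern used above.
 * Assumes glibc; worker() and the 4-thread count are made up for illustration. */
#define _GNU_SOURCE
#include <err.h>
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void *worker(void *arg)
{
	printf("thread %ld running on CPU %d\n", (long)arg, sched_getcpu());
	return NULL;
}

int main(void)
{
	int nrcpus = sysconf(_SC_NPROCESSORS_CONF);  /* upper bound, stand-in for cpu__max_cpu() */
	cpu_set_t *cpuset = CPU_ALLOC(nrcpus);       /* heap-allocated set sized for nrcpus */
	size_t size = CPU_ALLOC_SIZE(nrcpus);        /* byte size passed to the *_S() macros */
	pthread_attr_t attr;
	pthread_t tid[4];
	long i;

	if (!cpuset)
		err(EXIT_FAILURE, "CPU_ALLOC");
	pthread_attr_init(&attr);

	for (i = 0; i < 4; i++) {
		/* pin thread i to CPU (i % nrcpus), as the benchmark does per worker */
		CPU_ZERO_S(size, cpuset);
		CPU_SET_S(i % nrcpus, size, cpuset);
		if (pthread_attr_setaffinity_np(&attr, size, cpuset))
			err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
		if (pthread_create(&tid[i], &attr, worker, (void *)i))
			err(EXIT_FAILURE, "pthread_create");
	}
	for (i = 0; i < 4; i++)
		pthread_join(tid[i], NULL);

	pthread_attr_destroy(&attr);
	CPU_FREE(cpuset);                            /* matches the CPU_FREE() calls added above */
	return 0;
}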
