tools/virtio/ringtest/main.h
/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Common macros and functions for ring benchmarking.
 */
#ifndef MAIN_H
#define MAIN_H

#include <stdbool.h>
#include <stdlib.h>	/* _Exit() in the non-x86 wait_cycles() fallback */
#include <assert.h>	/* assert() in the cpu_relax() fallback */

extern bool do_exit;

#if defined(__x86_64__) || defined(__i386__)
#include "x86intrin.h"

/* Busy-wait for at least the given number of TSC ticks. */
static inline void wait_cycles(unsigned long long cycles)
{
	unsigned long long t;

	t = __rdtsc();
	while (__rdtsc() - t < cycles) {}
}

/* Rough per-transition costs, in TSC ticks, used to model exits/entries. */
#define VMEXIT_CYCLES 500
#define VMENTRY_CYCLES 500

#else
static inline void wait_cycles(unsigned long long cycles)
{
	/* No TSC on this architecture: cycle waits are unsupported. */
	(void)cycles;
	_Exit(5);
}
#define VMEXIT_CYCLES 0
#define VMENTRY_CYCLES 0
#endif

/* Charge the modeled cost of a VM exit, if exits are being simulated. */
static inline void vmexit(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMEXIT_CYCLES);
}

/* Charge the modeled cost of a VM entry, if exits are being simulated. */
static inline void vmentry(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMENTRY_CYCLES);
}
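
/*
 * Illustrative sketch only (not part of the benchmark API): a guest-to-host
 * notification costs one exit plus one entry to resume, so a hypothetical
 * wrapper charging both would look like this.
 */
static inline void example_notify(void)
{
	vmexit();	/* pay the guest-to-host switch */
	/* ... host-side processing would happen here ... */
	vmentry();	/* pay the host-to-guest resume */
}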

/* implemented by ring */
void alloc_ring(void);
/* guest side */
int add_inbuf(unsigned, void *, void *);
void *get_buf(unsigned *, void **);
void disable_call(void);
bool enable_call(void);
void kick_available(void);
void poll_used(void);
/* host side */
void disable_kick(void);
bool enable_kick(void);
bool use_buf(unsigned *, void **);
void call_used(void);
void poll_avail(void);
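
/*
 * Illustrative guest-side step built only from the declarations above; a
 * sketch, not the benchmark's actual main loop.  The argument meanings
 * (length, buffer, opaque data) and the 0-on-success return convention of
 * add_inbuf() are assumptions here.
 */
static inline void example_guest_step(void)
{
	unsigned len;
	void *buf;

	/* Post one buffer; assume a non-zero return means the ring is full. */
	if (add_inbuf(0, "Buffer\n", "Hello, world!") == 0)
		kick_available();	/* notify the host of new work */

	poll_used();			/* spin until a completion is pending */
	if (get_buf(&len, &buf)) {
		/* buf and len now describe the completed buffer */
	}
}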

/* implemented by main */
extern bool do_sleep;
void kick(void);
void wait_for_kick(void);
void call(void);
void wait_for_call(void);

extern unsigned ring_size;

/* Compiler barrier - similar to what Linux uses */
#define barrier() asm volatile("" ::: "memory")

/* Is there a portable way to do this? */
#if defined(__x86_64__) || defined(__i386__)
/* "rep; nop" encodes the x86 PAUSE hint for spin-wait loops. */
#define cpu_relax() asm ("rep; nop" ::: "memory")
#else
#define cpu_relax() assert(0)
#endif

extern bool do_relax;

static inline void busy_wait(void)
{
	if (do_relax)
		cpu_relax();
	else
		/* prevent compiler from removing busy loops */
		barrier();
}
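
/*
 * Illustrative sketch: busy_wait() is meant as the body of polling loops.
 * The flag below is hypothetical, not part of the benchmark.
 */
static inline void example_spin(volatile bool *flag)
{
	while (!*flag)		/* poll until another thread sets the flag */
		busy_wait();
}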

/*
 * Not using __ATOMIC_SEQ_CST here since the GCC docs say such fences only
 * synchronize with other __ATOMIC_SEQ_CST operations.
 */
#define smp_mb() __sync_synchronize()

/*
 * This abuses the atomic builtins for thread fences, and
 * adds a compiler barrier.
 */
#define smp_release() do { \
    barrier(); \
    __atomic_thread_fence(__ATOMIC_RELEASE); \
} while (0)

#define smp_acquire() do { \
    __atomic_thread_fence(__ATOMIC_ACQUIRE); \
    barrier(); \
} while (0)
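
/*
 * Illustrative pairing of the fences above (names are hypothetical): the
 * producer orders its payload write before the flag with smp_release();
 * the consumer orders the flag read before the payload read with
 * smp_acquire().  Like the macros themselves, this sketch abuses fences
 * around plain accesses rather than using C11 atomics.
 */
static inline void example_publish(int *data, volatile bool *ready)
{
	*data = 42;		/* write the payload first */
	smp_release();		/* order the payload before the flag */
	*ready = true;		/* then publish */
}

static inline int example_consume(const int *data, volatile bool *ready)
{
	while (!*ready)		/* poll for the producer's flag */
		busy_wait();
	smp_acquire();		/* order the flag read before the payload read */
	return *data;
}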

#endif /* MAIN_H */