path: root/arch/arm64/include/asm/extable.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_EXTABLE_H
#define __ASM_EXTABLE_H

/*
 * The exception table consists of pairs of relative offsets: the first
 * is the relative offset to an instruction that is allowed to fault,
 * and the second is the relative offset at which the program should
 * continue. No registers are modified, so it is entirely up to the
 * continuation code to figure out what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means that when everything goes
 * well, we don't even have to jump over them.  Further, they do not
 * intrude on our cache or TLB entries.
 */

struct exception_table_entry {
	int insn, fixup;
};

#define ARCH_HAS_RELATIVE_EXTABLE
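
/*
 * Illustrative sketch (not part of the upstream header): with
 * ARCH_HAS_RELATIVE_EXTABLE defined, the generic lookup code resolves
 * each relative offset against the address of the field that stores it.
 * The helper names below are hypothetical; lib/extable.c open-codes the
 * equivalent arithmetic.
 */
static inline unsigned long ex_insn_addr(const struct exception_table_entry *x)
{
	/* Absolute address of the instruction that is allowed to fault. */
	return (unsigned long)&x->insn + x->insn;
}

static inline unsigned long ex_fixup_addr(const struct exception_table_entry *x)
{
	/* Absolute address at which execution should continue. */
	return (unsigned long)&x->fixup + x->fixup;
}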

static inline bool in_bpf_jit(struct pt_regs *regs)
{
	if (!IS_ENABLED(CONFIG_BPF_JIT))
		return false;

	return regs->pc >= BPF_JIT_REGION_START &&
	       regs->pc < BPF_JIT_REGION_END;
}

#ifdef CONFIG_BPF_JIT
int arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
			      struct pt_regs *regs);
#else /* !CONFIG_BPF_JIT */
static inline
int arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
			      struct pt_regs *regs)
{
	return 0;
}
#endif /* !CONFIG_BPF_JIT */

extern int fixup_exception(struct pt_regs *regs);
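
/*
 * Usage sketch (illustrative, not part of this header): the arm64 fault
 * fixup path in arch/arm64/mm/extable.c ties the pieces above together
 * roughly like this -- look up the faulting PC, hand BPF JIT faults to
 * arm64_bpf_fixup_exception(), and otherwise branch to the fixup:
 *
 *	fixup = search_exception_tables(instruction_pointer(regs));
 *	if (!fixup)
 *		return 0;
 *	if (in_bpf_jit(regs))
 *		return arm64_bpf_fixup_exception(fixup, regs);
 *	regs->pc = (unsigned long)&fixup->fixup + fixup->fixup;
 *	return 1;
 */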
#endif /* __ASM_EXTABLE_H */