arch/s390/kernel/ftrace.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *		Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/ftrace.lds.h>
#include <asm/nospec-branch.h>
#include <asm/set_memory.h>
#include "entry.h"
#include "ftrace.h"

/*
 * To generate the function prologue either gcc's hotpatch feature (since gcc 4.8)
 * or a combination of -pg -mrecord-mcount -mnop-mcount -mfentry flags
 * (since gcc 9 / clang 10) is used.
 * In both cases the original as well as the disabled function prologue
 * contains only a single six byte instruction and looks like this:
 * >	brcl	0,0			# offset 0
 * To enable ftrace this instruction gets patched and afterwards looks
 * like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 *
 * The instruction will be patched by ftrace_make_call / ftrace_make_nop.
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't jump directly to the ftrace caller, but need a
 * trampoline (ftrace_plt), which also clobbers r1.
 */
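
/*
 * For illustration, the two encodings (as generated by
 * ftrace_generate_nop_insn and ftrace_generate_call_insn below) are:
 * >	c0 04 00 00 00 00	# brcl	0,0 (disabled: nop)
 * >	c0 05 dd dd dd dd	# brasl	%r0,target (enabled)
 * where dd dd dd dd is the signed 32 bit offset from the instruction to
 * the target, counted in halfwords: (target - ip) / 2.
 */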

void *ftrace_func __read_mostly = ftrace_stub;
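
/*
 * brcl/brasl are six byte instructions in RIL format: a 16 bit opcode
 * field (whose second byte holds the mask or register operand in the
 * upper four bits and the opcode extension in the lower four bits),
 * followed by a signed 32 bit displacement counted in halfwords.
 */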
struct ftrace_insn {
	u16 opc;
	s32 disp;
} __packed;

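/*
 * The shared hotpatch trampoline is reached from a per-function
 * trampoline via brasl %r1, so on entry %r1 points right behind that
 * brasl, into the trampoline's data. The lmg below loads the return
 * address (the rest of the intercepted function) into %r0 and the
 * interceptor's address (ftrace_caller) into %r1, which is then
 * branched to.
 */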
asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_br:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	br	%r1\n"
	"ftrace_shared_hotpatch_trampoline_br_end:\n"
);

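/*
 * With expolines active the indirect branch must not be a plain br,
 * whose target may be speculated. The variants below execute the
 * br %r1 via ex/exrl instead; exrl is used when the execute-extensions
 * facility (35) is installed.
 */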
#ifdef CONFIG_EXPOLINE
asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_ex:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	ex	%r0," __stringify(__LC_BR_R1) "(%r0)\n"
	"	j	.\n"
	"ftrace_shared_hotpatch_trampoline_ex_end:\n"
);

asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_exrl:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	.insn	ril,0xc60000000000,%r0,0f\n" /* exrl */
	"	j	.\n"
	"0:	br	%r1\n"
	"ftrace_shared_hotpatch_trampoline_exrl_end:\n"
);
#endif /* CONFIG_EXPOLINE */

#ifdef CONFIG_MODULES
static char *ftrace_plt;

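/*
 * Fallback code for the module plt when the shared hotpatch trampoline
 * is not available: basr %r1,%r0 does not branch (second operand zero)
 * and only saves the next address in %r1; the lg then loads
 * ftrace_caller's address from the literal at label 0 and br enters it.
 */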
asm(
	"	.data\n"
	"ftrace_plt_template:\n"
	"	basr	%r1,%r0\n"
	"	lg	%r1,0f-.(%r1)\n"
	"	br	%r1\n"
	"0:	.quad	ftrace_caller\n"
	"ftrace_plt_template_end:\n"
	"	.previous\n"
);
#endif /* CONFIG_MODULES */

static const char *ftrace_shared_hotpatch_trampoline(const char **end)
{
	const char *tstart, *tend;

	tstart = ftrace_shared_hotpatch_trampoline_br;
	tend = ftrace_shared_hotpatch_trampoline_br_end;
#ifdef CONFIG_EXPOLINE
	if (!nospec_disable) {
		tstart = ftrace_shared_hotpatch_trampoline_ex;
		tend = ftrace_shared_hotpatch_trampoline_ex_end;
		if (test_facility(35)) { /* exrl */
			tstart = ftrace_shared_hotpatch_trampoline_exrl;
			tend = ftrace_shared_hotpatch_trampoline_exrl_end;
		}
	}
#endif /* CONFIG_EXPOLINE */
	if (end)
		*end = tend;
	return tstart;
}

bool ftrace_need_init_nop(void)
{
	return ftrace_shared_hotpatch_trampoline(NULL);
}

int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline =
		__ftrace_hotpatch_trampolines_start;
	static const char orig[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 };
	static struct ftrace_hotpatch_trampoline *trampoline;
	struct ftrace_hotpatch_trampoline **next_trampoline;
	struct ftrace_hotpatch_trampoline *trampolines_end;
	struct ftrace_hotpatch_trampoline tmp;
	struct ftrace_insn *insn;
	const char *shared;
	s32 disp;

	BUILD_BUG_ON(sizeof(struct ftrace_hotpatch_trampoline) !=
		     SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE);

	next_trampoline = &next_vmlinux_trampoline;
	trampolines_end = __ftrace_hotpatch_trampolines_end;
	shared = ftrace_shared_hotpatch_trampoline(NULL);
#ifdef CONFIG_MODULES
	if (mod) {
		next_trampoline = &mod->arch.next_trampoline;
		trampolines_end = mod->arch.trampolines_end;
		shared = ftrace_plt;
	}
#endif

	if (WARN_ON_ONCE(*next_trampoline >= trampolines_end))
		return -ENOMEM;
	trampoline = (*next_trampoline)++;

	/* Check for the compiler-generated fentry nop (brcl 0, .). */
	if (WARN_ON_ONCE(memcmp((const void *)rec->ip, &orig, sizeof(orig))))
		return -EINVAL;

	/* Generate the trampoline. */
	tmp.brasl_opc = 0xc015; /* brasl %r1, shared */
	tmp.brasl_disp = (shared - (const char *)&trampoline->brasl_opc) / 2;
	tmp.interceptor = FTRACE_ADDR;
	tmp.rest_of_intercepted_function = rec->ip + sizeof(struct ftrace_insn);
	s390_kernel_write(trampoline, &tmp, sizeof(tmp));

	/* Generate a jump to the trampoline. */
	disp = ((char *)trampoline - (char *)rec->ip) / 2;
	insn = (struct ftrace_insn *)rec->ip;
	s390_kernel_write(&insn->disp, &disp, sizeof(disp));

	return 0;
}
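
/*
 * After ftrace_init_nop the prologue is still a brcl 0 nop, but its
 * displacement already points at the per-function trampoline. Enabling
 * is then a single byte change (see brcl_enable below) and the
 * resulting flow is:
 * >	brcl	15,trampoline		# patched prologue at rec->ip
 * >	brasl	%r1,<shared code>	# per-function trampoline
 * >	lmg/br				# shared code, enters ftrace_caller
 */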

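/*
 * On s390 ftrace_regs_caller is the same code as ftrace_caller, so
 * moving a call site between the two addresses requires no code
 * modification.
 */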
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return 0;
}

static void ftrace_generate_nop_insn(struct ftrace_insn *insn)
{
	/* brcl 0,0 */
	insn->opc = 0xc004;
	insn->disp = 0;
}

static void ftrace_generate_call_insn(struct ftrace_insn *insn,
				      unsigned long ip)
{
	unsigned long target;

	/* brasl r0,ftrace_caller */
	target = FTRACE_ADDR;
#ifdef CONFIG_MODULES
	if (is_module_addr((void *)ip))
		target = (unsigned long)ftrace_plt;
#endif /* CONFIG_MODULES */
	insn->opc = 0xc005;
	insn->disp = (target - ip) / 2;
}

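/*
 * The second byte of a brcl instruction holds the condition mask in its
 * upper four bits and the opcode extension (0x4) in its lower four bits.
 * Rewriting just that byte toggles between a nop (mask 0) and an
 * unconditional branch (mask 15) without touching the branch target.
 */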
static void brcl_disable(void *brcl)
{
	u8 op = 0x04; /* set mask field to zero */

	s390_kernel_write((char *)brcl + 1, &op, sizeof(op));
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	struct ftrace_insn orig, new, old;

	if (ftrace_shared_hotpatch_trampoline(NULL)) {
		brcl_disable((void *)rec->ip);
		return 0;
	}

	if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old)))
		return -EFAULT;
	/* Replace ftrace call with a nop. */
	ftrace_generate_call_insn(&orig, rec->ip);
	ftrace_generate_nop_insn(&new);

	/* Verify that the code to be replaced matches what we expect. */
	if (memcmp(&orig, &old, sizeof(old)))
		return -EINVAL;
	s390_kernel_write((void *) rec->ip, &new, sizeof(new));
	return 0;
}

static void brcl_enable(void *brcl)
{
	u8 op = 0xf4; /* set mask field to all ones */

	s390_kernel_write((char *)brcl + 1, &op, sizeof(op));
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	struct ftrace_insn orig, new, old;

	if (ftrace_shared_hotpatch_trampoline(NULL)) {
		brcl_enable((void *)rec->ip);
		return 0;
	}

	if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old)))
		return -EFAULT;
	/* Replace nop with an ftrace call. */
	ftrace_generate_nop_insn(&orig);
	ftrace_generate_call_insn(&new, rec->ip);

	/* Verify that the code to be replaced matches what we expect. */
	if (memcmp(&orig, &old, sizeof(old)))
		return -EINVAL;
	s390_kernel_write((void *) rec->ip, &new, sizeof(new));
	return 0;
}

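/*
 * ftrace_caller calls the tracer through the ftrace_func pointer, so
 * changing the tracer callback is a plain pointer update instead of an
 * instruction patch.
 */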
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_func = func;
	return 0;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

void arch_ftrace_update_code(int command)
{
	if (ftrace_shared_hotpatch_trampoline(NULL))
		ftrace_modify_all_code(command);
	else
		ftrace_run_stop_machine(command);
}

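/* Intentionally empty: the interrupt itself acts as a serializing event. */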
static void __ftrace_sync(void *dummy)
{
}

int ftrace_arch_code_modify_post_process(void)
{
	if (ftrace_shared_hotpatch_trampoline(NULL)) {
		/* Send SIGP to the other CPUs, so they see the new code. */
		smp_call_function(__ftrace_sync, NULL, 1);
	}
	return 0;
}

#ifdef CONFIG_MODULES

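/*
 * Module code may be loaded outside the +-4 GB displacement range of
 * brasl relative to ftrace_caller, hence the indirection through a plt
 * page allocated in the module area.
 */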
static int __init ftrace_plt_init(void)
{
	const char *start, *end;

	ftrace_plt = module_alloc(PAGE_SIZE);
	if (!ftrace_plt)
		panic("cannot allocate ftrace plt\n");

	start = ftrace_shared_hotpatch_trampoline(&end);
	if (!start) {
		start = ftrace_plt_template;
		end = ftrace_plt_template_end;
	}
	memcpy(ftrace_plt, start, end - start);
	set_memory_ro((unsigned long)ftrace_plt, 1);
	return 0;
}
device_initcall(ftrace_plt_init);

#endif /* CONFIG_MODULES */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it on the stack of return addresses
 * of the current task.
 */
unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp,
				    unsigned long ip)
{
	if (unlikely(ftrace_graph_is_dead()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	ip -= MCOUNT_INSN_SIZE;
	if (!function_graph_enter(ra, ip, 0, (void *) sp))
		ra = (unsigned long) return_to_handler;
out:
	return ra;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);

/*
 * Patch the kernel code at the ftrace_graph_caller location. The instruction
 * there is a branch relative on condition that jumps over the ftrace graph
 * code block. To enable the graph code, the mask field of the instruction is
 * patched to zero, turning the instruction into a nop, so that execution
 * falls through into the graph code block.
 * To disable the graph code, the mask field is patched to all ones, which
 * turns the instruction back into an unconditional branch over the block.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	brcl_disable(ftrace_graph_caller);
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	brcl_enable(ftrace_graph_caller);
	return 0;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KPROBES_ON_FTRACE
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
		struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;
	struct kprobe *p;
	int bit;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	regs = ftrace_get_regs(fregs);
	preempt_disable_notrace();
	p = get_kprobe((kprobe_opcode_t *)ip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto out;

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
		goto out;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

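	/* Let the handlers see the probe address as the instruction pointer. */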
	instruction_pointer_set(regs, ip);

	if (!p->pre_handler || !p->pre_handler(p, regs)) {

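		/* Continue execution behind the patched instruction. */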
		instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);

		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
out:
	preempt_enable_notrace();
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif