arch/arm/include/asm/tls.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASMARM_TLS_H
#define __ASMARM_TLS_H

#include <linux/compiler.h>
#include <asm/thread_info.h>

#ifdef __ASSEMBLY__
#include <asm/asm-offsets.h>
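/*
 * Calling convention shared by the switch_tls_* macros below, as
 * inferred from their bodies (and from the __switch_to code in
 * entry-armv.S that invokes switch_tls):
 *
 *   base    - thread_info pointer used to save the outgoing TPIDRURW
 *             value at TI_TP_VALUE + 4 (i.e. tp_value[1])
 *   tp      - incoming TPIDRURO (user read-only TLS) value
 *   tpuser  - incoming TPIDRURW (user read/write TLS) value
 *   tmp1, tmp2 - scratch registers the macros may clobber
 */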
	.macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
	.endm

	.macro switch_tls_v6k, base, tp, tpuser, tmp1, tmp2
	mrc	p15, 0, \tmp2, c13, c0, 2	@ get the user r/w register
	@ TLS register update is deferred until return to user space
	mcr	p15, 0, \tpuser, c13, c0, 2	@ set the user r/w register
	str	\tmp2, [\base, #TI_TP_VALUE + 4] @ save it
	.endm

	.macro switch_tls_v6, base, tp, tpuser, tmp1, tmp2
#ifdef CONFIG_SMP
ALT_SMP(nop)
ALT_UP_B(.L0_\@)
	.subsection 1
#endif
.L0_\@:
	ldr_va	\tmp1, elf_hwcap
	mov	\tmp2, #0xffff0fff
	tst	\tmp1, #HWCAP_TLS		@ hardware TLS available?
	streq	\tp, [\tmp2, #-15]		@ set TLS value at 0xffff0ff0
	beq	.L2_\@
	mcr	p15, 0, \tp, c13, c0, 3		@ yes, set TLS register
#ifdef CONFIG_SMP
	b	.L1_\@
	.previous
#endif
.L1_\@: switch_tls_v6k \base, \tp, \tpuser, \tmp1, \tmp2
.L2_\@:
	.endm
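
/*
 * Note on the v6 variant above: with CONFIG_SMP the ALT_SMP/ALT_UP_B
 * slot is runtime-patched, so on SMP hardware (which, being V6K-class,
 * always has the TLS registers) it is a nop and execution falls
 * through to switch_tls_v6k at .L1, while on UP hardware it becomes a
 * branch to the out-of-line .L0 code, which probes HWCAP_TLS and
 * either writes TPIDRURO directly or falls back to the kuser word at
 * 0xffff0ff0.
 */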

	.macro switch_tls_software, base, tp, tpuser, tmp1, tmp2
	mov	\tmp1, #0xffff0fff
	str	\tp, [\tmp1, #-15]		@ set TLS value at 0xffff0ff0
	.endm
#else
#include <asm/smp_plat.h>
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_TLS_REG_EMUL
#define tls_emu		1
#define has_tls_reg		1
#define defer_tls_reg_update	0
#define switch_tls	switch_tls_none
#elif defined(CONFIG_CPU_V6)
#define tls_emu		0
#define has_tls_reg		(elf_hwcap & HWCAP_TLS)
#define defer_tls_reg_update	is_smp()
#define switch_tls	switch_tls_v6
#elif defined(CONFIG_CPU_32v6K)
#define tls_emu		0
#define has_tls_reg		1
#define defer_tls_reg_update	1
#define switch_tls	switch_tls_v6k
#else
#define tls_emu		0
#define has_tls_reg		0
#define defer_tls_reg_update	0
#define switch_tls	switch_tls_software
#endif
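
/*
 * Summary of the variants selected above:
 *
 *   CONFIG_TLS_REG_EMUL - TLS register accesses are trapped and
 *     emulated, so there is nothing to switch (switch_tls_none).
 *   CONFIG_CPU_V6       - the register may or may not exist; probe
 *     elf_hwcap at run time and defer the update on SMP (switch_tls_v6).
 *   CONFIG_CPU_32v6K    - the register always exists; the TPIDRURO
 *     update is deferred to the return-to-user path (switch_tls_v6k).
 *   otherwise           - no hardware register; the value lives only
 *     in the kuser word at 0xffff0ff0 (switch_tls_software).
 */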

#ifndef __ASSEMBLY__

static inline void set_tls(unsigned long val)
{
	struct thread_info *thread;

	thread = current_thread_info();

	thread->tp_value[0] = val;

	/*
	 * This code runs with preemption enabled and therefore must
	 * be reentrant with respect to switch_tls.
	 *
	 * We need to ensure ordering between the shadow state and the
	 * hardware state, so that we don't corrupt the hardware state
	 * with a stale shadow state during context switch.
	 *
	 * If we're preempted here, switch_tls will load TPIDRURO from
	 * thread_info upon resuming execution and the following mcr
	 * is merely redundant.
	 */
	barrier();

	if (!tls_emu) {
		if (has_tls_reg && !defer_tls_reg_update) {
			asm("mcr p15, 0, %0, c13, c0, 3"
			    : : "r" (val));
		} else if (!has_tls_reg) {
#ifdef CONFIG_KUSER_HELPERS
			/*
			 * User space must never try to access this
			 * directly.  Expect your app to break
			 * eventually if you do so.  The user helper
			 * at 0xffff0fe0 must be used instead.  (see
			 * entry-armv.S for details)
			 */
			*((unsigned int *)0xffff0ff0) = val;
#endif
		}

	}
}
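
/*
 * For reference, the comment above directs user space to the kuser
 * helper rather than the raw word at 0xffff0ff0.  A minimal user-space
 * sketch in the style of Documentation/arm/kernel_user_helpers.rst
 * (the typedef and wrapper below are illustrative, not part of this
 * header):
 *
 *	typedef void * (__kuser_get_tls_t)(void);
 *	#define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)
 *
 *	void *tls = __kuser_get_tls();
 */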

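/*
 * Read back the current TPIDRURW (user read/write) register; returns 0
 * when the CPU has no TLS registers or they are being emulated.
 */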
static inline unsigned long get_tpuser(void)
{
	unsigned long reg = 0;

	if (has_tls_reg && !tls_emu)
		__asm__("mrc p15, 0, %0, c13, c0, 2" : "=r" (reg));

	return reg;
}

static inline void set_tpuser(unsigned long val)
{
	/* Since TPIDRURW is fully context-switched (unlike TPIDRURO),
	 * we need not update thread_info.
	 */
	if (has_tls_reg && !tls_emu) {
		asm("mcr p15, 0, %0, c13, c0, 2"
		    : : "r" (val));
	}
}

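/*
 * Reset both TLS values to zero through the setters above; typically
 * used when a thread's user state is being discarded (e.g. from
 * flush_thread() on exec).
 */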
static inline void flush_tls(void)
{
	set_tls(0);
	set_tpuser(0);
}

#endif	/* !__ASSEMBLY__ */
#endif	/* __ASMARM_TLS_H */