/*
 *  arch/s390/lib/uaccess_mvcos.c
 *
 *  Optimized user space access functions based on mvcos.
 *
 *    Copyright (C) IBM Corp. 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>

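/*
 * Select the 31 bit or the 64 bit instruction mnemonics depending
 * on the architecture the kernel is built for.
 */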
#ifndef __s390x__
#define AHI	"ahi"
#define ALR	"alr"
#define CLR	"clr"
#define LHI	"lhi"
#define SLR	"slr"
#else
#define AHI	"aghi"
#define ALR	"algr"
#define CLR	"clgr"
#define LHI	"lghi"
#define SLR	"slgr"
#endif

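/*
 * Copy from user space to kernel space using MVCOS.
 *
 * The ".insn ss,0xc80000000000,..." template hand-encodes MVCOS
 * (move with optional specifications), presumably because older
 * assemblers do not know the mnemonic. General register 0 contains
 * the operand access control; the value 0x81 should select the
 * secondary address space for the source operand, which is where
 * the user address space is expected to be installed for this
 * uaccess variant.
 *
 * A single MVCOS moves at most 4096 bytes and sets condition code
 * zero once the requested length is complete, so the loop at 0:/1:
 * advances size and both pointers in 4096 byte steps. On a fault
 * the fixup at 2: retries with the length reduced to the end of
 * the current user page, so that as many bytes as possible are
 * copied before giving up. The return value is the number of
 * bytes that could not be copied.
 */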
size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
{
	register unsigned long reg0 asm("0") = 0x81UL;
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	asm volatile(
		"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
		"   jz    4f\n"
		"1:"ALR"  %0,%3\n"
		"  "SLR"  %1,%3\n"
		"  "SLR"  %2,%3\n"
		"   j     0b\n"
		"2: la    %4,4095(%1)\n"/* %4 = ptr + 4095 */
		"   nr    %4,%3\n"	/* %4 = (ptr + 4095) & -4096 */
		"  "SLR"  %4,%1\n"
		"  "CLR"  %0,%4\n"	/* copy crosses next page boundary? */
		"   jnh   5f\n"
		"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
		"  "SLR"  %0,%4\n"
		"   j     5f\n"
		"4:"SLR"  %0,%0\n"
		"5: \n"
		EX_TABLE(0b,2b) EX_TABLE(3b,5b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}

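/*
 * Copy from kernel space to user space using MVCOS. Same structure
 * as copy_from_user_mvcos, but the operand access control 0x810000
 * should select the secondary (user) address space for the
 * destination operand instead of the source one. Returns the
 * number of bytes that could not be copied.
 */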
size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
{
	register unsigned long reg0 asm("0") = 0x810000UL;
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	asm volatile(
		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
		"   jz    4f\n"
		"1:"ALR"  %0,%3\n"
		"  "SLR"  %1,%3\n"
		"  "SLR"  %2,%3\n"
		"   j     0b\n"
		"2: la    %4,4095(%1)\n"/* %4 = ptr + 4095 */
		"   nr    %4,%3\n"	/* %4 = (ptr + 4095) & -4096 */
		"  "SLR"  %4,%1\n"
		"  "CLR"  %0,%4\n"	/* copy crosses next page boundary? */
		"   jnh   5f\n"
		"3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
		"  "SLR"  %0,%4\n"
		"   j     5f\n"
		"4:"SLR"  %0,%0\n"
		"5: \n"
		EX_TABLE(0b,2b) EX_TABLE(3b,5b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}

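/*
 * Copy within user space using MVCOS; 0x810081 should select the
 * secondary (user) address space for both operands. As the FIXME
 * below notes there is no reduced-length retry yet: a fault simply
 * terminates the loop and the number of remaining bytes is
 * returned.
 */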
size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from)
{
	register unsigned long reg0 asm("0") = 0x810081UL;
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	/* FIXME: copy with reduced length. */
	asm volatile(
		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
		"   jz    2f\n"
		"1:"ALR"  %0,%3\n"
		"  "SLR"  %1,%3\n"
		"  "SLR"  %2,%3\n"
		"   j     0b\n"
		"2:"SLR"  %0,%0\n"
		"3: \n"
		EX_TABLE(0b,3b)
		: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}

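/*
 * Clear user space using MVCOS by copying from empty_zero_page,
 * the kernel's page of zeroes. The operand access control 0x810000
 * should select the secondary (user) address space for the
 * destination operand only. Returns the number of bytes that
 * could not be cleared.
 */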
size_t clear_user_mvcos(size_t size, void __user *to)
{
	register unsigned long reg0 asm("0") = 0x810000UL;
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	asm volatile(
		"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
		"   jz    4f\n"
		"1:"ALR"  %0,%2\n"
		"  "SLR"  %1,%2\n"
		"   j     0b\n"
		"2: la    %3,4095(%1)\n"/* %3 = to + 4095 */
		"   nr    %3,%2\n"	/* %3 = (to + 4095) & -4096 */
		"  "SLR"  %3,%1\n"
		"  "CLR"  %0,%3\n"	/* copy crosses next page boundary? */
		"   jnh   5f\n"
		"3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
		"  "SLR"  %0,%3\n"
		"   j     5f\n"
		"4:"SLR"  %0,%0\n"
		"5: \n"
		EX_TABLE(0b,2b) EX_TABLE(3b,5b)
		: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
		: "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
	return size;
}

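/*
 * Primitives without an mvcos based implementation; the *_std
 * helpers appear to be shared with the standard (mvcp/mvcs based)
 * uaccess code.
 */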
extern size_t copy_from_user_std_small(size_t, const void __user *, void *);
extern size_t copy_to_user_std_small(size_t, void __user *, const void *);
extern size_t strnlen_user_std(size_t, const char __user *);
extern size_t strncpy_from_user_std(size_t, const char __user *, char *);
extern int futex_atomic_op(int, int __user *, int, int *);
extern int futex_atomic_cmpxchg(int __user *, int, int);

struct uaccess_ops uaccess_mvcos = {
	.copy_from_user = copy_from_user_mvcos,
	.copy_from_user_small = copy_from_user_std_small,
	.copy_to_user = copy_to_user_mvcos,
	.copy_to_user_small = copy_to_user_std_small,
	.copy_in_user = copy_in_user_mvcos,
	.clear_user = clear_user_mvcos,
	.strnlen_user = strnlen_user_std,
	.strncpy_from_user = strncpy_from_user_std,
	.futex_atomic_op = futex_atomic_op,
	.futex_atomic_cmpxchg = futex_atomic_cmpxchg,
};