author		Al Viro <viro@zeniv.linux.org.uk>	2016-10-14 20:42:44 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2016-10-14 20:42:44 -0400
commit		2692a71bbd40160165e89d5505c5c28144ec5a42 (patch)
tree		acaaf0f1a4942486109094acad37c29456aeda98 /arch
parent		7041c57709efdc1e31aaff663cfe17f0b21f4743 (diff)
parent		b065444286bed7ec49ee81c593a46f3031fbfc83 (diff)
Merge branch 'work.uaccess' into for-linus
Diffstat (limited to 'arch')
-rw-r--r--	arch/alpha/include/asm/uaccess.h	9
-rw-r--r--	arch/alpha/lib/copy_user.S		16
-rw-r--r--	arch/alpha/lib/ev6-copy_user.S		23
-rw-r--r--	arch/arc/kernel/signal.c		8
-rw-r--r--	arch/arm/include/asm/uaccess.h		11
-rw-r--r--	arch/arm/lib/copy_from_user.S		9
-rw-r--r--	arch/arm64/include/asm/uaccess.h	10
-rw-r--r--	arch/arm64/lib/copy_from_user.S		7
-rw-r--r--	arch/blackfin/include/asm/uaccess.h	32
9 files changed, 47 insertions(+), 78 deletions(-)
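
The theme running through this merge: on a partially faulting copy_from_user(), return the number of bytes not copied and zero exactly that tail of the kernel buffer, instead of zeroing only on an access_ok() failure and leaving a partial fault's tail full of stale data. A minimal userspace model of that contract follows; model_copy_from_user and fault_at are hypothetical names, with fault_at simulating the offset at which the user access would fault.

	#include <stddef.h>
	#include <string.h>

	/*
	 * Userspace model of the copy_from_user() contract this merge
	 * enforces; "fault_at" simulates where the user access faults.
	 */
	static size_t model_copy_from_user(void *to, const void *from,
					   size_t n, size_t fault_at)
	{
		size_t copied = fault_at < n ? fault_at : n; /* bytes before the fault */
		size_t res = n - copied;                     /* bytes NOT copied */

		memcpy(to, from, copied);
		if (res)
			memset((char *)to + (n - res), 0, res); /* zero only the tail */
		return res;
	}

On full success res is 0 and the memset is skipped; on a fault only the uncopied tail is cleared, so stale kernel memory cannot leak through the buffer and the bytes that did arrive are preserved.
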
diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
index 466e42e96bfa..94f587535dee 100644
--- a/arch/alpha/include/asm/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
@@ -396,11 +396,12 @@ copy_to_user(void __user *to, const void *from, long n)
extern inline long
copy_from_user(void *to, const void __user *from, long n)
{
+ long res = n;
if (likely(__access_ok((unsigned long)from, n, get_fs())))
- n = __copy_tofrom_user_nocheck(to, (__force void *)from, n);
- else
- memset(to, 0, n);
- return n;
+ res = __copy_from_user_inatomic(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
}
extern void __do_clear_user(void);
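
Note how the old else branch folds into the common tail: when __access_ok() fails, res keeps its initial value n, so memset(to + (n - res), 0, res) degenerates to memset(to, 0, n), exactly the full-buffer zeroing the deleted branch performed. Callers keep the usual idiom; a hedged sketch, where struct foo_args and example_get_arg are hypothetical:

	/* Hypothetical caller: a nonzero return still simply means -EFAULT,
	 * and the destination's tail is now guaranteed to be zeroed. */
	static long example_get_arg(struct foo_args *karg,
				    const struct foo_args __user *uarg)
	{
		if (copy_from_user(karg, uarg, sizeof(*karg)))
			return -EFAULT;
		return 0;
	}
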
diff --git a/arch/alpha/lib/copy_user.S b/arch/alpha/lib/copy_user.S
index 6f3fab9eb434..ac9c3766ba8c 100644
--- a/arch/alpha/lib/copy_user.S
+++ b/arch/alpha/lib/copy_user.S
@@ -124,22 +124,8 @@ $65:
bis $31,$31,$0
$41:
$35:
-$exitout:
- ret $31,($28),1
-
$exitin:
- /* A stupid byte-by-byte zeroing of the rest of the output
- buffer. This cures security holes by never leaving
- random kernel data around to be copied elsewhere. */
-
- mov $0,$1
-$101:
- EXO ( ldq_u $2,0($6) )
- subq $1,1,$1
- mskbl $2,$6,$2
- EXO ( stq_u $2,0($6) )
- addq $6,1,$6
- bgt $1,$101
+$exitout:
ret $31,($28),1
.end __copy_user
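
With the zeroing hoisted into the C wrapper above, this exception path only has to return with the leftover count already in $0. A quick userspace check that the wrapper's single memset reproduces what the deleted byte-by-byte loop did; the harness and its numbers are purely illustrative:

	#include <assert.h>
	#include <string.h>

	int main(void)
	{
		char a[16], b[16];
		long n = 16, res = 6;	/* pretend 10 of 16 bytes were copied */

		memset(a, 0xAA, sizeof(a));	/* stand-in for stale kernel data */
		memset(b, 0xAA, sizeof(b));

		for (long i = n - res; i < n; i++)	/* old: one byte per iteration */
			a[i] = 0;
		memset(b + (n - res), 0, res);		/* new: one memset in the C wrapper */

		assert(memcmp(a, b, sizeof(a)) == 0);	/* identical result */
		return 0;
	}
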
diff --git a/arch/alpha/lib/ev6-copy_user.S b/arch/alpha/lib/ev6-copy_user.S
index db42ffe9c350..c4d0689c3d26 100644
--- a/arch/alpha/lib/ev6-copy_user.S
+++ b/arch/alpha/lib/ev6-copy_user.S
@@ -227,33 +227,12 @@ $dirtyentry:
bgt $0,$onebyteloop # U .. .. .. : U L U L
$zerolength:
+$exitin:
$exitout: # Destination for exception recovery(?)
nop # .. .. .. E
nop # .. .. E ..
nop # .. E .. ..
ret $31,($28),1 # L0 .. .. .. : L U L U
-$exitin:
-
- /* A stupid byte-by-byte zeroing of the rest of the output
- buffer. This cures security holes by never leaving
- random kernel data around to be copied elsewhere. */
-
- nop
- nop
- nop
- mov $0,$1
-
-$101:
- EXO ( stb $31,0($6) ) # L
- subq $1,1,$1 # E
- addq $6,1,$6 # E
- bgt $1,$101 # U
-
- nop
- nop
- nop
- ret $31,($28),1 # L0
-
.end __copy_user
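
The EV6-scheduled variant loses the same loop; the only wrinkle is that its deleted code stored zero bytes directly (stb $31) where the generic version above did an unaligned-safe load/mask/store per byte, and both now reduce to the wrapper's memset. Merging $exitin into $exitout leaves a single exit whose whole job is to return; a one-line C rendering, hypothetical name:

	/* Both exception destinations now share one exit: return with the
	 * not-copied count, which the routine keeps in $0, touching nothing. */
	static long copy_exit(long leftover_in_v0)
	{
		return leftover_in_v0;	/* zeroing is the C caller's job now */
	}
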
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 6cb3736b6b83..d347bbc086fe 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -107,13 +107,13 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
struct user_regs_struct uregs;
err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
- if (!err)
- set_current_blocked(&set);
-
err |= __copy_from_user(&uregs.scratch,
&(sf->uc.uc_mcontext.regs.scratch),
sizeof(sf->uc.uc_mcontext.regs.scratch));
+ if (err)
+ return err;
+ set_current_blocked(&set);
regs->bta = uregs.scratch.bta;
regs->lp_start = uregs.scratch.lp_start;
regs->lp_end = uregs.scratch.lp_end;
@@ -138,7 +138,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
regs->r0 = uregs.scratch.r0;
regs->sp = uregs.scratch.sp;
- return err;
+ return 0;
}
static inline int is_do_ss_needed(unsigned int magic)
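
The ARC change is about ordering rather than zeroing: set_current_blocked() used to run after only the first __copy_from_user(), so a fault in the later register copy could leave the task with the new signal mask but the old registers. The rewrite gathers every error first and commits side effects only on full success. A hedged sketch of that pattern, with hypothetical kstate/ustate/commit_mask names:

	/* Validate all user-supplied state first; commit only when every
	 * copy succeeded, so a partial failure leaves the task untouched. */
	static int restore_state(struct kstate *ks, const struct ustate __user *us)
	{
		int err;

		err  = __copy_from_user(&ks->mask, &us->mask, sizeof(ks->mask));
		err |= __copy_from_user(&ks->regs, &us->regs, sizeof(ks->regs));
		if (err)
			return err;

		commit_mask(&ks->mask);	/* the side effect, after full validation */
		return 0;
	}
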
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index a93c0f99acf7..1f59ea051bab 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -533,11 +533,12 @@ __clear_user(void __user *addr, unsigned long n)
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (access_ok(VERIFY_READ, from, n))
- n = __copy_from_user(to, from, n);
- else /* security hole - plug it */
- memset(to, 0, n);
- return n;
+ unsigned long res = n;
+ if (likely(access_ok(VERIFY_READ, from, n)))
+ res = __copy_from_user(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
}
static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
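
The tail arithmetic, with illustrative numbers: if a 128-byte copy faults with 40 bytes left, res is 40, n - res is the 88 bytes that actually landed, and the memset clears to[88..127] only.

	#include <string.h>

	int main(void)
	{
		char to[128];
		unsigned long n = sizeof(to), res = 40;	/* illustrative fault: 40 left */

		memset(to, 'X', n);			/* pretend 88 bytes were copied */
		memset(to + (n - res), 0, res);		/* clears to[88] .. to[127] only */
		return 0;
	}

The unlikely() hint keeps the memset off the fast path; a fully successful copy never touches it.
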
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 1512bebfbf1b..7a4b06049001 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -98,12 +98,9 @@ ENDPROC(arm_copy_from_user)
.pushsection .fixup,"ax"
.align 0
copy_abort_preamble
- ldmfd sp!, {r1, r2}
- sub r3, r0, r1
- rsb r1, r3, r2
- str r1, [sp]
- bl __memzero
- ldr r0, [sp], #4
+ ldmfd sp!, {r1, r2, r3}
+ sub r0, r0, r1
+ rsb r0, r0, r2
copy_abort_end
.popsection
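
The new ARM fixup does the bookkeeping entirely in registers: r0 holds the faulting destination pointer, the popped r1/r2 hold the original destination and byte count, so sub then rsb yield count minus bytes-copied, and the call to __memzero disappears along with the zeroing. A C rendering, hypothetical name:

	/* sub r0, r0, r1  ->  bytes copied = fault_dst - orig_dst
	 * rsb r0, r0, r2  ->  r0 = count - bytes copied (returned to C) */
	static unsigned long arm_fixup_model(const char *fault_dst,
					     const char *orig_dst,
					     unsigned long count)
	{
		unsigned long copied = (unsigned long)(fault_dst - orig_dst);

		return count - copied;	/* the C wrapper's memset handles the rest */
	}
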
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index c47257c91b77..bcaf6fba1b65 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -278,14 +278,16 @@ static inline unsigned long __must_check __copy_to_user(void __user *to, const v
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
+ unsigned long res = n;
kasan_check_write(to, n);
if (access_ok(VERIFY_READ, from, n)) {
check_object_size(to, n, false);
- n = __arch_copy_from_user(to, from, n);
- } else /* security hole - plug it */
- memset(to, 0, n);
- return n;
+ res = __arch_copy_from_user(to, from, n);
+ }
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
}
static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
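
The now-deleted "security hole - plug it" comments describe a real infoleak: without tail zeroing, whatever the destination held before a partially faulting copy survives, and a later copy back out to userspace would leak it. A userspace sketch of the failure mode and the fix; every value here is illustrative:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char buf[16];

		memset(buf, 'S', sizeof(buf));	/* stale "secret" kernel data */
		memcpy(buf, "user input", 10);	/* the copy faults after 10 bytes */
		memset(buf + 10, 0, 6);		/* the fix: zero the 6-byte tail */

		fwrite(buf, 1, sizeof(buf), stdout);	/* no 'S' bytes escape */
		return 0;
	}
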
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 0b90497d4424..4fd67ea03bb0 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -79,11 +79,6 @@ ENDPROC(__arch_copy_from_user)
.section .fixup,"ax"
.align 2
-9998:
- sub x0, end, dst
-9999:
- strb wzr, [dst], #1 // zero remaining buffer space
- cmp dst, end
- b.lo 9999b
+9998: sub x0, end, dst // bytes not copied
ret
.previous
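
arm64's fixup is simpler still: the copy advances dst toward a fixed end, so the bytes not copied are just end - dst, computed in the single instruction at label 9998 and handed back in x0 for the wrapper's memset. As a one-liner in C, hypothetical name:

	/* x0 = end - dst: everything past the faulting destination pointer. */
	static unsigned long arm64_fixup_model(const char *dst, const char *end)
	{
		return (unsigned long)(end - dst);	/* bytes not copied */
	}
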
diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h
index 0a2a70096d8b..0eff88aa6d6a 100644
--- a/arch/blackfin/include/asm/uaccess.h
+++ b/arch/blackfin/include/asm/uaccess.h
@@ -163,18 +163,29 @@ static inline int bad_user_access_length(void)
: "a" (__ptr(ptr))); \
})
-#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
-#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
static inline unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ memcpy(to, (const void __force *)from, n);
+ return 0;
+}
+
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ memcpy((void __force *)to, from, n);
+ SSYNC();
+ return 0;
+}
+
+static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (likely(access_ok(VERIFY_READ, from, n))) {
- memcpy(to, (const void __force *)from, n);
- return 0;
- }
+ if (likely(access_ok(VERIFY_READ, from, n)))
+ return __copy_from_user(to, from, n);
memset(to, 0, n);
return n;
}
@@ -182,12 +193,9 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (access_ok(VERIFY_WRITE, to, n))
- memcpy((void __force *)to, from, n);
- else
- return n;
- SSYNC();
- return 0;
+ if (likely(access_ok(VERIFY_WRITE, to, n)))
+ return __copy_to_user(to, from, n);
+ return n;
}
/*
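
Blackfin has no MMU, so a raw user copy cannot fault: the new out-of-line __copy_from_user()/__copy_to_user() are plain memcpy()s that always report complete success, and the access_ok()-checking wrappers just delegate to them. A userspace sketch of that layering under the nommu assumption; raw_copy and checked_copy are hypothetical names:

	#include <string.h>

	/* nommu: the raw copy cannot fault, so it always returns 0 left. */
	static unsigned long raw_copy(void *to, const void *from, unsigned long n)
	{
		memcpy(to, from, n);
		return 0;
	}

	/* Only the range check can fail; on failure nothing was copied,
	 * so the whole destination gets zeroed. */
	static unsigned long checked_copy(void *to, const void *from,
					  unsigned long n, int range_ok)
	{
		if (!range_ok) {
			memset(to, 0, n);
			return n;
		}
		return raw_copy(to, from, n);
	}
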