From 6e41c585e38ff696de3a11509a0ad0a11150b0c3 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Wed, 22 Jul 2020 22:14:36 -0400
Subject: unify generic instances of csum_partial_copy_nocheck()

quite a few architectures have the same csum_partial_copy_nocheck() -
simply memcpy() the data and then return the csum of the copy.

hexagon, parisc, ia64, s390, um: explicitly spelled out that way.

arc, arm64, csky, h8300, m68k/nommu, microblaze, mips/GENERIC_CSUM, nds32,
nios2, openrisc, riscv, unicore32: end up picking the same thing spelled
out in lib/checksum.c (with varying amounts of perversions along the way).

everybody else (alpha, arm, c6x, m68k/mmu, mips/!GENERIC_CSUM, powerpc,
sh, sparc, x86, xtensa) has a non-generic variant.  For all except c6x
the declaration is in their asm/checksum.h.  c6x uses the wrapper from
asm-generic/checksum.h that would normally lead to the generic
lib/checksum.c instance, but in case of c6x we end up using an asm
function from arch/c6x instead.

Screw that mess - have architectures with private instances define
_HAVE_ARCH_CSUM_AND_COPY in their asm/checksum.h and have the default
one right in net/checksum.h, conditional on _HAVE_ARCH_CSUM_AND_COPY
*not* being defined.

Signed-off-by: Al Viro
---
 arch/powerpc/include/asm/checksum.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/powerpc')

diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index 9cce06194dcc..d75fc5bf8f37 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -29,6 +29,7 @@ extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
 extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
				    int len, __wsum sum, int *err_ptr);
 
+#define _HAVE_ARCH_CSUM_AND_COPY
 #define csum_partial_copy_nocheck(src, dst, len, sum)	\
	csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)
 
-- 
cgit
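For reference, the consolidated default is small enough to show in full.
A minimal sketch of the net/checksum.h fallback this introduces, assuming
the signature as of this commit (the initial-sum argument is dropped by
the next patch in the series):

	#ifndef _HAVE_ARCH_CSUM_AND_COPY
	static inline __wsum
	csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
	{
		/* copy first, then checksum the copy */
		memcpy(dst, src, len);
		return csum_partial(dst, len, sum);
	}
	#endif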
From cc44c17baf7f3f833d36b2f2a1edb1cc0b6f2cc4 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Sat, 11 Jul 2020 00:12:07 -0400
Subject: csum_partial_copy_nocheck(): drop the last argument

It's always 0.  Note that we theoretically could use ~0U as well - the
result will be the same modulo 0xffff, _if_ the damn thing did the right
thing for any value of initial sum; later we'll make use of that when
convenient.

However, unlike csum_and_copy_..._user(), there are instances that did
not work for arbitrary initial sums; c6x is one such.

Signed-off-by: Al Viro
---
 arch/powerpc/include/asm/checksum.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/powerpc')

diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index d75fc5bf8f37..64299785f639 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -30,8 +30,8 @@ extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
				    int len, __wsum sum, int *err_ptr);
 
 #define _HAVE_ARCH_CSUM_AND_COPY
-#define csum_partial_copy_nocheck(src, dst, len, sum)	\
-	csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)
+#define csum_partial_copy_nocheck(src, dst, len)	\
+	csum_partial_copy_generic((src), (dst), (len), 0, NULL, NULL)
 
 /*
-- 
cgit
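The "same modulo 0xffff" claim is easy to check: 0xffffffff = 0xffff * 0x10001,
so an all-ones initial sum vanishes once the accumulator is folded down to
16 bits.  A standalone illustration (plain userspace C, not kernel code;
csum_fold() here is a local stand-in for the kernel helper of the same name):

	#include <stdint.h>
	#include <stdio.h>

	/* fold a wide ones'-complement accumulator down to 16 bits */
	static uint16_t csum_fold(uint64_t sum)
	{
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)sum;
	}

	int main(void)
	{
		const uint16_t words[] = { 0x1234, 0xabcd, 0xffff, 0x0001 };
		uint64_t s0 = 0, s1 = 0xffffffffULL;	/* the two initial sums */

		for (int i = 0; i < 4; i++) {
			s0 += words[i];
			s1 += words[i];
		}
		/* both print 0xbe02 here; in general they agree modulo 0xffff */
		printf("%#x %#x\n", csum_fold(s0), csum_fold(s1));
		return 0;
	}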
From c693cc4676a055c4126e487b30b0a96ea7ec9936 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Sat, 11 Jul 2020 00:27:49 -0400
Subject: saner calling conventions for csum_and_copy_..._user()

All callers of these primitives will
	* discard anything we might've copied in case of error
	* ignore the csum value in case of error
	* always pass 0xffffffff as the initial sum, so the resulting
csum value (in case of success, that is) will never be 0.

That suggests the following calling conventions:
	* don't pass err_ptr - just return 0 on error.
	* don't bother with zeroing destination, etc. in case of error
	* don't pass the initial sum - just use 0xffffffff.

This commit does the minimal conversion in the instances of
csum_and_copy_...(); the changes of the actual asm code behind them are
done later in the series.  Note that this asm code is often shared with
csum_partial_copy_nocheck(); the difference is that
csum_partial_copy_nocheck() passes 0 for the initial sum while
csum_and_copy_..._user() passes 0xffffffff.  Fortunately, we are free to
pass 0xffffffff in all cases and subsequent patches will use that freedom
without any special comments.

A part that could be split off: parisc and uml/i386 claimed to have
csum_and_copy_to_user() instances of their own, but those were identical
to the generic one, so we simply drop them.  Not sure if it's worth a
separate commit...

Signed-off-by: Al Viro
---
 arch/powerpc/include/asm/checksum.h  |  4 +--
 arch/powerpc/lib/checksum_wrappers.c | 68 +++++++++++-------------------------
 2 files changed, 23 insertions(+), 49 deletions(-)

(limited to 'arch/powerpc')

diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index 64299785f639..dba685d984c0 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -24,10 +24,10 @@ extern __wsum csum_partial_copy_generic(const void *src, void *dst,
 
 #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
 extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
-				      int len, __wsum sum, int *err_ptr);
+				      int len);
 #define HAVE_CSUM_COPY_USER
 extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
-				    int len, __wsum sum, int *err_ptr);
+				    int len);
 
 #define _HAVE_ARCH_CSUM_AND_COPY
 #define csum_partial_copy_nocheck(src, dst, len)	\
diff --git a/arch/powerpc/lib/checksum_wrappers.c b/arch/powerpc/lib/checksum_wrappers.c
index fabe4db28726..b1faa82dd8af 100644
--- a/arch/powerpc/lib/checksum_wrappers.c
+++ b/arch/powerpc/lib/checksum_wrappers.c
@@ -12,82 +12,56 @@
 #include <linux/uaccess.h>
 
 __wsum csum_and_copy_from_user(const void __user *src, void *dst,
-			       int len, __wsum sum, int *err_ptr)
+			       int len)
 {
	unsigned int csum;
+	int err = 0;
 
	might_sleep();
-	allow_read_from_user(src, len);
-
-	*err_ptr = 0;
-
-	if (!len) {
-		csum = 0;
-		goto out;
-	}
 
+	if (unlikely(!access_ok(src, len)))
+		return 0;
 
-	if (unlikely((len < 0) || !access_ok(src, len))) {
-		*err_ptr = -EFAULT;
-		csum = (__force unsigned int)sum;
-		goto out;
-	}
+	allow_read_from_user(src, len);
 
	csum = csum_partial_copy_generic((void __force *)src, dst,
-					 len, sum, err_ptr, NULL);
+					 len, ~0U, &err, NULL);
 
-	if (unlikely(*err_ptr)) {
+	if (unlikely(err)) {
		int missing = __copy_from_user(dst, src, len);
 
-		if (missing) {
-			memset(dst + len - missing, 0, missing);
-			*err_ptr = -EFAULT;
-		} else {
-			*err_ptr = 0;
-		}
-
-		csum = csum_partial(dst, len, sum);
+		if (missing)
+			csum = 0;
+		else
+			csum = csum_partial(dst, len, ~0U);
	}
 
-out:
	prevent_read_from_user(src, len);
	return (__force __wsum)csum;
 }
 EXPORT_SYMBOL(csum_and_copy_from_user);
 
-__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
-			     __wsum sum, int *err_ptr)
+__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
 {
	unsigned int csum;
+	int err = 0;
 
	might_sleep();
-	allow_write_to_user(dst, len);
-
-	*err_ptr = 0;
-
-	if (!len) {
-		csum = 0;
-		goto out;
-	}
 
+	if (unlikely(!access_ok(dst, len)))
+		return 0;
 
-	if (unlikely((len < 0) || !access_ok(dst, len))) {
-		*err_ptr = -EFAULT;
-		csum = -1; /* invalid checksum */
-		goto out;
-	}
+	allow_write_to_user(dst, len);
 
	csum = csum_partial_copy_generic(src, (void __force *)dst,
-					 len, sum, NULL, err_ptr);
+					 len, ~0U, NULL, &err);
 
-	if (unlikely(*err_ptr)) {
-		csum = csum_partial(src, len, sum);
+	if (unlikely(err)) {
+		csum = csum_partial(src, len, ~0U);
 
-		if (copy_to_user(dst, src, len)) {
-			*err_ptr = -EFAULT;
-			csum = -1; /* invalid checksum */
-		}
+		if (copy_to_user(dst, src, len))
+			csum = 0;
	}
 
-out:
	prevent_write_to_user(dst, len);
	return (__force __wsum)csum;
 }
 EXPORT_SYMBOL(csum_and_copy_to_user);
-- 
cgit
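Under the new convention a call site needs no error pointer: seeding the
accumulator with 0xffffffff guarantees a successful sum is never 0, so 0
itself becomes the error indication.  A hypothetical caller, sketched
against the new prototypes (not a real call site from the series):

	__wsum csum = csum_and_copy_from_user(src, dst, len);

	if (!csum)		/* 0 now means "fault during the copy" */
		return -EFAULT;
	/* success: csum is the 32-bit partial sum, seeded with 0xffffffff */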
From 70d65cd555c5e43c613700f604a47f7ebcf7b6f1 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Mon, 20 Jul 2020 10:09:24 -0400
Subject: ppc: propagate the calling conventions change down to csum_partial_copy_generic()

... and get rid of the pointless fallback in the wrappers.  On error it
used to zero the unwritten area and calculate the csum of the entire
thing.  Not wanting to do that in the assembler part had been very
reasonable; doing it in the first place, OTOH, made no sense - in case
of an error the caller discards the data we'd copied, along with
whatever checksum it might've had.

Signed-off-by: Al Viro
---
 arch/powerpc/include/asm/checksum.h  |  6 +--
 arch/powerpc/lib/checksum_32.S       | 74 +++++++++++++-----------------------
 arch/powerpc/lib/checksum_64.S       | 37 ++++++------------
 arch/powerpc/lib/checksum_wrappers.c | 32 +++-------------
 4 files changed, 46 insertions(+), 103 deletions(-)

(limited to 'arch/powerpc')

diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index dba685d984c0..82f099ba2411 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -18,9 +18,7 @@
  * Like csum_partial, this must be called with even lengths,
  * except for the last fragment.
  */
-extern __wsum csum_partial_copy_generic(const void *src, void *dst,
-					int len, __wsum sum,
-					int *src_err, int *dst_err);
+extern __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
 
 #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
 extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
@@ -31,7 +29,7 @@ extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
 
 #define _HAVE_ARCH_CSUM_AND_COPY
 #define csum_partial_copy_nocheck(src, dst, len)	\
-	csum_partial_copy_generic((src), (dst), (len), 0, NULL, NULL)
+	csum_partial_copy_generic((src), (dst), (len))
 
 /*
diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S
index ecd150dc3ed9..ec5cd2dede35 100644
--- a/arch/powerpc/lib/checksum_32.S
+++ b/arch/powerpc/lib/checksum_32.S
@@ -78,12 +78,10 @@ EXPORT_SYMBOL(__csum_partial)
 
 /*
  * Computes the checksum of a memory block at src, length len,
- * and adds in "sum" (32-bit), while copying the block to dst.
- * If an access exception occurs on src or dst, it stores -EFAULT
- * to *src_err or *dst_err respectively, and (for an error on
- * src) zeroes the rest of dst.
+ * and adds in 0xffffffff, while copying the block to dst.
+ * If an access exception occurs it returns zero.
  *
- * csum_partial_copy_generic(src, dst, len, sum, src_err, dst_err)
+ * csum_partial_copy_generic(src, dst, len)
  */
 #define CSUM_COPY_16_BYTES_WITHEX(n)	\
 8 ## n ## 0:			\
@@ -108,14 +106,14 @@ EXPORT_SYMBOL(__csum_partial)
	adde	r12,r12,r10
 
 #define CSUM_COPY_16_BYTES_EXCODE(n)		\
-	EX_TABLE(8 ## n ## 0b, src_error);	\
-	EX_TABLE(8 ## n ## 1b, src_error);	\
-	EX_TABLE(8 ## n ## 2b, src_error);	\
-	EX_TABLE(8 ## n ## 3b, src_error);	\
-	EX_TABLE(8 ## n ## 4b, dst_error);	\
-	EX_TABLE(8 ## n ## 5b, dst_error);	\
-	EX_TABLE(8 ## n ## 6b, dst_error);	\
-	EX_TABLE(8 ## n ## 7b, dst_error);
+	EX_TABLE(8 ## n ## 0b, fault);	\
+	EX_TABLE(8 ## n ## 1b, fault);	\
+	EX_TABLE(8 ## n ## 2b, fault);	\
+	EX_TABLE(8 ## n ## 3b, fault);	\
+	EX_TABLE(8 ## n ## 4b, fault);	\
+	EX_TABLE(8 ## n ## 5b, fault);	\
+	EX_TABLE(8 ## n ## 6b, fault);	\
+	EX_TABLE(8 ## n ## 7b, fault);
 
	.text
	.stabs	"arch/powerpc/lib/",N_SO,0,0,0f
@@ -127,11 +125,8 @@ CACHELINE_BYTES = L1_CACHE_BYTES
 LG_CACHELINE_BYTES = L1_CACHE_SHIFT
 CACHELINE_MASK = (L1_CACHE_BYTES-1)
 
 _GLOBAL(csum_partial_copy_generic)
-	stwu	r1,-16(r1)
-	stw	r7,12(r1)
-	stw	r8,8(r1)
-
-	addic	r12,r6,0
+	li	r12,-1
+	addic	r0,r0,0			/* clear carry */
	addi	r6,r4,-4
	neg	r0,r4
	addi	r4,r3,-4
@@ -246,34 +241,19 @@ _GLOBAL(csum_partial_copy_generic)
	rlwinm	r3,r3,8,0,31	/* odd destination address: rotate one byte */
	blr
 
-/* read fault */
-src_error:
-	lwz	r7,12(r1)
-	addi	r1,r1,16
-	cmpwi	cr0,r7,0
-	beqlr
-	li	r0,-EFAULT
-	stw	r0,0(r7)
-	blr
-/* write fault */
-dst_error:
-	lwz	r8,8(r1)
-	addi	r1,r1,16
-	cmpwi	cr0,r8,0
-	beqlr
-	li	r0,-EFAULT
-	stw	r0,0(r8)
+fault:
+	li	r3,0
	blr
 
-	EX_TABLE(70b, src_error);
-	EX_TABLE(71b, dst_error);
-	EX_TABLE(72b, src_error);
-	EX_TABLE(73b, dst_error);
-	EX_TABLE(54b, dst_error);
+	EX_TABLE(70b, fault);
+	EX_TABLE(71b, fault);
+	EX_TABLE(72b, fault);
+	EX_TABLE(73b, fault);
+	EX_TABLE(54b, fault);
 
 /*
  * this stuff handles faults in the cacheline loop and branches to either
- * src_error (if in read part) or dst_error (if in write part)
+ * fault (if in read part) or fault (if in write part)
  */
	CSUM_COPY_16_BYTES_EXCODE(0)
 #if L1_CACHE_BYTES >= 32
@@ -290,12 +270,12 @@ dst_error:
 #endif
 #endif
 
-	EX_TABLE(30b, src_error);
-	EX_TABLE(31b, dst_error);
-	EX_TABLE(40b, src_error);
-	EX_TABLE(41b, dst_error);
-	EX_TABLE(50b, src_error);
-	EX_TABLE(51b, dst_error);
+	EX_TABLE(30b, fault);
+	EX_TABLE(31b, fault);
+	EX_TABLE(40b, fault);
+	EX_TABLE(41b, fault);
+	EX_TABLE(50b, fault);
+	EX_TABLE(51b, fault);
 
 EXPORT_SYMBOL(csum_partial_copy_generic)
 
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 514978f908d4..98ff51bd2f7d 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -182,34 +182,33 @@ EXPORT_SYMBOL(__csum_partial)
 
	.macro srcnr
 100:
-	EX_TABLE(100b,.Lsrc_error_nr)
+	EX_TABLE(100b,.Lerror_nr)
	.endm
 
	.macro source
 150:
-	EX_TABLE(150b,.Lsrc_error)
+	EX_TABLE(150b,.Lerror)
	.endm
 
	.macro dstnr
 200:
-	EX_TABLE(200b,.Ldest_error_nr)
+	EX_TABLE(200b,.Lerror_nr)
	.endm
 
	.macro dest
 250:
-	EX_TABLE(250b,.Ldest_error)
+	EX_TABLE(250b,.Lerror)
	.endm
 
 /*
  * Computes the checksum of a memory block at src, length len,
- * and adds in "sum" (32-bit), while copying the block to dst.
- * If an access exception occurs on src or dst, it stores -EFAULT
- * to *src_err or *dst_err respectively. The caller must take any action
- * required in this case (zeroing memory, recalculating partial checksum etc).
+ * and adds in 0xffffffff (32-bit), while copying the block to dst.
+ * If an access exception occurs, it returns 0.
  *
- * csum_partial_copy_generic(r3=src, r4=dst, r5=len, r6=sum, r7=src_err, r8=dst_err)
+ * csum_partial_copy_generic(r3=src, r4=dst, r5=len)
  */
 _GLOBAL(csum_partial_copy_generic)
+	li	r6,-1
	addic	r0,r6,0			/* clear carry */
 
	srdi.	r6,r5,3			/* less than 8 bytes? */
@@ -401,29 +400,15 @@ dstnr;	stb	r6,0(r4)
	srdi	r3,r3,32
	blr
 
-.Lsrc_error:
+.Lerror:
	ld	r14,STK_REG(R14)(r1)
	ld	r15,STK_REG(R15)(r1)
	ld	r16,STK_REG(R16)(r1)
	addi	r1,r1,STACKFRAMESIZE
-.Lsrc_error_nr:
-	cmpdi	0,r7,0
-	beqlr
-	li	r6,-EFAULT
-	stw	r6,0(r7)
+.Lerror_nr:
+	li	r3,0
	blr
 
-.Ldest_error:
-	ld	r14,STK_REG(R14)(r1)
-	ld	r15,STK_REG(R15)(r1)
-	ld	r16,STK_REG(R16)(r1)
-	addi	r1,r1,STACKFRAMESIZE
-.Ldest_error_nr:
-	cmpdi	0,r8,0
-	beqlr
-	li	r6,-EFAULT
-	stw	r6,0(r8)
-	blr
 EXPORT_SYMBOL(csum_partial_copy_generic)
 
 /*
diff --git a/arch/powerpc/lib/checksum_wrappers.c b/arch/powerpc/lib/checksum_wrappers.c
index b1faa82dd8af..b895166afc82 100644
--- a/arch/powerpc/lib/checksum_wrappers.c
+++ b/arch/powerpc/lib/checksum_wrappers.c
@@ -14,8 +14,7 @@
 __wsum csum_and_copy_from_user(const void __user *src, void *dst,
			       int len)
 {
-	unsigned int csum;
-	int err = 0;
+	__wsum csum;
 
	might_sleep();
 
@@ -24,27 +23,16 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst,
 
	allow_read_from_user(src, len);
 
-	csum = csum_partial_copy_generic((void __force *)src, dst,
-					 len, ~0U, &err, NULL);
-
-	if (unlikely(err)) {
-		int missing = __copy_from_user(dst, src, len);
-
-		if (missing)
-			csum = 0;
-		else
-			csum = csum_partial(dst, len, ~0U);
-	}
+	csum = csum_partial_copy_generic((void __force *)src, dst, len);
 
	prevent_read_from_user(src, len);
-	return (__force __wsum)csum;
+	return csum;
 }
 EXPORT_SYMBOL(csum_and_copy_from_user);
 
 __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
 {
-	unsigned int csum;
-	int err = 0;
+	__wsum csum;
 
	might_sleep();
	if (unlikely(!access_ok(dst, len)))
@@ -52,17 +40,9 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
		return 0;
 
	allow_write_to_user(dst, len);
 
-	csum = csum_partial_copy_generic(src, (void __force *)dst,
-					 len, ~0U, NULL, &err);
-
-	if (unlikely(err)) {
-		csum = csum_partial(src, len, ~0U);
-
-		if (copy_to_user(dst, src, len))
-			csum = 0;
-	}
+	csum = csum_partial_copy_generic(src, (void __force *)dst, len);
 
	prevent_write_to_user(dst, len);
-	return (__force __wsum)csum;
+	return csum;
 }
 EXPORT_SYMBOL(csum_and_copy_to_user);
-- 
cgit
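Taken together, the series leaves all three powerpc entry points sharing
one contract.  A C model of what the asm now guarantees (a sketch of the
contract only - the real implementation lives in checksum_32.S and
checksum_64.S, and copy_would_fault() is a hypothetical stand-in for the
extable machinery):

	__wsum csum_partial_copy_generic(const void *src, void *dst, int len)
	{
		if (copy_would_fault(src, dst, len))	/* hypothetical */
			return 0;	/* matches the fault:/.Lerror paths */
		memcpy(dst, src, len);	/* stand-in for the copying loops */
		/* accumulator seeded with all-ones (li rN,-1 in the asm) */
		return csum_partial(dst, len, (__force __wsum)~0U);
	}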