Diffstat (limited to 'arch/powerpc/include/asm/checksum.h')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | arch/powerpc/include/asm/checksum.h | 72 |
1 file changed, 37 insertions, 35 deletions
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index 842124b199b5..4b573a3b7e17 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -1,17 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 #ifndef _ASM_POWERPC_CHECKSUM_H
 #define _ASM_POWERPC_CHECKSUM_H
 #ifdef __KERNEL__
 
 /*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
-#ifdef CONFIG_GENERIC_CSUM
-#include <asm-generic/checksum.h>
-#else
+#include <linux/bitops.h>
+#include <linux/in6.h>
 /*
  * Computes the checksum of a memory block at src, length len,
  * and adds in "sum" (32-bit), while copying the block to dst.
@@ -22,19 +18,18 @@
  * Like csum_partial, this must be called with even lengths,
  * except for the last fragment.
  */
-extern __wsum csum_partial_copy_generic(const void *src, void *dst,
-					int len, __wsum sum,
-					int *src_err, int *dst_err);
+extern __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
 
 #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
 extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
-				      int len, __wsum sum, int *err_ptr);
+				      int len);
 #define HAVE_CSUM_COPY_USER
 extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
-				    int len, __wsum sum, int *err_ptr);
+				    int len);
 
-#define csum_partial_copy_nocheck(src, dst, len, sum) \
-	csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)
+#define _HAVE_ARCH_CSUM_AND_COPY
+#define csum_partial_copy_nocheck(src, dst, len) \
+	csum_partial_copy_generic((src), (dst), (len))
 
 
 /*
@@ -43,23 +38,20 @@ extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
  */
 static inline __sum16 csum_fold(__wsum sum)
 {
-	unsigned int tmp;
-
-	/* swap the two 16-bit halves of sum */
-	__asm__("rlwinm %0,%1,16,0,31" : "=r" (tmp) : "r" (sum));
-	/* if there is a carry from adding the two 16-bit halves,
-	   it will carry from the lower half into the upper half,
-	   giving us the correct sum in the upper half. */
-	return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
+	u32 tmp = (__force u32)sum;
+
+	/*
+	 * swap the two 16-bit halves of sum
+	 * if there is a carry from adding the two 16-bit halves,
+	 * it will carry from the lower half into the upper half,
+	 * giving us the correct sum in the upper half.
+	 */
+	return (__force __sum16)(~(tmp + rol32(tmp, 16)) >> 16);
 }
 
 static inline u32 from64to32(u64 x)
 {
-	/* add up 32-bit and 32-bit for 32+c bit */
-	x = (x & 0xffffffff) + (x >> 32);
-	/* add up carry.. */
-	x = (x & 0xffffffff) + (x >> 32);
-	return (u32)x;
+	return (x + ror64(x, 32)) >> 32;
 }
 
 static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
@@ -100,20 +92,19 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
 }
 
 #define HAVE_ARCH_CSUM_ADD
-static inline __wsum csum_add(__wsum csum, __wsum addend)
+static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
 {
 #ifdef __powerpc64__
 	u64 res = (__force u64)csum;
-#endif
+
+	res += (__force u64)addend;
+	return (__force __wsum)((u32)res + (res >> 32));
+#else
 	if (__builtin_constant_p(csum) && csum == 0)
 		return addend;
 	if (__builtin_constant_p(addend) && addend == 0)
 		return csum;
 
-#ifdef __powerpc64__
-	res += (__force u64)addend;
-	return (__force __wsum) from64to32(res);
-#else
 	asm("addc %0,%0,%1;"
 	    "addze %0,%0;"
 	    : "+r" (csum) : "r" (addend) : "xer");
@@ -121,6 +112,13 @@ static inline __wsum csum_add(__wsum csum, __wsum addend)
 #endif
 }
 
+#define HAVE_ARCH_CSUM_SHIFT
+static __always_inline __wsum csum_shift(__wsum sum, int offset)
+{
+	/* rotate sum to align it with a 16b boundary */
+	return (__force __wsum)rol32((__force u32)sum, (offset & 1) << 3);
+}
+
 /*
  * This is a version of ip_compute_csum() optimized for IP headers,
  * which always checksum on 4 octet boundaries.  ihl is the number
@@ -172,7 +170,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
  */
 __wsum __csum_partial(const void *buff, int len, __wsum sum);
 
-static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
+static __always_inline __wsum csum_partial(const void *buff, int len, __wsum sum)
 {
 	if (__builtin_constant_p(len) && len <= 16 && (len & 1) == 0) {
 		if (len == 2)
@@ -214,6 +212,10 @@ static inline __sum16 ip_compute_csum(const void *buff, int len)
 	return csum_fold(csum_partial(buff, len, 0));
 }
 
-#endif
+#define _HAVE_ARCH_IPV6_CSUM
+__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+			const struct in6_addr *daddr,
+			__u32 len, __u8 proto, __wsum sum);
+
 #endif /* __KERNEL__ */
 #endif
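Both csum_fold() and from64to32() are rewritten above to do a single rotate-and-add and read the result out of the upper half of the register, instead of folding twice (with an rlwinm in the 32-bit case). The sketch below is a userspace illustration, not part of the commit: rol32() and ror64() are reimplemented locally as stand-ins for the <linux/bitops.h> helpers, and it simply checks that the old and new formulations return identical results over pseudo-random inputs.

/*
 * Userspace sketch, not kernel code: compares the rotate-based
 * csum_fold()/from64to32() from the diff with the straightforward
 * two-step folds they replace.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t rol32(uint32_t x, unsigned int s)
{
	return (x << s) | (x >> (32 - s));
}

static uint64_t ror64(uint64_t x, unsigned int s)
{
	return (x >> s) | (x << (64 - s));
}

/* old csum_fold: two explicit folds with end-around carry */
static uint16_t fold_old(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* new csum_fold: add the halfword-rotated sum, result in the top half */
static uint16_t fold_new(uint32_t sum)
{
	return (uint16_t)(~(sum + rol32(sum, 16)) >> 16);
}

/* old from64to32: two explicit folds of a 64-bit sum into 32 bits */
static uint32_t from64to32_old(uint64_t x)
{
	x = (x & 0xffffffff) + (x >> 32);
	x = (x & 0xffffffff) + (x >> 32);
	return (uint32_t)x;
}

/* new from64to32: add the word-rotated sum, result in the top half */
static uint32_t from64to32_new(uint64_t x)
{
	return (uint32_t)((x + ror64(x, 32)) >> 32);
}

int main(void)
{
	uint32_t s32 = 0x12345678;
	uint64_t s64 = 0x1234567890abcdefULL;

	for (int i = 0; i < 1000000; i++) {
		s32 = s32 * 1664525u + 1013904223u;	/* simple LCGs */
		s64 = s64 * 6364136223846793005ULL + 1442695040888963407ULL;

		if (fold_old(s32) != fold_new(s32) ||
		    from64to32_old(s64) != from64to32_new(s64)) {
			printf("mismatch\n");
			return 1;
		}
	}
	printf("old and new folds agree\n");
	return 0;
}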

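The newly added csum_shift() rotates the 32-bit accumulator left by 8 bits when a block sits at an odd offset in the byte stream. Because 2^16 is congruent to 1 modulo 0xffff, that rotation is congruent to byte-swapping the folded 16-bit sum, which is exactly the correction needed when a block checksummed as if it began at offset 0 is merged in at an odd boundary. Below is a hedged userspace sketch of that property; csum32(), fold(), the buffer contents and the split point are illustrative and not taken from kernel code.

/*
 * Userspace sketch, not kernel code: merge an odd-offset block into a
 * stream checksum by rotating its 32-bit partial sum left by 8 bits,
 * which is what csum_shift() does for odd offsets.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t rol32(uint32_t x, unsigned int s)
{
	return (x << s) | (x >> (32 - s));
}

/* 32-bit accumulator over big-endian 16-bit words, zero-padded if odd */
static uint32_t csum32(const uint8_t *p, int len)
{
	uint32_t sum = 0;

	for (int i = 0; i < len; i += 2) {
		uint32_t hi = p[i];
		uint32_t lo = (i + 1 < len) ? p[i + 1] : 0;

		sum += (hi << 8) | lo;
	}
	return sum;
}

/* fold to a 16-bit ones'-complement checksum, as csum_fold() does */
static uint16_t fold(uint32_t sum)
{
	return (uint16_t)(~(sum + rol32(sum, 16)) >> 16);
}

int main(void)
{
	uint8_t buf[32];
	int split = 7;	/* odd: the tail starts on an odd stream offset */

	for (int i = 0; i < (int)sizeof(buf); i++)
		buf[i] = (uint8_t)(i * 37 + 11);

	uint32_t whole = csum32(buf, sizeof(buf));
	uint32_t head  = csum32(buf, split);
	uint32_t tail  = csum32(buf + split, sizeof(buf) - split);

	/* rotate the misaligned tail by 8 bits before adding it in */
	uint32_t combined = head + rol32(tail, 8);

	printf("%s: whole=%04x combined=%04x\n",
	       fold(whole) == fold(combined) ? "match" : "MISMATCH",
	       fold(whole), fold(combined));
	return 0;
}

One rotate covers both cases without a branch: the shift count `(offset & 1) << 3` in the new csum_shift() evaluates to 0 for an even offset and 8 for an odd one.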