Diffstat (limited to 'arch/mips/include/asm/checksum.h')
-rw-r--r--  arch/mips/include/asm/checksum.h  173
1 file changed, 83 insertions(+), 90 deletions(-)
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index ac3d2b8a20d4..0921ddda11a4 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -7,13 +7,18 @@
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) 2001 Thiemo Seufer.
* Copyright (C) 2002 Maciej W. Rozycki
+ * Copyright (C) 2014 Imagination Technologies Ltd.
*/
#ifndef _ASM_CHECKSUM_H
#define _ASM_CHECKSUM_H
+#ifdef CONFIG_GENERIC_CSUM
+#include <asm-generic/checksum.h>
+#else
+
#include <linux/in6.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/*
* computes the checksum of a memory block at buff, length len,
@@ -29,20 +34,17 @@
*/
__wsum csum_partial(const void *buff, int len, __wsum sum);
-__wsum __csum_partial_copy_user(const void *src, void *dst,
- int len, __wsum sum, int *err_ptr);
+__wsum __csum_partial_copy_from_user(const void __user *src, void *dst, int len);
+__wsum __csum_partial_copy_to_user(const void *src, void __user *dst, int len);
-/*
- * this is a new version of the above that records errors it finds in *errp,
- * but continues and zeros the rest of the buffer.
- */
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline
-__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
- __wsum sum, int *err_ptr)
+__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
might_fault();
- return __csum_partial_copy_user((__force void *)src, dst,
- len, sum, err_ptr);
+ if (!access_ok(src, len))
+ return 0;
+ return __csum_partial_copy_from_user(src, dst, len);
}
/*
@@ -50,46 +52,40 @@ __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
*/
#define HAVE_CSUM_COPY_USER
static inline
-__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
- __wsum sum, int *err_ptr)
+__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
might_fault();
- if (access_ok(VERIFY_WRITE, dst, len))
- return __csum_partial_copy_user(src, (__force void *)dst,
- len, sum, err_ptr);
- if (len)
- *err_ptr = -EFAULT;
-
- return (__force __wsum)-1; /* invalid checksum */
+ if (!access_ok(dst, len))
+ return 0;
+ return __csum_partial_copy_to_user(src, dst, len);
}
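
The new interface drops the sum/err_ptr arguments: when access_ok() rejects the range the helper returns 0 and the caller falls back to a plain copy plus csum_partial(). Below is a minimal user-space model of that calling convention; toy_csum(), model_csum_and_copy() and the buffer names are illustrative stand-ins, not kernel code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy 16-bit one's complement sum, a stand-in for csum_partial(). */
static uint32_t toy_csum(const void *buf, int len)
{
	const uint8_t *p = buf;
	uint32_t sum = 0;

	while (len > 1) {
		sum += (uint32_t)p[0] << 8 | p[1];
		p += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)p[0] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

/* Model of the new convention: copy and checksum in one pass and
 * return 0 when the (simulated) user access is rejected. */
static uint32_t model_csum_and_copy(const void *usrc, void *dst, int len,
				    int fault)
{
	if (fault)		/* stands in for !access_ok() */
		return 0;
	memcpy(dst, usrc, len);
	return toy_csum(dst, len);
}

int main(void)
{
	char user_buf[] = "example payload";
	char kernel_buf[sizeof(user_buf)];
	uint32_t csum = model_csum_and_copy(user_buf, kernel_buf,
					    sizeof(user_buf), 0);

	if (!csum)		/* callers now test the return value */
		puts("fault: redo with a plain copy plus csum_partial()");
	else
		printf("checksum %#06x\n", csum);
	return 0;
}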
/*
* the same as csum_partial, but copies from user space (but on MIPS
* we have just one address space, so this is identical to the above)
*/
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum);
+#define _HAVE_ARCH_CSUM_AND_COPY
+__wsum __csum_partial_copy_nocheck(const void *src, void *dst, int len);
+static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
+{
+ return __csum_partial_copy_nocheck(src, dst, len);
+}
/*
* Fold a partial checksum without adding pseudo headers
*/
-static inline __sum16 csum_fold(__wsum sum)
+static inline __sum16 csum_fold(__wsum csum)
{
- __asm__(
- " .set push # csum_fold\n"
- " .set noat \n"
- " sll $1, %0, 16 \n"
- " addu %0, $1 \n"
- " sltu $1, %0, $1 \n"
- " srl %0, %0, 16 \n"
- " addu %0, $1 \n"
- " xori %0, 0xffff \n"
- " .set pop"
- : "=r" (sum)
- : "0" (sum));
+ u32 sum = (__force u32)csum;
- return (__force __sum16)sum;
+ sum += (sum << 16);
+ csum = (__force __wsum)(sum < (__force u32)csum);
+ sum >>= 16;
+ sum += (__force u32)csum;
+
+ return (__force __sum16)~sum;
}
+#define csum_fold csum_fold
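
The hand-written fold becomes plain C: add the low half-word onto the high one, feed the 32-bit carry back in, shift down and complement. A standalone sketch comparing this with the textbook two-step fold (fold_new(), fold_ref() and the sample values are illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the new C csum_fold(): add the low half-word into the high
 * one, feed the 32-bit carry back in, then complement. */
static uint16_t fold_new(uint32_t csum)
{
	uint32_t sum = csum;
	uint32_t carry;

	sum += sum << 16;	/* high half now holds lo + hi */
	carry = sum < csum;	/* carry out of the 32-bit add */
	sum >>= 16;
	sum += carry;		/* end-around carry            */
	return (uint16_t)~sum;
}

/* Textbook two-step fold for comparison. */
static uint16_t fold_ref(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint32_t samples[] = { 0, 0xffff, 0x10000, 0x1fffe, 0xdeadbeef, 0xffffffff };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%#010x -> new %#06x ref %#06x\n",
		       samples[i], fold_new(samples[i]), fold_ref(samples[i]));
	return 0;
}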
/*
* This is a version of ip_compute_csum() optimized for IP headers,
@@ -128,60 +124,51 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
return csum_fold(csum);
}
+#define ip_fast_csum ip_fast_csum
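
For reference, ip_fast_csum() produces the folded, complemented 16-bit sum over the ihl 32-bit words of an IPv4 header. A generic C model is sketched below (ref_ip_csum() and the sample header are assumptions, not the MIPS routine); recomputing over a header that already carries its checksum must yield zero.

#include <stdint.h>
#include <stdio.h>

/* Generic C reference of what ip_fast_csum() computes: a folded,
 * complemented one's complement sum over ihl 32-bit header words. */
static uint16_t ref_ip_csum(const void *iph, unsigned int ihl)
{
	const uint8_t *p = iph;
	uint32_t sum = 0;
	unsigned int i;

	for (i = 0; i < ihl * 4; i += 2)
		sum += (uint32_t)p[i] << 8 | p[i + 1];
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* Example 20-byte IPv4 header, checksum field (bytes 10-11) zeroed. */
	uint8_t hdr[20] = {
		0x45, 0x00, 0x00, 0x54, 0x1c, 0x46, 0x40, 0x00,
		0x40, 0x01, 0x00, 0x00, 0xc0, 0xa8, 0x00, 0x01,
		0xc0, 0xa8, 0x00, 0xc7,
	};
	uint16_t csum = ref_ip_csum(hdr, 5);

	/* Plug the checksum back in; re-checksumming now yields 0. */
	hdr[10] = csum >> 8;
	hdr[11] = csum & 0xff;
	printf("header checksum %#06x, verify -> %#06x\n",
	       csum, ref_ip_csum(hdr, 5));
	return 0;
}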
-static inline __wsum csum_tcpudp_nofold(__be32 saddr,
- __be32 daddr, unsigned short len, unsigned short proto,
- __wsum sum)
-{
- __asm__(
- " .set push # csum_tcpudp_nofold\n"
- " .set noat \n"
-#ifdef CONFIG_32BIT
- " addu %0, %2 \n"
- " sltu $1, %0, %2 \n"
- " addu %0, $1 \n"
-
- " addu %0, %3 \n"
- " sltu $1, %0, %3 \n"
- " addu %0, $1 \n"
-
- " addu %0, %4 \n"
- " sltu $1, %0, %4 \n"
- " addu %0, $1 \n"
-#endif
-#ifdef CONFIG_64BIT
- " daddu %0, %2 \n"
- " daddu %0, %3 \n"
- " daddu %0, %4 \n"
- " dsll32 $1, %0, 0 \n"
- " daddu %0, $1 \n"
- " dsra32 %0, %0, 0 \n"
-#endif
- " .set pop"
- : "=r" (sum)
- : "0" ((__force unsigned long)daddr),
- "r" ((__force unsigned long)saddr),
-#ifdef __MIPSEL__
- "r" ((proto + len) << 8),
-#else
- "r" (proto + len),
-#endif
- "r" ((__force unsigned long)sum));
-
- return sum;
-}
-
-/*
- * computes the checksum of the TCP/UDP pseudo-header
- * returns a 16-bit checksum, already complemented
- */
-static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
- unsigned short len,
- unsigned short proto,
- __wsum sum)
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ __u32 len, __u8 proto,
+ __wsum isum)
{
- return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+ const unsigned int sh32 = IS_ENABLED(CONFIG_64BIT) ? 32 : 0;
+ unsigned long sum = (__force unsigned long)daddr;
+ unsigned long tmp;
+ __u32 osum;
+
+ tmp = (__force unsigned long)saddr;
+ sum += tmp;
+
+ if (IS_ENABLED(CONFIG_32BIT))
+ sum += sum < tmp;
+
+ /*
+ * We know PROTO + LEN has the sign bit clear, so cast to a signed
+ * type to avoid an extraneous zero-extension where TMP is 64-bit.
+ */
+ tmp = (__s32)(proto + len);
+ tmp <<= IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? 8 : 0;
+ sum += tmp;
+ if (IS_ENABLED(CONFIG_32BIT))
+ sum += sum < tmp;
+
+ tmp = (__force unsigned long)isum;
+ sum += tmp;
+
+ if (IS_ENABLED(CONFIG_32BIT)) {
+ sum += sum < tmp;
+ osum = sum;
+ } else if (IS_ENABLED(CONFIG_64BIT)) {
+ tmp = sum << sh32;
+ sum += tmp;
+ osum = sum < tmp;
+ osum += sum >> sh32;
+ } else {
+ BUILD_BUG();
+ }
+
+ return (__force __wsum)osum;
}
+#define csum_tcpudp_nofold csum_tcpudp_nofold
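
Both configurations compute the same pseudo-header partial sum: the 32-bit path chains each addition's carry back in, while the 64-bit path adds everything in one register and folds the high word back at the end. A user-space sketch of the two strategies follows (nofold32()/nofold64() and the sample values are illustrative models; the little-endian <<8 adjustment is omitted). The raw partial sums are equivalent as one's complement values, so both fold to the same 16-bit checksum.

#include <stdint.h>
#include <stdio.h>

/* 16-bit fold + complement, as in csum_fold(). */
static uint16_t fold16(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* Model of the CONFIG_32BIT path: feed each addition's carry back in
 * so the running value stays a valid one's complement partial sum. */
static uint32_t nofold32(uint32_t saddr, uint32_t daddr,
			 uint32_t len, uint8_t proto, uint32_t isum)
{
	uint32_t sum = daddr, tmp;

	tmp = saddr;
	sum += tmp;
	sum += sum < tmp;

	tmp = proto + len;	/* little-endian <<8 shift omitted here */
	sum += tmp;
	sum += sum < tmp;

	tmp = isum;
	sum += tmp;
	sum += sum < tmp;

	return sum;
}

/* Model of the CONFIG_64BIT path: add everything in 64 bits, then fold
 * the high word into the low word with one end-around carry. */
static uint32_t nofold64(uint32_t saddr, uint32_t daddr,
			 uint32_t len, uint8_t proto, uint32_t isum)
{
	uint64_t sum = (uint64_t)daddr + saddr + (proto + len) + isum;
	uint64_t tmp = sum << 32;

	sum += tmp;
	return (uint32_t)(sum >> 32) + (sum < tmp);
}

int main(void)
{
	uint32_t a = 0xc0a80001, b = 0xc0a80002, seed = 0x12345678;
	uint32_t s32 = nofold32(a, b, 40, 6, seed);
	uint32_t s64 = nofold64(a, b, 40, 6, seed);

	printf("32-bit %#010x -> %#06x, 64-bit %#010x -> %#06x\n",
	       s32, fold16(s32), s64, fold16(s64));
	return 0;
}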
/*
* this routine is used for miscellaneous IP-like checksums, mainly
@@ -195,9 +182,11 @@ static inline __sum16 ip_compute_csum(const void *buff, int len)
#define _HAVE_ARCH_IPV6_CSUM
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
- __u32 len, unsigned short proto,
+ __u32 len, __u8 proto,
__wsum sum)
{
+ __wsum tmp;
+
__asm__(
" .set push # csum_ipv6_magic\n"
" .set noreorder \n"
@@ -250,11 +239,15 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
" addu %0, $1 # Add final carry\n"
" .set pop"
- : "=r" (sum), "=r" (proto)
+ : "=&r" (sum), "=&r" (tmp)
: "r" (saddr), "r" (daddr),
- "0" (htonl(len)), "1" (htonl(proto)), "r" (sum));
+ "0" (htonl(len)), "r" (htonl(proto)), "r" (sum)
+ : "memory");
return csum_fold(sum);
}
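
The earlyclobber "=&r" constraints keep GCC from assigning an output register that still aliases a live input; proto becomes a plain input and the scratch register is a separate tmp. What the asm computes is the IPv6 pseudo-header sum; a generic C reference is sketched below (ref_ipv6_magic(), struct in6_like and the sample addresses are illustrative, with big-endian word order and a simplified incoming sum assumed, not the MIPS asm).

#include <stdint.h>
#include <stdio.h>

/* A 128-bit IPv6 address as 16 raw bytes (network order). */
struct in6_like { uint8_t s6[16]; };

/* Generic C reference of csum_ipv6_magic(): one's complement sum of the
 * IPv6 pseudo-header (src, dst, length, next header) plus a running sum,
 * folded and complemented. */
static uint16_t ref_ipv6_magic(const struct in6_like *saddr,
			       const struct in6_like *daddr,
			       uint32_t len, uint8_t proto, uint32_t wsum)
{
	uint64_t sum = wsum;
	int i;

	for (i = 0; i < 16; i += 2) {
		sum += (uint32_t)saddr->s6[i] << 8 | saddr->s6[i + 1];
		sum += (uint32_t)daddr->s6[i] << 8 | daddr->s6[i + 1];
	}
	sum += len >> 16;	/* upper and lower halves of the    */
	sum += len & 0xffff;	/* 32-bit payload length field      */
	sum += proto;		/* next-header value, zero-extended */

	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	struct in6_like src = { { 0x20, 0x01, 0x0d, 0xb8, [15] = 0x01 } };
	struct in6_like dst = { { 0x20, 0x01, 0x0d, 0xb8, [15] = 0x02 } };

	printf("pseudo-header checksum %#06x\n",
	       ref_ipv6_magic(&src, &dst, 1280, 58, 0));
	return 0;
}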
+#include <asm-generic/checksum.h>
+#endif /* CONFIG_GENERIC_CSUM */
+
#endif /* _ASM_CHECKSUM_H */