 arch/sparc/include/asm/checksum_64.h | 71 ++++++++++++++++++++++++-----------------------------------------------
 1 file changed, 24 insertions(+), 47 deletions(-)
diff --git a/arch/sparc/include/asm/checksum_64.h b/arch/sparc/include/asm/checksum_64.h
index 019b9615e43c..d6b59461e064 100644
--- a/arch/sparc/include/asm/checksum_64.h
+++ b/arch/sparc/include/asm/checksum_64.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SPARC64_CHECKSUM_H
#define __SPARC64_CHECKSUM_H
@@ -16,7 +17,7 @@
*/
#include <linux/in6.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
@@ -29,7 +30,7 @@
*
* it's best to have buff aligned on a 32-bit boundary
*/
-extern __wsum csum_partial(const void * buff, int len, __wsum sum);
+__wsum csum_partial(const void * buff, int len, __wsum sum);
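
For reference, a rough portable sketch of csum_partial()'s contract (the real implementation is optimized sparc64 assembly; names here are hypothetical, with plain uint32_t standing in for __wsum and big-endian 16-bit words as on sparc64):

#include <stdint.h>
#include <stddef.h>

/* Accumulate 16-bit big-endian words into a 32-bit one's-complement
 * sum, seeded with "sum", as csum_partial() does. */
static uint32_t csum_partial_sketch(const void *buff, size_t len, uint32_t sum)
{
	const uint8_t *p = buff;

	while (len > 1) {
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)				/* trailing odd byte */
		sum += (uint32_t)p[0] << 8;
	while (sum >> 16)			/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}
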
/* the same as csum_partial, but copies from user space while it
* checksums
@@ -37,47 +38,14 @@ extern __wsum csum_partial(const void * buff, int len, __wsum sum);
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
-extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum);
-
-extern long __csum_partial_copy_from_user(const void __user *src,
- void *dst, int len,
- __wsum sum);
-
-static inline __wsum
-csum_partial_copy_from_user(const void __user *src,
- void *dst, int len,
- __wsum sum, int *err)
-{
- long ret = __csum_partial_copy_from_user(src, dst, len, sum);
- if (ret < 0)
- *err = -EFAULT;
- return (__force __wsum) ret;
-}
-
-/*
- * Copy and checksum to user
- */
-#define HAVE_CSUM_COPY_USER
-extern long __csum_partial_copy_to_user(const void *src,
- void __user *dst, int len,
- __wsum sum);
-
-static inline __wsum
-csum_and_copy_to_user(const void *src,
- void __user *dst, int len,
- __wsum sum, int *err)
-{
- long ret = __csum_partial_copy_to_user(src, dst, len, sum);
- if (ret < 0)
- *err = -EFAULT;
- return (__force __wsum) ret;
-}
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len);
+__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len);
+__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len);
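
The new signatures drop both the __wsum seed and the int *err out-parameter of the deleted wrappers; following the generic csum_and_copy_* convention this appears to track, a faulting user copy is reported in-band through the return value rather than through *err. A hypothetical portable sketch of the _nocheck contract, reusing csum_partial_sketch() from above (a real implementation fuses the copy and the sum into one pass):

#include <string.h>

static uint32_t csum_partial_copy_nocheck_sketch(const void *src, void *dst,
						 size_t len)
{
	memcpy(dst, src, len);		/* sketch only: copy, then sum */
	return csum_partial_sketch(dst, len, 0);
}
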
/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
* the majority of the time.
*/
-extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
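
A hypothetical sketch of ip_fast_csum()'s contract, again in portable terms (ihl counts 32-bit words, so the header is ihl * 4 bytes; over a header whose checksum field is already correct the result is 0):

static uint16_t ip_fast_csum_sketch(const void *iph, unsigned int ihl)
{
	uint32_t sum = csum_partial_sketch(iph, ihl * 4u, 0);
	return (uint16_t)~sum;		/* helper already folded the carries */
}
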
/* Fold a partial checksum without adding pseudo headers. */
static inline __sum16 csum_fold(__wsum sum)
@@ -96,9 +64,8 @@ static inline __sum16 csum_fold(__wsum sum)
}
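
The body of csum_fold() is elided by the hunk above; a portable equivalent of the folding step it performs (fold the high 16 bits into the low 16, absorb the carry, complement):

static uint16_t csum_fold_sketch(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold high half into low */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb a possible carry */
	return (uint16_t)~sum;
}
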
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
- unsigned int len,
- unsigned short proto,
- __wsum sum)
+ __u32 len, __u8 proto,
+ __wsum sum)
{
__asm__ __volatile__(
" addcc %1, %0, %0\n"
@@ -116,9 +83,8 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
- unsigned short len,
- unsigned short proto,
- __wsum sum)
+ __u32 len, __u8 proto,
+ __wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
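
The inline assembly above accumulates the pseudo-header fields with carry propagation; a portable sketch of the same computation on a big-endian machine like sparc64 (host-order uint32_t standing in for __be32/__wsum; on big-endian, proto and len can be added in directly):

static uint32_t csum_tcpudp_nofold_sketch(uint32_t saddr, uint32_t daddr,
					  uint32_t len, uint8_t proto,
					  uint32_t sum)
{
	uint64_t s = (uint64_t)sum + saddr + daddr + len + proto;
	s = (s & 0xffffffffu) + (s >> 32);	/* end-around carry */
	s = (s & 0xffffffffu) + (s >> 32);
	return (uint32_t)s;
}

csum_tcpudp_magic() is then just the fold applied to this value, matching the one-liner in the hunk above.
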
@@ -127,8 +93,7 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
- __u32 len, unsigned short proto,
- __wsum sum)
+ __u32 len, __u8 proto, __wsum sum)
{
__asm__ __volatile__ (
" addcc %3, %4, %%g7\n"
@@ -164,4 +129,16 @@ static inline __sum16 ip_compute_csum(const void *buff, int len)
return csum_fold(csum_partial(buff, len, 0));
}
+#define HAVE_ARCH_CSUM_ADD
+static inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+ __asm__ __volatile__(
+ "addcc %0, %1, %0\n"
+ "addx %0, %%g0, %0"
+ : "=r" (csum)
+ : "r" (addend), "0" (csum));
+
+ return csum;
+}
+
#endif /* !(__SPARC64_CHECKSUM_H) */
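
HAVE_ARCH_CSUM_ADD tells the generic checksum code to use the arch override added above in place of its C fallback. The addcc/addx pair is a 32-bit add whose carry is wrapped back into the result; a portable equivalent:

static uint32_t csum_add_sketch(uint32_t csum, uint32_t addend)
{
	uint32_t s = csum + addend;
	return s + (s < csum);	/* s < csum iff the add carried */
}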