Diffstat (limited to 'arch/mips/kernel/cmpxchg.c')
-rw-r--r--  arch/mips/kernel/cmpxchg.c | 16
1 file changed, 6 insertions(+), 10 deletions(-)
diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c
index 0b9535bc2c53..c371def2302d 100644
--- a/arch/mips/kernel/cmpxchg.c
+++ b/arch/mips/kernel/cmpxchg.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2017 Imagination Technologies
* Author: Paul Burton <paul.burton@mips.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/bitops.h>
@@ -26,7 +22,7 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s
/*
* Calculate a shift & mask that correspond to the value we wish to
- * exchange within the naturally aligned 4 byte integerthat includes
+ * exchange within the naturally aligned 4 byte integer that includes
* it.
*/
shift = (unsigned long)ptr & 0x3;
@@ -45,7 +41,7 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s
do {
old32 = load32;
new32 = (load32 & ~mask) | (val << shift);
- load32 = cmpxchg(ptr32, old32, new32);
+ load32 = arch_cmpxchg(ptr32, old32, new32);
} while (load32 != old32);
return (load32 & mask) >> shift;
@@ -54,10 +50,9 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s
unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
unsigned long new, unsigned int size)
{
- u32 mask, old32, new32, load32;
+ u32 mask, old32, new32, load32, load;
volatile u32 *ptr32;
unsigned int shift;
- u8 load;
/* Check that ptr is naturally aligned */
WARN_ON((unsigned long)ptr & (size - 1));
@@ -102,8 +97,9 @@ unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
*/
old32 = (load32 & ~mask) | (old << shift);
new32 = (load32 & ~mask) | (new << shift);
- load32 = cmpxchg(ptr32, old32, new32);
+ load32 = arch_cmpxchg(ptr32, old32, new32);
if (load32 == old32)
return old;
}
}
+EXPORT_SYMBOL(__cmpxchg_small);
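
For reference, the code touched here (both before and after the patch) emulates sub-word atomics by operating on the naturally aligned 32-bit word that contains the target byte or halfword. Below is a minimal userspace sketch of the same idea for a 1-byte compare-and-swap, using GCC's __atomic_compare_exchange_n builtin in place of the kernel's arch_cmpxchg(). The function name cas_u8() and the little-endian byte ordering are assumptions made for illustration only; they are not kernel code.

#include <stdint.h>
#include <stdbool.h>

static uint8_t cas_u8(volatile uint8_t *ptr, uint8_t old, uint8_t new)
{
	/*
	 * Aligned 32-bit word containing the target byte. The pointer cast
	 * mirrors what the kernel helper does with its ptr32 variable.
	 */
	volatile uint32_t *ptr32 =
		(volatile uint32_t *)((uintptr_t)ptr & ~(uintptr_t)0x3);
	/*
	 * Bit offset of the byte within that word (little-endian assumed;
	 * the kernel code adjusts the shift for big-endian MIPS).
	 */
	unsigned int shift = ((uintptr_t)ptr & 0x3) * 8;
	uint32_t mask = 0xffu << shift;

	for (;;) {
		uint32_t load32 = *ptr32;

		/* Bail out if the byte no longer holds the expected value. */
		if (((load32 & mask) >> shift) != old)
			return (load32 & mask) >> shift;

		/* Splice old/new into full 32-bit words, try a 4-byte CAS. */
		uint32_t old32 = (load32 & ~mask) | ((uint32_t)old << shift);
		uint32_t new32 = (load32 & ~mask) | ((uint32_t)new << shift);

		if (__atomic_compare_exchange_n(ptr32, &old32, new32, false,
						__ATOMIC_SEQ_CST,
						__ATOMIC_SEQ_CST))
			return old;

		/* Lost a race on another byte of the word: reload and retry. */
	}
}

The loop only retries when the 4-byte CAS fails because some other byte of the containing word changed underneath it; if the target byte itself no longer matches 'old', the emulated CAS reports failure immediately, which matches the early return in __cmpxchg_small().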