Diffstat (limited to 'lib/atomic64.c')
-rw-r--r--  lib/atomic64.c | 220
1 file changed, 124 insertions(+), 96 deletions(-)
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 08a4f068e61e..1a72bba36d24 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Generic implementation of 64-bit atomics using spinlocks,
* useful on processors that don't have 64-bit atomic instructions.
*
* Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
#include <linux/cache.h>
@@ -29,15 +25,15 @@
* Ensure each lock is in a separate cacheline.
*/
static union {
- raw_spinlock_t lock;
+ arch_spinlock_t lock;
char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
[0 ... (NR_LOCKS - 1)] = {
- .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
+ .lock = __ARCH_SPIN_LOCK_UNLOCKED,
},
};
-static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
+static inline arch_spinlock_t *lock_addr(const atomic64_t *v)
{
unsigned long addr = (unsigned long) v;
@@ -46,134 +42,166 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
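
The unchanged middle of lock_addr() is elided between the two hunks above; it shifts the address right by L1_CACHE_SHIFT and XOR-folds the upper bits before masking with NR_LOCKS - 1. A minimal userspace model of that hashing, with assumed values for NR_LOCKS and L1_CACHE_SHIFT (the kernel takes both from its configuration):

#include <stdio.h>
#include <stdint.h>

#define NR_LOCKS	16	/* assumed: the kernel default */
#define L1_CACHE_SHIFT	6	/* assumed: 64-byte cachelines */

/* Model of lock_addr(): hash an atomic64_t's address to a lock index. */
static unsigned int lock_index(const void *v)
{
	uintptr_t addr = (uintptr_t)v;

	addr >>= L1_CACHE_SHIFT;		/* drop cacheline-offset bits */
	addr ^= (addr >> 8) ^ (addr >> 16);	/* fold in higher address bits */
	return addr & (NR_LOCKS - 1);		/* NR_LOCKS is a power of two */
}

int main(void)
{
	long long counters[4];

	for (int i = 0; i < 4; i++)
		printf("&counters[%d] -> lock %u\n", i, lock_index(&counters[i]));
	return 0;
}

Two variables may hash to the same lock; that costs only contention, never correctness, since whichever lock is picked serializes all ops that picked it.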
-long long atomic64_read(const atomic64_t *v)
+s64 generic_atomic64_read(const atomic64_t *v)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
- long long val;
+ arch_spinlock_t *lock = lock_addr(v);
+ s64 val;
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
val = v->counter;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
return val;
}
-EXPORT_SYMBOL(atomic64_read);
+EXPORT_SYMBOL(generic_atomic64_read);
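
Note the shape of the new locking sequence: arch_spin_lock() is the bare lock acquire and does not disable interrupts by itself, so every op brackets it with local_irq_save()/local_irq_restore(), preserving the semantics of the old raw_spin_lock_irqsave(). The interleaving the irq-disable rules out, sketched with a hypothetical interrupt handler:

/*
 * Without local_irq_save(), on a single CPU:
 *
 *   generic_atomic64_read(v)           hypothetical IRQ handler
 *   ------------------------           ------------------------
 *   arch_spin_lock(lock);
 *     <interrupt fires>
 *                                      generic_atomic64_add(1, v);
 *                                        arch_spin_lock(lock);
 *                                        ...spins forever: the holder
 *                                        is the code it interrupted.
 *
 * Disabling interrupts across the critical section keeps the ops
 * usable from any context, exactly as raw_spin_lock_irqsave() did.
 */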
-void atomic64_set(atomic64_t *v, long long i)
+void generic_atomic64_set(atomic64_t *v, s64 i)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
v->counter = i;
- raw_spin_unlock_irqrestore(lock, flags);
-}
-EXPORT_SYMBOL(atomic64_set);
-
-void atomic64_add(long long a, atomic64_t *v)
-{
- unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
-
- raw_spin_lock_irqsave(lock, flags);
- v->counter += a;
- raw_spin_unlock_irqrestore(lock, flags);
-}
-EXPORT_SYMBOL(atomic64_add);
-
-long long atomic64_add_return(long long a, atomic64_t *v)
-{
- unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
- long long val;
-
- raw_spin_lock_irqsave(lock, flags);
- val = v->counter += a;
- raw_spin_unlock_irqrestore(lock, flags);
- return val;
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
}
-EXPORT_SYMBOL(atomic64_add_return);
-
-void atomic64_sub(long long a, atomic64_t *v)
+EXPORT_SYMBOL(generic_atomic64_set);
+
+#define ATOMIC64_OP(op, c_op) \
+void generic_atomic64_##op(s64 a, atomic64_t *v) \
+{ \
+ unsigned long flags; \
+ arch_spinlock_t *lock = lock_addr(v); \
+ \
+ local_irq_save(flags); \
+ arch_spin_lock(lock); \
+ v->counter c_op a; \
+ arch_spin_unlock(lock); \
+ local_irq_restore(flags); \
+} \
+EXPORT_SYMBOL(generic_atomic64_##op);
+
+#define ATOMIC64_OP_RETURN(op, c_op) \
+s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v) \
+{ \
+ unsigned long flags; \
+ arch_spinlock_t *lock = lock_addr(v); \
+ s64 val; \
+ \
+ local_irq_save(flags); \
+ arch_spin_lock(lock); \
+ val = (v->counter c_op a); \
+ arch_spin_unlock(lock); \
+ local_irq_restore(flags); \
+ return val; \
+} \
+EXPORT_SYMBOL(generic_atomic64_##op##_return);
+
+#define ATOMIC64_FETCH_OP(op, c_op) \
+s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v) \
+{ \
+ unsigned long flags; \
+ arch_spinlock_t *lock = lock_addr(v); \
+ s64 val; \
+ \
+ local_irq_save(flags); \
+ arch_spin_lock(lock); \
+ val = v->counter; \
+ v->counter c_op a; \
+ arch_spin_unlock(lock); \
+ local_irq_restore(flags); \
+ return val; \
+} \
+EXPORT_SYMBOL(generic_atomic64_fetch_##op);
+
+#define ATOMIC64_OPS(op, c_op) \
+ ATOMIC64_OP(op, c_op) \
+ ATOMIC64_OP_RETURN(op, c_op) \
+ ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(add, +=)
+ATOMIC64_OPS(sub, -=)
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, c_op) \
+ ATOMIC64_OP(op, c_op) \
+ ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(and, &=)
+ATOMIC64_OPS(or, |=)
+ATOMIC64_OPS(xor, ^=)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_OP
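
Written out by hand, the ATOMIC64_OP(add, +=) instance generated above expands to the same shape as the old open-coded atomic64_add():

void generic_atomic64_add(s64 a, atomic64_t *v)
{
	unsigned long flags;
	arch_spinlock_t *lock = lock_addr(v);

	local_irq_save(flags);
	arch_spin_lock(lock);
	v->counter += a;	/* c_op is `+=` for the add ops */
	arch_spin_unlock(lock);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(generic_atomic64_add);

ATOMIC64_OPS(add, +=) likewise emits generic_atomic64_add_return() and generic_atomic64_fetch_add(); the bitwise ops skip ATOMIC64_OP_RETURN because the kernel's atomic API defines no and/or/xor return variants.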
+
+s64 generic_atomic64_dec_if_positive(atomic64_t *v)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
+ s64 val;
- raw_spin_lock_irqsave(lock, flags);
- v->counter -= a;
- raw_spin_unlock_irqrestore(lock, flags);
-}
-EXPORT_SYMBOL(atomic64_sub);
-
-long long atomic64_sub_return(long long a, atomic64_t *v)
-{
- unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
- long long val;
-
- raw_spin_lock_irqsave(lock, flags);
- val = v->counter -= a;
- raw_spin_unlock_irqrestore(lock, flags);
- return val;
-}
-EXPORT_SYMBOL(atomic64_sub_return);
-
-long long atomic64_dec_if_positive(atomic64_t *v)
-{
- unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
- long long val;
-
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
val = v->counter - 1;
if (val >= 0)
v->counter = val;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
return val;
}
-EXPORT_SYMBOL(atomic64_dec_if_positive);
+EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
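
For callers, generic_atomic64_dec_if_positive() returns the decremented value whether or not it was stored, so the sign of the return distinguishes the two outcomes. A hypothetical use (take_slot() is illustrative, not part of this patch):

/* Claim one slot only while any remain: a negative return means the
 * counter was already at or below zero and was left untouched. */
static bool take_slot(atomic64_t *available)
{
	return generic_atomic64_dec_if_positive(available) >= 0;
}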
-long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
+s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
- long long val;
+ arch_spinlock_t *lock = lock_addr(v);
+ s64 val;
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
val = v->counter;
if (val == o)
v->counter = n;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
return val;
}
-EXPORT_SYMBOL(atomic64_cmpxchg);
+EXPORT_SYMBOL(generic_atomic64_cmpxchg);
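
generic_atomic64_cmpxchg() returns the value it observed, so the store succeeded iff that equals the expected value, which supports the usual compare-and-swap retry loop. A hypothetical caller that atomically clamps a counter to a maximum:

static void clamp_to_max(atomic64_t *v, s64 max)
{
	s64 old = generic_atomic64_read(v);

	while (old > max) {
		s64 seen = generic_atomic64_cmpxchg(v, old, max);

		if (seen == old)
			break;		/* our store won the race */
		old = seen;		/* lost a race; retry with the new value */
	}
}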
-long long atomic64_xchg(atomic64_t *v, long long new)
+s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
- long long val;
+ arch_spinlock_t *lock = lock_addr(v);
+ s64 val;
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
val = v->counter;
v->counter = new;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
return val;
}
-EXPORT_SYMBOL(atomic64_xchg);
+EXPORT_SYMBOL(generic_atomic64_xchg);
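
xchg is the read-and-replace primitive: it returns the previous value while installing the new one inside a single critical section. A hypothetical use, draining a statistics counter to zero while reading it:

static s64 drain_stat(atomic64_t *stat)
{
	/* Atomically fetch the accumulated count and reset it. */
	return generic_atomic64_xchg(stat, 0);
}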
-int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
- int ret = 0;
+ arch_spinlock_t *lock = lock_addr(v);
+ s64 val;
- raw_spin_lock_irqsave(lock, flags);
- if (v->counter != u) {
+ local_irq_save(flags);
+ arch_spin_lock(lock);
+ val = v->counter;
+ if (val != u)
v->counter += a;
- ret = 1;
- }
- raw_spin_unlock_irqrestore(lock, flags);
- return ret;
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
+
+ return val;
}
-EXPORT_SYMBOL(atomic64_add_unless);
+EXPORT_SYMBOL(generic_atomic64_fetch_add_unless);
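
The final hunk swaps atomic64_add_unless(), which returned 0 or 1, for the fetch form, which returns the value observed under the lock. The boolean semantics are recoverable with a trivial wrapper; the generic atomic headers derive add_unless() from fetch_add_unless() in essentially this way (shown as a sketch):

static inline bool atomic64_add_unless_sketch(atomic64_t *v, s64 a, s64 u)
{
	/* The add happened iff the observed value differed from u. */
	return generic_atomic64_fetch_add_unless(v, a, u) != u;
}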