path: root/arch/arc/include/asm/atomic-spinlock.h
author    Linus Torvalds <torvalds@linux-foundation.org>  2021-09-05 11:43:03 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-09-05 11:43:03 -0700
commit    e07af2626643293fa16df655979e7963250abc63 (patch)
tree      1889972d796e84f5b00ccc0c00c3615517bd340a /arch/arc/include/asm/atomic-spinlock.h
parent    063df71a574b88e94391a3a719cf66d1b46df884 (diff)
parent    56809a28d45fcad94b28cfd614600568c0d46545 (diff)
Merge tag 'arc-5.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
Pull ARC updates from Vineet Gupta:
 "Finally a big pile of changes for ARC (atomics/mm). These are from
  our internal arc64 tree, preparing mainline for eventual arc64
  support. I'm spreading them out to avoid a tsunami of patches in one
  release.

   - MM rework:
      - implement up to 4 paging levels
      - enable STRICT_MM_TYPECHECKS
      - switch pgtable_t back to 'struct page *'

   - Atomics rework / implement relaxed accessors

   - Retire legacy MMUv1,v2; ARC750 cores

   - A few other build error and typo fixes"

* tag 'arc-5.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc: (33 commits)
  ARC: mm: vmalloc sync from kernel to user table to update PMD ...
  ARC: mm: support 4 levels of page tables
  ARC: mm: support 3 levels of page tables
  ARC: mm: switch to asm-generic/pgalloc.h
  ARC: mm: switch pgtable_t back to struct page *
  ARC: mm: hack to allow 2 level build with 4 level code
  ARC: mm: disintegrate pgtable.h into levels and flags
  ARC: mm: disintegrate mmu.h (arcv2 bits out)
  ARC: mm: move MMU specific bits out of entry code ...
  ARC: mm: move MMU specific bits out of ASID allocator
  ARC: mm: non-functional code movement/cleanup
  ARC: mm: pmd_populate* to use the canonical set_pmd (and drop pmd_set)
  ARC: ioremap: use more commonly used PAGE_KERNEL based uncached flag
  ARC: mm: Enable STRICT_MM_TYPECHECKS
  ARC: mm: Fixes to allow STRICT_MM_TYPECHECKS
  ARC: mm: move mmu/cache externs out to setup.h
  ARC: mm: remove tlb paranoid code
  ARC: mm: use SCRATCH_DATA0 register for caching pgdir in ARCv2 only
  ARC: retire MMUv1 and MMUv2 support
  ARC: retire ARC750 support
  ...
Diffstat (limited to 'arch/arc/include/asm/atomic-spinlock.h')
-rw-r--r--  arch/arc/include/asm/atomic-spinlock.h  102
1 file changed, 102 insertions(+), 0 deletions(-)
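
The new file relies on a pair of helpers, atomic_ops_lock() and atomic_ops_unlock(), defined elsewhere in the ARC tree (asm/smp.h in mainline). A minimal sketch of what they do, assuming the mainline names (smp_atomic_ops_lock etc.) and shown here only for orientation, not as part of this diff:

/* Locking backend assumed by atomic-spinlock.h (sketch, not part of this diff) */
#ifdef CONFIG_SMP

/* One global arch spinlock serializes every emulated atomic R-M-W sequence */
extern arch_spinlock_t smp_atomic_ops_lock;

#define atomic_ops_lock(flags)	do {		\
	local_irq_save(flags);			\
	arch_spin_lock(&smp_atomic_ops_lock);	\
} while (0)

#define atomic_ops_unlock(flags)	do {	\
	arch_spin_unlock(&smp_atomic_ops_lock);	\
	local_irq_restore(flags);		\
} while (0)

#else	/* !CONFIG_SMP: on a single CPU, disabling IRQs is sufficient */

#define atomic_ops_lock(flags)		local_irq_save(flags)
#define atomic_ops_unlock(flags)	local_irq_restore(flags)

#endif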
diff --git a/arch/arc/include/asm/atomic-spinlock.h b/arch/arc/include/asm/atomic-spinlock.h
new file mode 100644
index 000000000000..2c830347bfb4
--- /dev/null
+++ b/arch/arc/include/asm/atomic-spinlock.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ASM_ARC_ATOMIC_SPLOCK_H
+#define _ASM_ARC_ATOMIC_SPLOCK_H
+
+/*
+ * Non-hardware assisted atomic R-M-W operations.
+ * Locking is irq-disabling alone (UP) or a spinlock (SMP).
+ */
+
+static inline void arch_atomic_set(atomic_t *v, int i)
+{
+ /*
+ * Independent of hardware support, all of the atomic_xxx() APIs need
+ * to follow the same locking rules to make sure that a "hardware"
+ * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
+ * sequence.
+ *
+ * Thus atomic_set(), despite being 1 insn (and seemingly atomic),
+ * requires the locking.
+ */
+ unsigned long flags;
+
+ atomic_ops_lock(flags);
+ WRITE_ONCE(v->counter, i);
+ atomic_ops_unlock(flags);
+}
+
+#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
+
+#define ATOMIC_OP(op, c_op, asm_op) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ \
+ atomic_ops_lock(flags); \
+ v->counter c_op i; \
+ atomic_ops_unlock(flags); \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ unsigned int temp; \
+ \
+ /* \
+ * spin lock/unlock provides the needed smp_mb() before/after \
+ */ \
+ atomic_ops_lock(flags); \
+ temp = v->counter; \
+ temp c_op i; \
+ v->counter = temp; \
+ atomic_ops_unlock(flags); \
+ \
+ return temp; \
+}
+
+#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ unsigned int orig; \
+ \
+ /* \
+ * spin lock/unlock provides the needed smp_mb() before/after \
+ */ \
+ atomic_ops_lock(flags); \
+ orig = v->counter; \
+ v->counter c_op i; \
+ atomic_ops_unlock(flags); \
+ \
+ return orig; \
+}
+
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_OP_RETURN(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, add)
+ATOMIC_OPS(sub, -=, sub)
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(andnot, &= ~, bic)
+ATOMIC_OPS(or, |=, or)
+ATOMIC_OPS(xor, ^=, xor)
+
+#define arch_atomic_andnot arch_atomic_andnot
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#endif
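
For reference, expanding ATOMIC_OPS(add, +=, add) through the three generator macros above yields the functions below (a hand expansion for illustration; the names follow directly from the ##op token pasting). Note that the _return variant returns the new value while the fetch_ variant returns the old one, and the fully ordered semantics come entirely from the smp_mb() implied by atomic_ops_lock()/atomic_ops_unlock():

/* ATOMIC_OP(add, +=, add) */
static inline void arch_atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	atomic_ops_lock(flags);
	v->counter += i;	/* the emulated R-M-W, done under the lock */
	atomic_ops_unlock(flags);
}

/* ATOMIC_OP_RETURN(add, +=, add): returns the value *after* the update */
static inline int arch_atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	unsigned int temp;

	atomic_ops_lock(flags);
	temp = v->counter;
	temp += i;
	v->counter = temp;
	atomic_ops_unlock(flags);

	return temp;
}

/* ATOMIC_FETCH_OP(add, +=, add): returns the value *before* the update */
static inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
	unsigned long flags;
	unsigned int orig;

	atomic_ops_lock(flags);
	orig = v->counter;
	v->counter += i;
	atomic_ops_unlock(flags);

	return orig;
}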