/*
 * arch/sh/include/asm/spinlock-cas.h
 *
 * Copyright (C) 2015 SEI
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_SH_SPINLOCK_CAS_H
#define __ASM_SH_SPINLOCK_CAS_H

#include <asm/barrier.h>
#include <asm/processor.h>

static inline unsigned __sl_cas(volatile unsigned *p, unsigned old, unsigned new)
{
	/*
	 * cas.l compares *p (addressed through r0 via the "z" constraint)
	 * with old and, if they match, stores new into *p.  Either way,
	 * new is overwritten with the value previously in memory, so a
	 * return value equal to old means the swap took place.
	 */
	__asm__ __volatile__("cas.l %1,%0,@r0"
		: "+r"(new)
		: "r"(old), "z"(p)
		: "t", "memory" );
	return new;
}
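
/*
 * Illustration only, not used by this header: setting aside the r0
 * addressing restriction, __sl_cas() acts as a plain word-sized
 * compare-and-swap that returns the value previously in memory, as in
 * the rough sketch below built on GCC's __atomic builtins.
 *
 *	static unsigned cas_sketch(volatile unsigned *p,
 *				   unsigned old, unsigned new)
 *	{
 *		unsigned seen = old;
 *		__atomic_compare_exchange_n((unsigned *)p, &seen, new, 0,
 *					    __ATOMIC_SEQ_CST,
 *					    __ATOMIC_SEQ_CST);
 *		return seen;	// previous *p; equals old on success
 *	}
 */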

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere.
 *
 * The lock word holds 1 when the lock is free and 0 while it is held:
 * locking is a CAS from 1 to 0, unlocking a CAS from 0 back to 1.
 */

#define arch_spin_is_locked(x)		((x)->lock <= 0)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (!__sl_cas(&lock->lock, 1, 0));
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__sl_cas(&lock->lock, 0, 1);
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __sl_cas(&lock->lock, 1, 0);
}
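
/*
 * Worked example of the lock word transitions, starting from a free
 * lock (lock == 1):
 *
 *	arch_spin_trylock():	__sl_cas(&lock, 1, 0) returns the old
 *				value 1 and stores 0: lock acquired.
 *	second trylock:		__sl_cas(&lock, 1, 0) returns 0, the CAS
 *				did not hit: trylock reports failure.
 *	arch_spin_unlock():	__sl_cas(&lock, 0, 1) puts the word back
 *				to 1, freeing the lock.
 */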

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * The lock word counts down from RW_LOCK_BIAS: each reader takes one
 * unit, a writer takes the whole bias, so the count reaches 0 only
 * when the lock is write-locked.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned old;
	/* Retry until we decrement a nonzero count; 0 means write-locked. */
	do old = rw->lock;
	while (!old || __sl_cas(&rw->lock, old, old-1) != old);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned old;
	do old = rw->lock;
	while (__sl_cas(&rw->lock, old, old+1) != old);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	/* Claim the whole bias in one CAS: fails if any reader or writer is in. */
	while (__sl_cas(&rw->lock, RW_LOCK_BIAS, 0) != RW_LOCK_BIAS);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__sl_cas(&rw->lock, 0, RW_LOCK_BIAS);
}
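
/*
 * Worked example of the reader count (the exact value of RW_LOCK_BIAS
 * does not matter, only that it is large):
 *
 *	free:			lock == RW_LOCK_BIAS
 *	two readers in:		lock == RW_LOCK_BIAS - 2
 *	write-locked:		lock == 0
 *
 * A writer's CAS from RW_LOCK_BIAS to 0 therefore fails while any
 * reader is in, and a reader refuses to decrement past 0.
 */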

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned old;
	/* Like arch_read_lock(), but give up instead of spinning on 0. */
	do old = rw->lock;
	while (old && __sl_cas(&rw->lock, old, old-1) != old);
	return !!old;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __sl_cas(&rw->lock, RW_LOCK_BIAS, 0) == RW_LOCK_BIAS;
}
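
/*
 * Hypothetical caller-side sketch (real callers are the generic kernel
 * locking wrappers): a bounded trylock loop over the primitives above.
 * try_hard() and its parameters are made up for illustration.
 *
 *	static int try_hard(arch_spinlock_t *l, int attempts)
 *	{
 *		while (attempts--) {
 *			if (arch_spin_trylock(l))
 *				return 1;	// lock acquired
 *			cpu_relax();		// from <asm/processor.h>
 *		}
 *		return 0;
 *	}
 */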

#endif /* __ASM_SH_SPINLOCK_CAS_H */