Diffstat (limited to 'include/linux/local_lock_internal.h')
 include/linux/local_lock_internal.h | 174 +++++++++++++++++++++++++++++++----
 1 file changed, 164 insertions(+), 10 deletions(-)
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 975e33b793a7..8f82b4eb542f 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -15,6 +15,15 @@ typedef struct {
#endif
} local_lock_t;
+/* local_trylock() and local_trylock_irqsave() only work with local_trylock_t */
+typedef struct {
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+ struct task_struct *owner;
+#endif
+ u8 acquired;
+} local_trylock_t;
+
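The new local_trylock_t must keep its lockdep fields at the same offsets as local_lock_t: the macros below cast between the two types, and only the extra `acquired` byte distinguishes them. A hypothetical usage sketch (all names invented), defining a per-CPU structure guarded by a trylock-capable lock with the INIT_LOCAL_TRYLOCK() initializer introduced further down:

struct my_stock {
        local_trylock_t lock;   /* layout-compatible with local_lock_t */
        unsigned int cached;
};

static DEFINE_PER_CPU(struct my_stock, my_stock) = {
        .lock = INIT_LOCAL_TRYLOCK(lock),
};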
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCAL_LOCK_DEBUG_INIT(lockname) \
.dep_map = { \
@@ -24,6 +33,9 @@ typedef struct {
}, \
.owner = NULL,
+# define LOCAL_TRYLOCK_DEBUG_INIT(lockname) \
+ LOCAL_LOCK_DEBUG_INIT(lockname)
+
static inline void local_lock_acquire(local_lock_t *l)
{
lock_map_acquire(&l->dep_map);
@@ -31,6 +43,13 @@ static inline void local_lock_acquire(local_lock_t *l)
l->owner = current;
}
+static inline void local_trylock_acquire(local_lock_t *l)
+{
+ lock_map_acquire_try(&l->dep_map);
+ DEBUG_LOCKS_WARN_ON(l->owner);
+ l->owner = current;
+}
+
static inline void local_lock_release(local_lock_t *l)
{
DEBUG_LOCKS_WARN_ON(l->owner != current);
@@ -44,12 +63,15 @@ static inline void local_lock_debug_init(local_lock_t *l)
}
#else /* CONFIG_DEBUG_LOCK_ALLOC */
# define LOCAL_LOCK_DEBUG_INIT(lockname)
+# define LOCAL_TRYLOCK_DEBUG_INIT(lockname)
static inline void local_lock_acquire(local_lock_t *l) { }
+static inline void local_trylock_acquire(local_lock_t *l) { }
static inline void local_lock_release(local_lock_t *l) { }
static inline void local_lock_debug_init(local_lock_t *l) { }
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) }
+#define INIT_LOCAL_TRYLOCK(lockname) { LOCAL_TRYLOCK_DEBUG_INIT(lockname) }
#define __local_lock_init(lock) \
do { \
@@ -62,42 +84,133 @@ do { \
local_lock_debug_init(lock); \
} while (0)
+#define __local_trylock_init(lock) __local_lock_init((local_lock_t *)lock)
+
+#define __spinlock_nested_bh_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
+ lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
+ 0, LD_WAIT_CONFIG, LD_WAIT_INV, \
+ LD_LOCK_NORMAL); \
+ local_lock_debug_init(lock); \
+} while (0)
+
+#define __local_lock_acquire(lock) \
+ do { \
+ local_trylock_t *__tl; \
+ local_lock_t *__l; \
+ \
+ __l = (local_lock_t *)(lock); \
+ __tl = (local_trylock_t *)__l; \
+ _Generic((lock), \
+ local_trylock_t *: ({ \
+ lockdep_assert(__tl->acquired == 0); \
+ WRITE_ONCE(__tl->acquired, 1); \
+ }), \
+ local_lock_t *: (void)0); \
+ local_lock_acquire(__l); \
+ } while (0)
+
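_Generic() selects an arm at compile time from the static type of `lock`, so plain local_lock_t callers compile to exactly the old code, while local_trylock_t callers additionally maintain the `acquired` flag. All arms must type-check (which is why `__tl` is computed unconditionally), but only the selected arm emits code. A minimal standalone illustration of the dispatch idiom, outside the kernel:

#include <stdio.h>

typedef struct { int x; } lock_a;                       /* stand-in for local_lock_t */
typedef struct { int x; int acquired; } lock_b;         /* stand-in for local_trylock_t */

#define lock_kind(p) _Generic((p),              \
        lock_a *: "plain lock",                 \
        lock_b *: "trylock-capable lock")

int main(void)
{
        lock_a a;
        lock_b b;

        puts(lock_kind(&a));    /* prints "plain lock" */
        puts(lock_kind(&b));    /* prints "trylock-capable lock" */
        return 0;
}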
#define __local_lock(lock) \
do { \
preempt_disable(); \
- local_lock_acquire(this_cpu_ptr(lock)); \
+ __local_lock_acquire(lock); \
} while (0)
#define __local_lock_irq(lock) \
do { \
local_irq_disable(); \
- local_lock_acquire(this_cpu_ptr(lock)); \
+ __local_lock_acquire(lock); \
} while (0)
#define __local_lock_irqsave(lock, flags) \
do { \
local_irq_save(flags); \
- local_lock_acquire(this_cpu_ptr(lock)); \
+ __local_lock_acquire(lock); \
+ } while (0)
+
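The lock/irq/irqsave wrappers keep their shape; only the body now routes through __local_lock_acquire(). For reference, a sketch of ordinary (non-trylock) usage of the public API, with invented names:

static DEFINE_PER_CPU(local_lock_t, my_lock) = INIT_LOCAL_LOCK(my_lock);
static DEFINE_PER_CPU(unsigned long, my_counter);

static void my_inc(void)
{
        unsigned long flags;

        local_lock_irqsave(&my_lock, flags);
        __this_cpu_inc(my_counter);
        local_unlock_irqrestore(&my_lock, flags);
}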
+#define __local_trylock(lock) \
+ ({ \
+ local_trylock_t *__tl; \
+ \
+ preempt_disable(); \
+ __tl = (lock); \
+ if (READ_ONCE(__tl->acquired)) { \
+ preempt_enable(); \
+ __tl = NULL; \
+ } else { \
+ WRITE_ONCE(__tl->acquired, 1); \
+ local_trylock_acquire( \
+ (local_lock_t *)__tl); \
+ } \
+ !!__tl; \
+ })
+
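__local_trylock() fails exactly when this CPU's `acquired` flag is already set, i.e. when the new context nests inside an existing critical section (NMI, tracing, or a similar reentrant path); callers are expected to fall back rather than spin. A sketch building on the hypothetical my_stock above, assuming the local_trylock()/local_unlock() wrappers from <linux/local_lock.h>:

static bool my_consume(unsigned int n)
{
        struct my_stock *stock;
        bool ret = false;

        if (!local_trylock(&my_stock.lock))
                return false;   /* reentered on this CPU: take a slow path */

        stock = this_cpu_ptr(&my_stock);
        if (stock->cached >= n) {
                stock->cached -= n;
                ret = true;
        }
        local_unlock(&my_stock.lock);
        return ret;
}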
+#define __local_trylock_irqsave(lock, flags) \
+ ({ \
+ local_trylock_t *__tl; \
+ \
+ local_irq_save(flags); \
+ __tl = (lock); \
+ if (READ_ONCE(__tl->acquired)) { \
+ local_irq_restore(flags); \
+ __tl = NULL; \
+ } else { \
+ WRITE_ONCE(__tl->acquired, 1); \
+ local_trylock_acquire( \
+ (local_lock_t *)__tl); \
+ } \
+ !!__tl; \
+ })
+
+/* preemption or migration must be disabled before calling __local_lock_is_locked */
+#define __local_lock_is_locked(lock) READ_ONCE(this_cpu_ptr(lock)->acquired)
+
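On !PREEMPT_RT only local_trylock_t has an `acquired` field, so this helper is meaningful only for trylock-capable locks; note that, unlike the helpers above, it applies this_cpu_ptr() itself and so is handed the per-CPU address. A sketch of the obvious debug use, assuming a local_lock_is_locked() wrapper exists in <linux/local_lock.h> and reusing the hypothetical my_stock:

static void my_refill(unsigned int n)
{
        /* caller holds this CPU's my_stock.lock */
        lockdep_assert(local_lock_is_locked(&my_stock.lock));
        this_cpu_ptr(&my_stock)->cached += n;
}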
+#define __local_lock_release(lock) \
+ do { \
+ local_trylock_t *__tl; \
+ local_lock_t *__l; \
+ \
+ __l = (local_lock_t *)(lock); \
+ __tl = (local_trylock_t *)__l; \
+ local_lock_release(__l); \
+ _Generic((lock), \
+ local_trylock_t *: ({ \
+ lockdep_assert(__tl->acquired == 1); \
+ WRITE_ONCE(__tl->acquired, 0); \
+ }), \
+ local_lock_t *: (void)0); \
} while (0)
#define __local_unlock(lock) \
do { \
- local_lock_release(this_cpu_ptr(lock)); \
+ __local_lock_release(lock); \
preempt_enable(); \
} while (0)
#define __local_unlock_irq(lock) \
do { \
- local_lock_release(this_cpu_ptr(lock)); \
+ __local_lock_release(lock); \
local_irq_enable(); \
} while (0)
#define __local_unlock_irqrestore(lock, flags) \
do { \
- local_lock_release(this_cpu_ptr(lock)); \
+ __local_lock_release(lock); \
local_irq_restore(flags); \
} while (0)
+#define __local_lock_nested_bh(lock) \
+ do { \
+ lockdep_assert_in_softirq(); \
+ local_lock_acquire((lock)); \
+ } while (0)
+
+#define __local_unlock_nested_bh(lock) \
+ local_lock_release((lock))
+
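The nested-BH variants annotate per-CPU data that is already serialized by softirq execution: on !PREEMPT_RT they perform only the lockdep acquire (preemption is already implied by BH context), while on PREEMPT_RT they take the underlying spinlock. A sketch with invented names:

static DEFINE_PER_CPU(local_lock_t, queue_lock) = INIT_LOCAL_LOCK(queue_lock);

static void my_process_queue(void)     /* runs in softirq context */
{
        local_lock_nested_bh(&queue_lock);
        /* ... manipulate this CPU's queue ... */
        local_unlock_nested_bh(&queue_lock);
}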
#else /* !CONFIG_PREEMPT_RT */
/*
@@ -105,18 +218,22 @@ do { \
* critical section while staying preemptible.
*/
typedef spinlock_t local_lock_t;
+typedef spinlock_t local_trylock_t;
#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
+#define INIT_LOCAL_TRYLOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
-#define __local_lock_init(l) \
+#define __local_lock_init(__l) \
do { \
- local_spin_lock_init((l)); \
+ local_spin_lock_init((__l)); \
} while (0)
+#define __local_trylock_init(__l) __local_lock_init(__l)
+
#define __local_lock(__lock) \
do { \
migrate_disable(); \
- spin_lock(this_cpu_ptr((__lock))); \
+ spin_lock((__lock)); \
} while (0)
#define __local_lock_irq(lock) __local_lock(lock)
@@ -130,7 +247,7 @@ typedef spinlock_t local_lock_t;
#define __local_unlock(__lock) \
do { \
- spin_unlock(this_cpu_ptr((__lock))); \
+ spin_unlock((__lock)); \
migrate_enable(); \
} while (0)
@@ -138,4 +255,41 @@ typedef spinlock_t local_lock_t;
#define __local_unlock_irqrestore(lock, flags) __local_unlock(lock)
+#define __local_lock_nested_bh(lock) \
+do { \
+ lockdep_assert_in_softirq_func(); \
+ spin_lock((lock)); \
+} while (0)
+
+#define __local_unlock_nested_bh(lock) \
+do { \
+ spin_unlock((lock)); \
+} while (0)
+
+#define __local_trylock(lock) \
+ ({ \
+ int __locked; \
+ \
+ if (in_nmi() | in_hardirq()) { \
+ __locked = 0; \
+ } else { \
+ migrate_disable(); \
+ __locked = spin_trylock((lock)); \
+ if (!__locked) \
+ migrate_enable(); \
+ } \
+ __locked; \
+ })
+
+#define __local_trylock_irqsave(lock, flags) \
+ ({ \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ __local_trylock(lock); \
+ })
+
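On PREEMPT_RT a spinlock_t is a sleeping, rtmutex-based lock, hence the bailout when in_nmi()/in_hardirq(), and local_trylock_irqsave() never actually disables interrupts: `flags` is zeroed purely for API compatibility. Callers must treat `flags` as opaque and hand it back only to local_unlock_irqrestore(). A sketch, again with the hypothetical my_stock:

unsigned long flags;

if (local_trylock_irqsave(&my_stock.lock, flags)) {
        /* on RT, IRQs may still be enabled here */
        this_cpu_ptr(&my_stock)->cached++;
        local_unlock_irqrestore(&my_stock.lock, flags);
}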
+/* migration must be disabled before calling __local_lock_is_locked */
+#define __local_lock_is_locked(__lock) \
+ (rt_mutex_owner(&this_cpu_ptr(__lock)->lock) == current)
+
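Note the semantic difference between the two __local_lock_is_locked() implementations: on !PREEMPT_RT it reports that this CPU's lock is held by someone (possibly a context the caller interrupted), whereas on PREEMPT_RT it is true only when the current task owns the underlying rtmutex.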
#endif /* CONFIG_PREEMPT_RT */