Diffstat (limited to 'drivers/tty/tty_ldsem.c')
-rw-r--r-- drivers/tty/tty_ldsem.c | 188
1 file changed, 74 insertions(+), 114 deletions(-)
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index 22fad8ad5ac2..4e18031a5ca3 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Ldisc rw semaphore
*
@@ -22,9 +23,6 @@
* Michel Lespinasse <walken@google.com>.
*
* Copyright (C) 2013 Peter Hurley <peter@hurleysoftware.com>
- *
- * This file may be redistributed under the terms of the GNU General Public
- * License v2.
*/
#include <linux/list.h>
@@ -32,36 +30,8 @@
#include <linux/atomic.h>
#include <linux/tty.h>
#include <linux/sched.h>
-
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __acq(l, s, t, r, c, n, i) \
- lock_acquire(&(l)->dep_map, s, t, r, c, n, i)
-# define __rel(l, n, i) \
- lock_release(&(l)->dep_map, n, i)
-# ifdef CONFIG_PROVE_LOCKING
-# define lockdep_acquire(l, s, t, i) __acq(l, s, t, 0, 2, NULL, i)
-# define lockdep_acquire_nest(l, s, t, n, i) __acq(l, s, t, 0, 2, n, i)
-# define lockdep_acquire_read(l, s, t, i) __acq(l, s, t, 1, 2, NULL, i)
-# define lockdep_release(l, n, i) __rel(l, n, i)
-# else
-# define lockdep_acquire(l, s, t, i) __acq(l, s, t, 0, 1, NULL, i)
-# define lockdep_acquire_nest(l, s, t, n, i) __acq(l, s, t, 0, 1, n, i)
-# define lockdep_acquire_read(l, s, t, i) __acq(l, s, t, 1, 1, NULL, i)
-# define lockdep_release(l, n, i) __rel(l, n, i)
-# endif
-#else
-# define lockdep_acquire(l, s, t, i) do { } while (0)
-# define lockdep_acquire_nest(l, s, t, n, i) do { } while (0)
-# define lockdep_acquire_read(l, s, t, i) do { } while (0)
-# define lockdep_release(l, n, i) do { } while (0)
-#endif
-
-#ifdef CONFIG_LOCK_STAT
-# define lock_stat(_lock, stat) lock_##stat(&(_lock)->dep_map, _RET_IP_)
-#else
-# define lock_stat(_lock, stat) do { } while (0)
-#endif
+#include <linux/sched/debug.h>
+#include <linux/sched/task.h>
#if BITS_PER_LONG == 64
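The homegrown lockdep wrappers removed above duplicated helpers that <linux/lockdep.h> already provides; the patch switches to the stock rwsem annotations (exclusive acquire for writers, shared for readers) and to lock_contended()/lock_acquired() on the dep_map in place of the lock_stat() macro. The mapping, as a sketch:

	/*
	 * lockdep_acquire(sem, s, t, i)      -> rwsem_acquire(&sem->dep_map, s, t, i)
	 * lockdep_acquire_read(sem, s, t, i) -> rwsem_acquire_read(&sem->dep_map, s, t, i)
	 * lockdep_release(sem, 1, i)         -> rwsem_release(&sem->dep_map, i)
	 * lock_stat(sem, contended)          -> lock_contended(&sem->dep_map, i)
	 * lock_stat(sem, acquired)           -> lock_acquired(&sem->dep_map, i)
	 */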
@@ -81,18 +51,6 @@ struct ldsem_waiter {
struct task_struct *task;
};
-static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem)
-{
- return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
-}
-
-static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
-{
- long tmp = *old;
- *old = atomic_long_cmpxchg(&sem->count, *old, new);
- return *old == tmp;
-}
-
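The two helpers removed here existed mainly to cast sem->count, a plain long, to atomic_long_t; with the count converted to a real atomic_long_t (see __init_ldsem() below), the stock atomic_long_try_cmpxchg() replaces them. Note the changed semantics: on failure it refreshes the expected value in place, and on success it leaves it untouched, which is why down_read_failed() below must now apply 'count += adjust' by hand. A minimal sketch of the new retry idiom, using a hypothetical helper name:

	/* Hypothetical illustration only: apply 'adjust' to sem->count with
	 * the try_cmpxchg() retry idiom used throughout this patch.
	 */
	static long ldsem_adjust_sketch(struct ld_semaphore *sem, long count, long adjust)
	{
		/* On failure, 'count' is refreshed with the observed value,
		 * so no explicit re-read is needed before retrying.
		 */
		while (!atomic_long_try_cmpxchg(&sem->count, &count, count + adjust))
			;
		/* On success, 'count' still holds the old value. */
		return count + adjust;
	}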
/*
* Initialize an ldsem:
*/
@@ -106,7 +64,7 @@ void __init_ldsem(struct ld_semaphore *sem, const char *name,
debug_check_no_locks_freed((void *)sem, sizeof(*sem));
lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
- sem->count = LDSEM_UNLOCKED;
+ atomic_long_set(&sem->count, LDSEM_UNLOCKED);
sem->wait_readers = 0;
raw_spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->read_wait);
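Callers are not expected to call __init_ldsem() directly; the init_ldsem() wrapper in the tty headers supplies the lockdep name and a static per-call-site class key. A usage sketch, mirroring how the tty core initializes tty->ldisc_sem:

	/* init_ldsem() expands roughly to:
	 *	static struct lock_class_key __key;
	 *	__init_ldsem(sem, #sem, &__key);
	 */
	init_ldsem(&tty->ldisc_sem);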
@@ -119,23 +77,23 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem)
struct task_struct *tsk;
long adjust, count;
- /* Try to grant read locks to all readers on the read wait list.
+ /*
+ * Try to grant read locks to all readers on the read wait list.
* Note the 'active part' of the count is incremented by
* the number of readers before waking any processes up.
*/
adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);
- count = ldsem_atomic_update(adjust, sem);
+ count = atomic_long_add_return(adjust, &sem->count);
do {
if (count > 0)
break;
- if (ldsem_cmpxchg(&count, count - adjust, sem))
+ if (atomic_long_try_cmpxchg(&sem->count, &count, count - adjust))
return;
} while (1);
list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
tsk = waiter->task;
- smp_mb();
- waiter->task = NULL;
+ smp_store_release(&waiter->task, NULL);
wake_up_process(tsk);
put_task_struct(tsk);
}
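The smp_mb() plus plain store is tightened into a store-release, paired with a load-acquire in down_read_failed() below. Both sides of the pairing, as a sketch:

	/* Waker (above): everything before this store, notably the count
	 * update that granted the read lock, is ordered before it.
	 */
	smp_store_release(&waiter->task, NULL);

	/* Waiter (down_read_failed() below): the acquire load pairs with
	 * the release store, so once NULL is seen the grant is visible too.
	 */
	if (!smp_load_acquire(&waiter.task))
		break;	/* lock granted */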
@@ -145,14 +103,15 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem)
static inline int writer_trylock(struct ld_semaphore *sem)
{
- /* only wake this writer if the active part of the count can be
+ /*
+ * Only wake this writer if the active part of the count can be
* transitioned from 0 -> 1
*/
- long count = ldsem_atomic_update(LDSEM_ACTIVE_BIAS, sem);
+ long count = atomic_long_add_return(LDSEM_ACTIVE_BIAS, &sem->count);
do {
if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS)
return 1;
- if (ldsem_cmpxchg(&count, count - LDSEM_ACTIVE_BIAS, sem))
+ if (atomic_long_try_cmpxchg(&sem->count, &count, count - LDSEM_ACTIVE_BIAS))
return 0;
} while (1);
}
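A worked example of that 0 -> 1 transition, assuming the bias constants defined earlier in the file keep their usual values (LDSEM_ACTIVE_BIAS == 1, LDSEM_WAIT_BIAS == -(LDSEM_ACTIVE_MASK + 1), so waiter counts live above the active mask):

	/* One reader active, one writer parked:
	 *	count == LDSEM_READ_BIAS + LDSEM_WAIT_BIAS	(active part == 1)
	 * The reader releases (-LDSEM_READ_BIAS):
	 *	count == LDSEM_WAIT_BIAS			(active part == 0)
	 * ldsem_wake() runs writer_trylock(), which adds LDSEM_ACTIVE_BIAS:
	 *	count == LDSEM_WAIT_BIAS + 1			(active part 0 -> 1)
	 * and the writer wins. Had another reader slipped in first, the
	 * active part would read 2 and the cmpxchg loop backs the bias out.
	 */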
@@ -197,18 +156,21 @@ static struct ld_semaphore __sched *
down_read_failed(struct ld_semaphore *sem, long count, long timeout)
{
struct ldsem_waiter waiter;
- struct task_struct *tsk = current;
long adjust = -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS;
/* set up my own style of waitqueue */
raw_spin_lock_irq(&sem->wait_lock);
- /* Try to reverse the lock attempt but if the count has changed
- * so that reversing fails, check if there are are no waiters,
- * and early-out if not */
+ /*
+ * Try to reverse the lock attempt but if the count has changed
+ * so that reversing fails, check if there are no waiters,
+ * and early-out if not.
+ */
do {
- if (ldsem_cmpxchg(&count, count + adjust, sem))
+ if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust)) {
+ count += adjust;
break;
+ }
if (count > 0) {
raw_spin_unlock_irq(&sem->wait_lock);
return sem;
@@ -218,8 +180,8 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
list_add_tail(&waiter.list, &sem->read_wait);
sem->wait_readers++;
- waiter.task = tsk;
- get_task_struct(tsk);
+ waiter.task = current;
+ get_task_struct(current);
/* if there are no active locks, wake the new lock owner(s) */
if ((count & LDSEM_ACTIVE_MASK) == 0)
@@ -229,24 +191,27 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
/* wait to be given the lock */
for (;;) {
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ set_current_state(TASK_UNINTERRUPTIBLE);
- if (!waiter.task)
+ if (!smp_load_acquire(&waiter.task))
break;
if (!timeout)
break;
timeout = schedule_timeout(timeout);
}
- __set_task_state(tsk, TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
if (!timeout) {
- /* lock timed out but check if this task was just
+ /*
+ * Lock timed out but check if this task was just
* granted lock ownership - if so, pretend there
- * was no timeout; otherwise, cleanup lock wait */
+ * was no timeout; otherwise, cleanup lock wait.
+ */
raw_spin_lock_irq(&sem->wait_lock);
if (waiter.task) {
- ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
+ atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
+ sem->wait_readers--;
list_del(&waiter.list);
raw_spin_unlock_irq(&sem->wait_lock);
put_task_struct(waiter.task);
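The added sem->wait_readers-- fixes the bookkeeping on this timeout path: __ldsem_wake_readers() sizes its one-shot count adjustment from wait_readers, so a timed-out reader that left the list without decrementing it would be credited as an active reader that never existed. The dependency, quoted from the wake path above:

	/* Each stale wait_readers entry inflates 'adjust' by one full
	 * wait-to-active conversion, corrupting the count:
	 */
	adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);
	count = atomic_long_add_return(adjust, &sem->count);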
@@ -265,18 +230,19 @@ static struct ld_semaphore __sched *
down_write_failed(struct ld_semaphore *sem, long count, long timeout)
{
struct ldsem_waiter waiter;
- struct task_struct *tsk = current;
long adjust = -LDSEM_ACTIVE_BIAS;
int locked = 0;
/* set up my own style of waitqueue */
raw_spin_lock_irq(&sem->wait_lock);
- /* Try to reverse the lock attempt but if the count has changed
+ /*
+ * Try to reverse the lock attempt but if the count has changed
* so that reversing fails, check if the lock is now owned,
- * and early-out if so */
+ * and early-out if so.
+ */
do {
- if (ldsem_cmpxchg(&count, count + adjust, sem))
+ if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust))
break;
if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) {
raw_spin_unlock_irq(&sem->wait_lock);
@@ -286,26 +252,37 @@ down_write_failed(struct ld_semaphore *sem, long count, long timeout)
list_add_tail(&waiter.list, &sem->write_wait);
- waiter.task = tsk;
+ waiter.task = current;
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ set_current_state(TASK_UNINTERRUPTIBLE);
for (;;) {
if (!timeout)
break;
raw_spin_unlock_irq(&sem->wait_lock);
timeout = schedule_timeout(timeout);
raw_spin_lock_irq(&sem->wait_lock);
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
- if ((locked = writer_trylock(sem)))
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ locked = writer_trylock(sem);
+ if (locked)
break;
}
if (!locked)
- ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
+ atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
list_del(&waiter.list);
+
+ /*
+ * In case of timeout, wake up every reader that gave the right of way
+ * to the writer. This prevents splitting the readers into two groups:
+ * one that holds the semaphore and another that sleeps
+ * (in the case of no contention with a writer).
+ */
+ if (!locked && list_empty(&sem->write_wait))
+ __ldsem_wake_readers(sem);
+
raw_spin_unlock_irq(&sem->wait_lock);
- __set_task_state(tsk, TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
/* lock wait may have timed out */
if (!locked)
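The new block above closes a lost-wakeup window: readers queued behind this writer converted their active biases to wait biases, and if the writer times out with no other writer queued, nobody remains to wake them. The guard, condensed:

	/* Timed out (!locked) and no other writer queued: hand the
	 * semaphore to the parked readers rather than leaving them asleep.
	 */
	if (!locked && list_empty(&sem->write_wait))
		__ldsem_wake_readers(sem);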
@@ -315,41 +292,41 @@ down_write_failed(struct ld_semaphore *sem, long count, long timeout)
-static inline int __ldsem_down_read_nested(struct ld_semaphore *sem,
+static int __ldsem_down_read_nested(struct ld_semaphore *sem,
int subclass, long timeout)
{
long count;
- lockdep_acquire_read(sem, subclass, 0, _RET_IP_);
+ rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
- count = ldsem_atomic_update(LDSEM_READ_BIAS, sem);
+ count = atomic_long_add_return(LDSEM_READ_BIAS, &sem->count);
if (count <= 0) {
- lock_stat(sem, contended);
+ lock_contended(&sem->dep_map, _RET_IP_);
if (!down_read_failed(sem, count, timeout)) {
- lockdep_release(sem, 1, _RET_IP_);
+ rwsem_release(&sem->dep_map, _RET_IP_);
return 0;
}
}
- lock_stat(sem, acquired);
+ lock_acquired(&sem->dep_map, _RET_IP_);
return 1;
}
-static inline int __ldsem_down_write_nested(struct ld_semaphore *sem,
+static int __ldsem_down_write_nested(struct ld_semaphore *sem,
int subclass, long timeout)
{
long count;
- lockdep_acquire(sem, subclass, 0, _RET_IP_);
+ rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
- count = ldsem_atomic_update(LDSEM_WRITE_BIAS, sem);
+ count = atomic_long_add_return(LDSEM_WRITE_BIAS, &sem->count);
if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) {
- lock_stat(sem, contended);
+ lock_contended(&sem->dep_map, _RET_IP_);
if (!down_write_failed(sem, count, timeout)) {
- lockdep_release(sem, 1, _RET_IP_);
+ rwsem_release(&sem->dep_map, _RET_IP_);
return 0;
}
}
- lock_stat(sem, acquired);
+ lock_acquired(&sem->dep_map, _RET_IP_);
return 1;
}
@@ -368,12 +345,12 @@ int __sched ldsem_down_read(struct ld_semaphore *sem, long timeout)
*/
int ldsem_down_read_trylock(struct ld_semaphore *sem)
{
- long count = sem->count;
+ long count = atomic_long_read(&sem->count);
while (count >= 0) {
- if (ldsem_cmpxchg(&count, count + LDSEM_READ_BIAS, sem)) {
- lockdep_acquire_read(sem, 0, 1, _RET_IP_);
- lock_stat(sem, acquired);
+ if (atomic_long_try_cmpxchg(&sem->count, &count, count + LDSEM_READ_BIAS)) {
+ rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
+ lock_acquired(&sem->dep_map, _RET_IP_);
return 1;
}
}
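A caller-side sketch for the trylock path, mirroring tty_ldisc_ref() in tty_ldisc.c; a failed trylock takes no lock, so there is nothing to undo:

	struct tty_ldisc *ld = NULL;

	if (ldsem_down_read_trylock(&tty->ldisc_sem)) {
		ld = tty->ldisc;
		if (!ld)	/* no ldisc installed: drop the lock again */
			ldsem_up_read(&tty->ldisc_sem);
	}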
@@ -390,32 +367,15 @@ int __sched ldsem_down_write(struct ld_semaphore *sem, long timeout)
}
/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-int ldsem_down_write_trylock(struct ld_semaphore *sem)
-{
- long count = sem->count;
-
- while ((count & LDSEM_ACTIVE_MASK) == 0) {
- if (ldsem_cmpxchg(&count, count + LDSEM_WRITE_BIAS, sem)) {
- lockdep_acquire(sem, 0, 1, _RET_IP_);
- lock_stat(sem, acquired);
- return 1;
- }
- }
- return 0;
-}
-
-/*
* release a read lock
*/
void ldsem_up_read(struct ld_semaphore *sem)
{
long count;
- lockdep_release(sem, 1, _RET_IP_);
+ rwsem_release(&sem->dep_map, _RET_IP_);
- count = ldsem_atomic_update(-LDSEM_READ_BIAS, sem);
+ count = atomic_long_add_return(-LDSEM_READ_BIAS, &sem->count);
if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0)
ldsem_wake(sem);
}
@@ -427,9 +387,9 @@ void ldsem_up_write(struct ld_semaphore *sem)
{
long count;
- lockdep_release(sem, 1, _RET_IP_);
+ rwsem_release(&sem->dep_map, _RET_IP_);
- count = ldsem_atomic_update(-LDSEM_WRITE_BIAS, sem);
+ count = atomic_long_add_return(-LDSEM_WRITE_BIAS, &sem->count);
if (count < 0)
ldsem_wake(sem);
}
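The two unlock paths wake differently on purpose: ldsem_up_read() wakes only when the active part reaches zero, since other readers may still hold the lock, while ldsem_up_write() wakes on any queued waiter (count < 0) because the writer was the sole active holder. A writer-side usage sketch, mirroring tty_ldisc_lock() (the timeout value is illustrative):

	/* A zero return means the timeout expired without the lock. */
	if (!ldsem_down_write(&tty->ldisc_sem, 5 * HZ))
		return -EBUSY;
	/* ... exclusive ldisc access ... */
	ldsem_up_write(&tty->ldisc_sem);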