Diffstat (limited to 'Documentation/locking')
-rw-r--r--  Documentation/locking/hwspinlock.rst          | 68
-rw-r--r--  Documentation/locking/percpu-rw-semaphore.rst |  4
-rw-r--r--  Documentation/locking/seqlock.rst             |  2
3 files changed, 15 insertions, 59 deletions
diff --git a/Documentation/locking/hwspinlock.rst b/Documentation/locking/hwspinlock.rst
index 6f03713b7003..a737c702a7d1 100644
--- a/Documentation/locking/hwspinlock.rst
+++ b/Documentation/locking/hwspinlock.rst
@@ -40,17 +40,6 @@ User API
 
 ::
 
-	struct hwspinlock *hwspin_lock_request(void);
-
-Dynamically assign an hwspinlock and return its address, or NULL
-in case an unused hwspinlock isn't available. Users of this
-API will usually want to communicate the lock's id to the remote core
-before it can be used to achieve synchronization.
-
-Should be called from a process context (might sleep).
-
-::
-
 	struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
 
 Assign a specific hwspinlock id and return its address, or NULL
@@ -87,6 +76,17 @@ Should be called from a process context (might sleep).
 
 ::
 
+	int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id);
+
+After verifying the owner of the hwspinlock, release a previously acquired
+hwspinlock; returns 0 on success, or an appropriate error code on failure
+(e.g. -EOPNOTSUPP if the bust operation is not defined for the specific
+hwspinlock).
+
+Should be called from a process context (might sleep).
+
+::
+
 	int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int timeout);
 
 Lock a previously-assigned hwspinlock with a timeout limit (specified in
@@ -301,17 +301,6 @@ The caller should **never** unlock an hwspinlock which is already unlocked.
 Doing so is considered a bug (there is no protection against this).
 This function will never sleep.
 
-::
-
-	int hwspin_lock_get_id(struct hwspinlock *hwlock);
-
-Retrieve id number of a given hwspinlock. This is needed when an
-hwspinlock is dynamically assigned: before it can be used to achieve
-mutual exclusion with a remote cpu, the id number should be communicated
-to the remote task with which we want to synchronize.
-
-Returns the hwspinlock id number, or -EINVAL if hwlock is null.
-
 Typical usage
 =============
 
@@ -320,40 +309,7 @@ Typical usage
 	#include <linux/hwspinlock.h>
 	#include <linux/err.h>
 
-	int hwspinlock_example1(void)
-	{
-		struct hwspinlock *hwlock;
-		int ret;
-
-		/* dynamically assign a hwspinlock */
-		hwlock = hwspin_lock_request();
-		if (!hwlock)
-			...
-
-		id = hwspin_lock_get_id(hwlock);
-		/* probably need to communicate id to a remote processor now */
-
-		/* take the lock, spin for 1 sec if it's already taken */
-		ret = hwspin_lock_timeout(hwlock, 1000);
-		if (ret)
-			...
-
-		/*
-		 * we took the lock, do our thing now, but do NOT sleep
-		 */
-
-		/* release the lock */
-		hwspin_unlock(hwlock);
-
-		/* free the lock */
-		ret = hwspin_lock_free(hwlock);
-		if (ret)
-			...
-
-		return ret;
-	}
-
-	int hwspinlock_example2(void)
+	int hwspinlock_example(void)
 	{
 		struct hwspinlock *hwlock;
 		int ret;
diff --git a/Documentation/locking/percpu-rw-semaphore.rst b/Documentation/locking/percpu-rw-semaphore.rst
index 247de6410855..a105bf2dd812 100644
--- a/Documentation/locking/percpu-rw-semaphore.rst
+++ b/Documentation/locking/percpu-rw-semaphore.rst
@@ -16,8 +16,8 @@ writing is very expensive, it calls synchronize_rcu() that can take
 hundreds of milliseconds.
 
 The lock is declared with "struct percpu_rw_semaphore" type.
-The lock is initialized percpu_init_rwsem, it returns 0 on success and
--ENOMEM on allocation failure.
+The lock is initialized with percpu_init_rwsem, it returns 0 on success
+and -ENOMEM on allocation failure.
 The lock must be freed with percpu_free_rwsem to avoid memory leak.
 
 The lock is locked for read with percpu_down_read, percpu_up_read and
diff --git a/Documentation/locking/seqlock.rst b/Documentation/locking/seqlock.rst
index bfda1a5fecad..ec6411d02ac8 100644
--- a/Documentation/locking/seqlock.rst
+++ b/Documentation/locking/seqlock.rst
@@ -153,7 +153,7 @@ Use seqcount_latch_t when the write side sections cannot be protected
 from interruption by readers. This is typically the case when the read
 side can be invoked from NMI handlers.
 
-Check `raw_write_seqcount_latch()` for more information.
+Check `write_seqcount_latch()` for more information.
 
 .. _seqlock_t:
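
For readers skimming this patch, here is a minimal sketch (not part of the
patch or of hwspinlock.rst) of how the newly documented hwspin_lock_bust()
could be combined with the existing hwspinlock calls. The lock id, the
expected owner id and the error handling below are hypothetical placeholders;
the id argument to hwspin_lock_bust() is treated as the owner to verify, per
the description added above.

::

	#include <linux/hwspinlock.h>
	#include <linux/errno.h>
	#include <linux/err.h>

	#define HYP_LOCK_ID		0	/* hypothetical, board-specific lock id */
	#define HYP_REMOTE_OWNER_ID	1	/* hypothetical owner expected to hold the lock */

	int hwspinlock_bust_example(void)
	{
		struct hwspinlock *hwlock;
		int ret;

		/* request the statically assigned, board-agreed hwspinlock */
		hwlock = hwspin_lock_request_specific(HYP_LOCK_ID);
		if (!hwlock)
			return -EBUSY;

		/* take the lock, spin for 1 sec if it's already taken */
		ret = hwspin_lock_timeout(hwlock, 1000);
		if (ret == -ETIMEDOUT) {
			/*
			 * The expected remote owner appears to have died while
			 * holding the lock; verify the owner, bust the lock,
			 * then retry once.
			 */
			ret = hwspin_lock_bust(hwlock, HYP_REMOTE_OWNER_ID);
			if (!ret)
				ret = hwspin_lock_timeout(hwlock, 1000);
		}
		if (ret)
			goto out_free;

		/* we took the lock, do our thing now, but do NOT sleep */

		/* release the lock */
		hwspin_unlock(hwlock);

	out_free:
		/* free the lock */
		hwspin_lock_free(hwlock);
		return ret;
	}

Busting another core's lock is a last-resort recovery path (e.g. after a
remote processor crash); as the new text notes, implementations that do not
provide the operation simply return -EOPNOTSUPP.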