Diffstat (limited to 'ipc')
-rw-r--r--   ipc/mqueue.c    6
-rw-r--r--   ipc/msg.c       6
-rw-r--r--   ipc/sem.c      18
3 files changed, 18 insertions(+), 12 deletions(-)
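
All three files below get the same fix. The wait descriptor that carries the task pointer (ext_wait_queue in mqueue.c, msg_receiver in msg.c, sem_queue in sem.c) lives on the sleeping task's stack, and the smp_store_release() that publishes the ready state or result code is the last moment the waker may touch it: once that store is visible, the sleeper can wake up, return, and reuse the stack memory. Reading this->task, msr->r_tsk or q->sleeper after the release is therefore a potential use-after-free. Each hunk instead caches the pointer returned by get_task_struct() before the release and hands the cached value to wake_q_add_safe(). The ordering rule, reduced to a runnable userspace analogue with C11 atomics (names and types here are illustrative, not kernel code; build with `cc -pthread`):

	#include <stdatomic.h>
	#include <pthread.h>
	#include <stdio.h>

	enum { STATE_NONE, STATE_READY };

	struct wait_entry {
		atomic_int state;
		int task;			/* stands in for the task pointer */
	};

	static struct wait_entry *_Atomic slot;	/* hand-off, like the IPC wait list */

	static void *waiter(void *arg)
	{
		struct wait_entry e = { .task = 42 };	/* on-stack, like ext_wait_queue */

		(void)arg;
		atomic_store(&slot, &e);		/* publish the entry */

		/* pairs with the waker's release store */
		while (atomic_load_explicit(&e.state, memory_order_acquire) != STATE_READY)
			;
		return NULL;				/* e's stack slot is dead now */
	}

	static void waker(void)
	{
		struct wait_entry *wq;
		int task;

		while (!(wq = atomic_load(&slot)))	/* wait for an entry */
			;
		task = wq->task;			/* cache BEFORE the release... */
		atomic_store_explicit(&wq->state, STATE_READY, memory_order_release);
		/* ...wq may already be invalid here; only the cached value is safe */
		printf("waking task %d\n", task);
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, waiter, NULL);
		waker();
		pthread_join(&t, NULL);
		return 0;
	}

After the release store, the waker's only safe move is to use data it cached beforehand; that is exactly what the hunks below enforce.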
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 461fcf8c873d..5becca9be867 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -1006,12 +1006,14 @@ static inline void __pipelined_op(struct wake_q_head *wake_q,
struct mqueue_inode_info *info,
struct ext_wait_queue *this)
{
+ struct task_struct *task;
+
list_del(&this->list);
- get_task_struct(this->task);
+ task = get_task_struct(this->task);
/* see MQ_BARRIER for purpose/pairing */
smp_store_release(&this->state, STATE_READY);
- wake_q_add_safe(wake_q, this->task);
+ wake_q_add_safe(wake_q, task);
}
/* pipelined_send() - send a message directly to the task waiting in
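
The release in __pipelined_op() pairs with a lockless check on the waiter side; condensed and hedged from wq_sleep() in the same file (locking, timeout and signal handling elided, so this is a sketch of the MQ_BARRIER scheme rather than the verbatim function):

	/* waiter: 'wait' is an ext_wait_queue on this task's stack */
	for (;;) {
		if (READ_ONCE(wait.state) == STATE_READY) {
			/* see MQ_BARRIER: acquire after the control
			 * dependency, pairs with smp_store_release()
			 * in __pipelined_op() */
			smp_acquire__after_ctrl_dep();
			break;		/* may return; the stack slot dies */
		}
		schedule();		/* really schedule_hrtimeout_range_clock()
					 * plus a locked re-check */
	}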
diff --git a/ipc/msg.c b/ipc/msg.c
index acd1bc7af55a..6e6c8e0c9380 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -251,11 +251,13 @@ static void expunge_all(struct msg_queue *msq, int res,
struct msg_receiver *msr, *t;
list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
- get_task_struct(msr->r_tsk);
+ struct task_struct *r_tsk;
+
+ r_tsk = get_task_struct(msr->r_tsk);
/* see MSG_BARRIER for purpose/pairing */
smp_store_release(&msr->r_msg, ERR_PTR(res));
- wake_q_add_safe(wake_q, msr->r_tsk);
+ wake_q_add_safe(wake_q, r_tsk);
}
}
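
Assigning the result of get_task_struct(), as the hunks above do, works because in current kernels it pins the task and returns its argument; approximately, from include/linux/sched/task.h (modulo kernel version):

	static inline struct task_struct *get_task_struct(struct task_struct *t)
	{
		refcount_inc(&t->usage);
		return t;
	}

wake_q_add_safe() then takes ownership of that reference, so the pinned task stays valid between the release store and the eventual wake_up_q().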
diff --git a/ipc/sem.c b/ipc/sem.c
index f6c30a85dadf..bf534c74293e 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -36,7 +36,7 @@
* - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
* - undo adjustments at process exit are limited to 0..SEMVMX.
* - namespace are supported.
- * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtine by writing
+ * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
* to /proc/sys/kernel/sem.
* - statistics about the usage are reported in /proc/sysvipc/sem.
*
@@ -224,7 +224,7 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
* Setting it to a result code is a RELEASE, this is ensured by both a
* smp_store_release() (for case a) and while holding sem_lock()
* (for case b).
- * The AQUIRE when reading the result code without holding sem_lock() is
+ * The ACQUIRE when reading the result code without holding sem_lock() is
* achieved by using READ_ONCE() + smp_acquire__after_ctrl_dep().
* (case a above).
* Reading the result code while holding sem_lock() needs no further barriers,
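
The lockless ACQUIRE of case (a) sits at the end of do_semtimedop(); schematically (condensed, not the verbatim kernel code):

	error = READ_ONCE(queue.status);
	if (error != -EINTR) {
		/* see SEM_BARRIER_2 for purpose/pairing */
		smp_acquire__after_ctrl_dep();
		goto out_free;		/* fastpath: result is published */
	}
	/* otherwise re-check under sem_lock(), case (b): no extra barrier */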
@@ -784,12 +784,14 @@ would_block:
static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
struct wake_q_head *wake_q)
{
- get_task_struct(q->sleeper);
+ struct task_struct *sleeper;
+
+ sleeper = get_task_struct(q->sleeper);

- /* see SEM_BARRIER_2 for purpuse/pairing */
+ /* see SEM_BARRIER_2 for purpose/pairing */
smp_store_release(&q->status, error);
- wake_q_add_safe(wake_q, q->sleeper);
+ wake_q_add_safe(wake_q, sleeper);
}
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
@@ -821,7 +823,7 @@ static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
/* It is impossible that someone waits for the new value:
* - complex operations always restart.
- * - wait-for-zero are handled seperately.
+ * - wait-for-zero are handled separately.
* - q is a previously sleeping simple operation that
* altered the array. It must be a decrement, because
* simple increments never sleep.
@@ -1046,7 +1048,7 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
* - No complex ops, thus all sleeping ops are
* decrease.
* - if we decreased the value, then any sleeping
- * semaphore ops wont be able to run: If the
+ * semaphore ops won't be able to run: If the
* previous value was too small, then the new
* value will be too small, too.
*/
@@ -2108,7 +2110,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
queue.dupsop = dupsop;
error = perform_atomic_semop(sma, &queue);
- if (error == 0) { /* non-blocking succesfull path */
+ if (error == 0) { /* non-blocking successful path */
DEFINE_WAKE_Q(wake_q);
/*