author     Linus Torvalds <torvalds@linux-foundation.org>   2009-01-06 17:10:04 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-01-06 17:10:04 -0800
commit     f94181da7192f4ed8ccb1b633ea4ce56954df130 (patch)
tree       2e28785f2df447573a11fbdd611dc19eb3fcb794 /kernel
parent     932adbed6d99cc373fc3433d701b3a594fea872c (diff)
parent     fdbc0450df12cc9cb397f3497db4b0cad7c1a8ff (diff)
Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  rcu: fix rcutorture bug
  rcu: eliminate synchronize_rcu_xxx macro
  rcu: make treercu safe for suspend and resume
  rcu: fix rcutree grace-period-latency bug on small systems
  futex: catch certain asymmetric (get|put)_futex_key calls
  futex: make futex_(get|put)_key() calls symmetric
  locking, percpu counters: introduce separate lock classes
  swiotlb: clean up EXPORT_SYMBOL usage
  swiotlb: remove unnecessary declaration
  swiotlb: replace architecture-specific swiotlb.h with linux/swiotlb.h
  swiotlb: add support for systems with highmem
  swiotlb: store phys address in io_tlb_orig_addr array
  swiotlb: add hwdev to swiotlb_phys_to_bus() / swiotlb_sg_to_bus()
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/futex.c       72
-rw-r--r--  kernel/rcupdate.c    11
-rw-r--r--  kernel/rcupreempt.c  11
-rw-r--r--  kernel/rcutorture.c  18
-rw-r--r--  kernel/rcutree.c     13
5 files changed, 72 insertions, 53 deletions
diff --git a/kernel/futex.c b/kernel/futex.c
index 7c6cbabe52b3..002aa189eb09 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -170,8 +170,11 @@ static void get_futex_key_refs(union futex_key *key)
*/
static void drop_futex_key_refs(union futex_key *key)
{
- if (!key->both.ptr)
+ if (!key->both.ptr) {
+ /* If we're here then we tried to put a key we failed to get */
+ WARN_ON_ONCE(1);
return;
+ }
switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
case FUT_OFF_INODE:
@@ -730,8 +733,8 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
}
spin_unlock(&hb->lock);
-out:
put_futex_key(fshared, &key);
+out:
return ret;
}
@@ -755,7 +758,7 @@ retryfull:
goto out;
ret = get_futex_key(uaddr2, fshared, &key2);
if (unlikely(ret != 0))
- goto out;
+ goto out_put_key1;
hb1 = hash_futex(&key1);
hb2 = hash_futex(&key2);
@@ -777,12 +780,12 @@ retry:
* but we might get them from range checking
*/
ret = op_ret;
- goto out;
+ goto out_put_keys;
#endif
if (unlikely(op_ret != -EFAULT)) {
ret = op_ret;
- goto out;
+ goto out_put_keys;
}
/*
@@ -796,7 +799,7 @@ retry:
ret = futex_handle_fault((unsigned long)uaddr2,
attempt);
if (ret)
- goto out;
+ goto out_put_keys;
goto retry;
}
@@ -834,10 +837,11 @@ retry:
spin_unlock(&hb1->lock);
if (hb1 != hb2)
spin_unlock(&hb2->lock);
-out:
+out_put_keys:
put_futex_key(fshared, &key2);
+out_put_key1:
put_futex_key(fshared, &key1);
-
+out:
return ret;
}
@@ -854,13 +858,13 @@ static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
struct futex_q *this, *next;
int ret, drop_count = 0;
- retry:
+retry:
ret = get_futex_key(uaddr1, fshared, &key1);
if (unlikely(ret != 0))
goto out;
ret = get_futex_key(uaddr2, fshared, &key2);
if (unlikely(ret != 0))
- goto out;
+ goto out_put_key1;
hb1 = hash_futex(&key1);
hb2 = hash_futex(&key2);
@@ -882,7 +886,7 @@ static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
if (!ret)
goto retry;
- return ret;
+ goto out_put_keys;
}
if (curval != *cmpval) {
ret = -EAGAIN;
@@ -927,9 +931,11 @@ out_unlock:
while (--drop_count >= 0)
drop_futex_key_refs(&key1);
-out:
+out_put_keys:
put_futex_key(fshared, &key2);
+out_put_key1:
put_futex_key(fshared, &key1);
+out:
return ret;
}
@@ -990,7 +996,7 @@ static int unqueue_me(struct futex_q *q)
int ret = 0;
/* In the common case we don't take the spinlock, which is nice. */
- retry:
+retry:
lock_ptr = q->lock_ptr;
barrier();
if (lock_ptr != NULL) {
@@ -1172,11 +1178,11 @@ static int futex_wait(u32 __user *uaddr, int fshared,
q.pi_state = NULL;
q.bitset = bitset;
- retry:
+retry:
q.key = FUTEX_KEY_INIT;
ret = get_futex_key(uaddr, fshared, &q.key);
if (unlikely(ret != 0))
- goto out_release_sem;
+ goto out;
hb = queue_lock(&q);
@@ -1204,6 +1210,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
if (unlikely(ret)) {
queue_unlock(&q, hb);
+ put_futex_key(fshared, &q.key);
ret = get_user(uval, uaddr);
@@ -1213,7 +1220,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
}
ret = -EWOULDBLOCK;
if (uval != val)
- goto out_unlock_release_sem;
+ goto out_unlock_put_key;
/* Only actually queue if *uaddr contained val. */
queue_me(&q, hb);
@@ -1305,11 +1312,11 @@ static int futex_wait(u32 __user *uaddr, int fshared,
return -ERESTART_RESTARTBLOCK;
}
- out_unlock_release_sem:
+out_unlock_put_key:
queue_unlock(&q, hb);
-
- out_release_sem:
put_futex_key(fshared, &q.key);
+
+out:
return ret;
}
@@ -1358,16 +1365,16 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
}
q.pi_state = NULL;
- retry:
+retry:
q.key = FUTEX_KEY_INIT;
ret = get_futex_key(uaddr, fshared, &q.key);
if (unlikely(ret != 0))
- goto out_release_sem;
+ goto out;
- retry_unlocked:
+retry_unlocked:
hb = queue_lock(&q);
- retry_locked:
+retry_locked:
ret = lock_taken = 0;
/*
@@ -1388,14 +1395,14 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
*/
if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(current))) {
ret = -EDEADLK;
- goto out_unlock_release_sem;
+ goto out_unlock_put_key;
}
/*
* Surprise - we got the lock. Just return to userspace:
*/
if (unlikely(!curval))
- goto out_unlock_release_sem;
+ goto out_unlock_put_key;
uval = curval;
@@ -1431,7 +1438,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
* We took the lock due to owner died take over.
*/
if (unlikely(lock_taken))
- goto out_unlock_release_sem;
+ goto out_unlock_put_key;
/*
* We dont have the lock. Look up the PI state (or create it if
@@ -1470,7 +1477,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
goto retry_locked;
}
default:
- goto out_unlock_release_sem;
+ goto out_unlock_put_key;
}
}
@@ -1561,16 +1568,17 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
destroy_hrtimer_on_stack(&to->timer);
return ret != -EINTR ? ret : -ERESTARTNOINTR;
- out_unlock_release_sem:
+out_unlock_put_key:
queue_unlock(&q, hb);
- out_release_sem:
+out_put_key:
put_futex_key(fshared, &q.key);
+out:
if (to)
destroy_hrtimer_on_stack(&to->timer);
return ret;
- uaddr_faulted:
+uaddr_faulted:
/*
* We have to r/w *(int __user *)uaddr, and we have to modify it
* atomically. Therefore, if we continue to fault after get_user()
@@ -1583,7 +1591,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
if (attempt++) {
ret = futex_handle_fault((unsigned long)uaddr, attempt);
if (ret)
- goto out_release_sem;
+ goto out_put_key;
goto retry_unlocked;
}
@@ -1675,9 +1683,9 @@ retry_unlocked:
out_unlock:
spin_unlock(&hb->lock);
-out:
put_futex_key(fshared, &key);
+out:
return ret;
pi_faulted:
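
The futex.c hunks above all serve one goal: every successful get_futex_key() is balanced by exactly one put_futex_key() on every exit path, with one label per acquired key so an error drops only what was actually taken. A minimal sketch of that label-per-resource unwind idiom, using hypothetical acquire_a()/acquire_b()/release_a()/release_b() helpers rather than the futex code itself:

/* Sketch only: acquire_a/acquire_b stand in for get_futex_key(),
 * release_a/release_b for put_futex_key(); 0 means success. */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static void release_a(void) { }
static void release_b(void) { }

static int do_both(void)
{
        int ret;

        ret = acquire_a();
        if (ret)
                goto out;               /* nothing held yet */
        ret = acquire_b();
        if (ret)
                goto out_put_a;         /* only A is held here */

        /* ... work with both resources; later failures drop B then A ... */

        release_b();
out_put_a:
        release_a();
out:
        return ret;
}

int main(void)
{
        return do_both();
}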
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index ad63af8b2521..d92a76a881aa 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -77,8 +77,15 @@ void wakeme_after_rcu(struct rcu_head *head)
* sections are delimited by rcu_read_lock() and rcu_read_unlock(),
* and may be nested.
*/
-void synchronize_rcu(void); /* Makes kernel-doc tools happy */
-synchronize_rcu_xxx(synchronize_rcu, call_rcu)
+void synchronize_rcu(void)
+{
+ struct rcu_synchronize rcu;
+ init_completion(&rcu.completion);
+ /* Will wake me after RCU finished. */
+ call_rcu(&rcu.head, wakeme_after_rcu);
+ /* Wait for it. */
+ wait_for_completion(&rcu.completion);
+}
EXPORT_SYMBOL_GPL(synchronize_rcu);
static void rcu_barrier_callback(struct rcu_head *notused)
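
The rcupdate.c change open-codes synchronize_rcu(): it queues a callback with call_rcu() and sleeps on a completion until that callback runs, i.e. until every RCU read-side critical section that began before the call has finished. A sketch of the classic updater pattern this primitive supports (illustrative kernel-context code with hypothetical struct and variable names, not part of this patch; updaters are assumed to be serialized by the caller):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct cfg {
        int value;
};
static struct cfg *active_cfg;          /* hypothetical RCU-protected pointer */

/* Reader side: delimited by rcu_read_lock()/rcu_read_unlock(). */
static int cfg_read_value(void)
{
        struct cfg *c;
        int v = 0;

        rcu_read_lock();
        c = rcu_dereference(active_cfg);
        if (c)
                v = c->value;
        rcu_read_unlock();
        return v;
}

/* Updater side: publish the new copy, wait a grace period, free the old.
 * synchronize_rcu() returns only after all pre-existing readers are done,
 * so no reader can still be using the pointer that gets freed. */
static void cfg_update_value(int v)
{
        struct cfg *newc = kmalloc(sizeof(*newc), GFP_KERNEL);
        struct cfg *oldc = active_cfg;  /* real code serializes updaters */

        if (!newc)
                return;
        newc->value = v;
        rcu_assign_pointer(active_cfg, newc);
        synchronize_rcu();
        kfree(oldc);
}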
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index f9dc8f3720f6..33cfc50781f9 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -1177,7 +1177,16 @@ EXPORT_SYMBOL_GPL(call_rcu_sched);
* in -rt this does -not- necessarily result in all currently executing
* interrupt -handlers- having completed.
*/
-synchronize_rcu_xxx(__synchronize_sched, call_rcu_sched)
+void __synchronize_sched(void)
+{
+ struct rcu_synchronize rcu;
+
+ init_completion(&rcu.completion);
+ /* Will wake me after RCU finished. */
+ call_rcu_sched(&rcu.head, wakeme_after_rcu);
+ /* Wait for it. */
+ wait_for_completion(&rcu.completion);
+}
EXPORT_SYMBOL_GPL(__synchronize_sched);
/*
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 3245b40952c6..1cff28db56b6 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -136,7 +136,7 @@ static int stutter_pause_test = 0;
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
-#define FULLSTOP_SIGNALED 1 /* Bail due to signal. */
+#define FULLSTOP_SHUTDOWN 1 /* Bail due to system shutdown/panic. */
#define FULLSTOP_CLEANUP 2 /* Orderly shutdown. */
static int fullstop; /* stop generating callbacks at test end. */
DEFINE_MUTEX(fullstop_mutex); /* protect fullstop transitions and */
@@ -151,12 +151,10 @@ rcutorture_shutdown_notify(struct notifier_block *unused1,
{
if (fullstop)
return NOTIFY_DONE;
- if (signal_pending(current)) {
- mutex_lock(&fullstop_mutex);
- if (!ACCESS_ONCE(fullstop))
- fullstop = FULLSTOP_SIGNALED;
- mutex_unlock(&fullstop_mutex);
- }
+ mutex_lock(&fullstop_mutex);
+ if (!fullstop)
+ fullstop = FULLSTOP_SHUTDOWN;
+ mutex_unlock(&fullstop_mutex);
return NOTIFY_DONE;
}
@@ -624,7 +622,7 @@ rcu_torture_writer(void *arg)
rcu_stutter_wait();
} while (!kthread_should_stop() && !fullstop);
VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
- while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
+ while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
schedule_timeout_uninterruptible(1);
return 0;
}
@@ -649,7 +647,7 @@ rcu_torture_fakewriter(void *arg)
} while (!kthread_should_stop() && !fullstop);
VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
- while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
+ while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
schedule_timeout_uninterruptible(1);
return 0;
}
@@ -759,7 +757,7 @@ rcu_torture_reader(void *arg)
VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
if (irqreader && cur_ops->irqcapable)
del_timer_sync(&t);
- while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
+ while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
schedule_timeout_uninterruptible(1);
return 0;
}
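
The rcutorture.c change retires the old FULLSTOP_SIGNALED path: the shutdown notifier no longer checks for a pending signal and instead unconditionally latches FULLSTOP_SHUTDOWN under fullstop_mutex, so the torture threads quiesce on shutdown or panic. For context, a sketch of how such a notifier is typically wired up via register_reboot_notifier() (names prefixed my_ are illustrative, not from the patch):

#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/mutex.h>

static int my_fullstop;
static DEFINE_MUTEX(my_fullstop_mutex);

static int my_shutdown_notify(struct notifier_block *unused1,
                              unsigned long unused2, void *unused3)
{
        mutex_lock(&my_fullstop_mutex);
        if (!my_fullstop)
                my_fullstop = 1;        /* analogous to FULLSTOP_SHUTDOWN */
        mutex_unlock(&my_fullstop_mutex);
        return NOTIFY_DONE;
}

static struct notifier_block my_shutdown_nb = {
        .notifier_call = my_shutdown_notify,
};

/* In module init: register_reboot_notifier(&my_shutdown_nb);
 * in module exit: unregister_reboot_notifier(&my_shutdown_nb). */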
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index a342b032112c..f2d8638e6c60 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -79,7 +79,10 @@ struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
#ifdef CONFIG_NO_HZ
-DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks);
+DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
+ .dynticks_nesting = 1,
+ .dynticks = 1,
+};
#endif /* #ifdef CONFIG_NO_HZ */
static int blimit = 10; /* Maximum callbacks per softirq. */
@@ -572,6 +575,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
/* Special-case the common single-level case. */
if (NUM_RCU_NODES == 1) {
rnp->qsmask = rnp->qsmaskinit;
+ rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
spin_unlock_irqrestore(&rnp->lock, flags);
return;
}
@@ -1379,13 +1383,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
static void __cpuinit rcu_online_cpu(int cpu)
{
-#ifdef CONFIG_NO_HZ
- struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
-
- rdtp->dynticks_nesting = 1;
- rdtp->dynticks |= 1; /* need consecutive #s even for hotplug. */
- rdtp->dynticks_nmi = (rdtp->dynticks_nmi + 1) & ~0x1;
-#endif /* #ifdef CONFIG_NO_HZ */
rcu_init_percpu_data(cpu, &rcu_state);
rcu_init_percpu_data(cpu, &rcu_bh_state);
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
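
The rcutree.c hunk replaces runtime initialization of the per-CPU dynticks state in rcu_online_cpu() with a static DEFINE_PER_CPU initializer, so the fields already hold valid values before any CPU-hotplug callback runs (the suspend/resume case the merge message mentions). A sketch of that static per-CPU initialization pattern, with an illustrative structure rather than the rcu_dynticks definition itself:

#include <linux/percpu.h>

struct my_state {
        int nesting;
        int counter;
};

/* Valid from boot onward, before any CPU-online callback has run. */
static DEFINE_PER_CPU(struct my_state, my_state) = {
        .nesting = 1,
        .counter = 1,
};

static int read_my_counter(int cpu)
{
        return per_cpu(my_state, cpu).counter;
}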