Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c | 5
-rw-r--r--  kernel/cgroup_freezer.c | 11
-rw-r--r--  kernel/cpuset.c | 29
-rw-r--r--  kernel/events/core.c | 95
-rw-r--r--  kernel/events/internal.h | 3
-rw-r--r--  kernel/events/ring_buffer.c | 3
-rw-r--r--  kernel/exit.c | 9
-rw-r--r--  kernel/fork.c | 5
-rw-r--r--  kernel/futex.c | 28
-rw-r--r--  kernel/hrtimer.c | 6
-rw-r--r--  kernel/hung_task.c | 14
-rw-r--r--  kernel/irq/manage.c | 7
-rw-r--r--  kernel/irq/spurious.c | 6
-rw-r--r--  kernel/jump_label.c | 3
-rw-r--r--  kernel/lockdep.c | 39
-rw-r--r--  kernel/panic.c | 17
-rw-r--r--  kernel/power/hibernate.c | 37
-rw-r--r--  kernel/power/main.c | 3
-rw-r--r--  kernel/printk.c | 5
-rw-r--r--  kernel/ptrace.c | 13
-rw-r--r--  kernel/rtmutex-debug.c | 1
-rw-r--r--  kernel/sched.c | 17
-rw-r--r--  kernel/sched_fair.c | 159
-rw-r--r--  kernel/sched_features.h | 1
-rw-r--r--  kernel/sched_rt.c | 3
-rw-r--r--  kernel/signal.c | 2
-rw-r--r--  kernel/sysctl_binary.c | 2
-rw-r--r--  kernel/time/alarmtimer.c | 2
-rw-r--r--  kernel/time/clocksource.c | 74
-rw-r--r--  kernel/time/tick-broadcast.c | 2
-rw-r--r--  kernel/time/timekeeping.c | 92
-rw-r--r--  kernel/timer.c | 64
-rw-r--r--  kernel/trace/ftrace.c | 5
-rw-r--r--  kernel/trace/trace_events.c | 1
-rw-r--r--  kernel/trace/trace_events_filter.c | 13
-rw-r--r--  kernel/wait.c | 4
36 files changed, 639 insertions(+), 141 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index d9d5648f3cdc..a184470cf9b5 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2098,11 +2098,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
continue;
/* get old css_set pointer */
task_lock(tsk);
- if (tsk->flags & PF_EXITING) {
- /* ignore this task if it's going away */
- task_unlock(tsk);
- continue;
- }
oldcg = tsk->cgroups;
get_css_set(oldcg);
task_unlock(tsk);
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 5e828a2ca8e6..213c0351dad8 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -153,6 +153,13 @@ static void freezer_destroy(struct cgroup_subsys *ss,
kfree(cgroup_freezer(cgroup));
}
+/* task is frozen or will freeze immediately when next it gets woken */
+static bool is_task_frozen_enough(struct task_struct *task)
+{
+ return frozen(task) ||
+ (task_is_stopped_or_traced(task) && freezing(task));
+}
+
/*
* The call to cgroup_lock() in the freezer.state write method prevents
* a write to that file racing against an attach, and hence the
@@ -231,7 +238,7 @@ static void update_if_frozen(struct cgroup *cgroup,
cgroup_iter_start(cgroup, &it);
while ((task = cgroup_iter_next(cgroup, &it))) {
ntotal++;
- if (frozen(task))
+ if (is_task_frozen_enough(task))
nfrozen++;
}
@@ -284,7 +291,7 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
while ((task = cgroup_iter_next(cgroup, &it))) {
if (!freeze_task(task, true))
continue;
- if (frozen(task))
+ if (is_task_frozen_enough(task))
continue;
if (!freezing(task) && !freezer_should_skip(task))
num_cant_freeze_now++;
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 9fe58c46a426..0b1712dba587 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -123,6 +123,19 @@ static inline struct cpuset *task_cs(struct task_struct *task)
struct cpuset, css);
}
+#ifdef CONFIG_NUMA
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+ return task->mempolicy;
+}
+#else
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+ return false;
+}
+#endif
+
+
/* bits in struct cpuset flags field */
typedef enum {
CS_CPU_EXCLUSIVE,
@@ -949,7 +962,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
static void cpuset_change_task_nodemask(struct task_struct *tsk,
nodemask_t *newmems)
{
- bool masks_disjoint = !nodes_intersects(*newmems, tsk->mems_allowed);
+ bool need_loop;
repeat:
/*
@@ -962,6 +975,14 @@ repeat:
return;
task_lock(tsk);
+ /*
+ * Determine if a loop is necessary if another thread is doing
+ * get_mems_allowed(). If at least one node remains unchanged and
+ * tsk does not have a mempolicy, then an empty nodemask will not be
+ * possible when mems_allowed is larger than a word.
+ */
+ need_loop = task_has_mempolicy(tsk) ||
+ !nodes_intersects(*newmems, tsk->mems_allowed);
nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
@@ -981,11 +1002,9 @@ repeat:
/*
* Allocation of memory is very fast, we needn't sleep when waiting
- * for the read-side. No wait is necessary, however, if at least one
- * node remains unchanged.
+ * for the read-side.
*/
- while (masks_disjoint &&
- ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
+ while (need_loop && ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
task_unlock(tsk);
if (!task_curr(tsk))
yield();
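
The need_loop logic above can be illustrated outside the kernel. The following throwaway userspace program (the two-word layout and node numbers are made up for illustration; this is not kernel code) shows how a lockless reader that scans a multi-word nodemask in order can observe an all-zero mask while the writer moves from an old mask to a disjoint new one, which is exactly the case need_loop guards against. If the two masks shared at least one node, that bit would remain set across both writer steps.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t mems[2] = { 0, 1 };  /* old mems_allowed: a node in word 1 only */
        uint64_t newm[2] = { 1, 0 };  /* new mems_allowed: a node in word 0 only */
        uint64_t seen[2];

        seen[0] = mems[0];            /* reader scans word 0 first: sees 0 */

        mems[0] |= newm[0];           /* writer: the nodes_or() step */
        mems[1] |= newm[1];
        mems[0] = newm[0];            /* writer: the final copy of *newmems */
        mems[1] = newm[1];

        seen[1] = mems[1];            /* reader scans word 1 last: sees 0 */

        printf("reader observed %llx %llx -> %s\n",
               (unsigned long long)seen[0], (unsigned long long)seen[1],
               (seen[0] | seen[1]) ? "non-empty" : "empty (the race)");
        return 0;
    }
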
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0e8457da6f95..58690af323e4 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -185,6 +185,9 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);
+static void ring_buffer_attach(struct perf_event *event,
+ struct ring_buffer *rb);
+
void __weak perf_event_print_debug(void) { }
extern __weak const char *perf_pmu_name(void)
@@ -2171,9 +2174,10 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
*/
cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
- perf_event_sched_in(cpuctx, ctx, task);
+ if (ctx->nr_events)
+ cpuctx->task_ctx = ctx;
- cpuctx->task_ctx = ctx;
+ perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
perf_pmu_enable(ctx->pmu);
perf_ctx_unlock(cpuctx, ctx);
@@ -3190,12 +3194,33 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
struct ring_buffer *rb;
unsigned int events = POLL_HUP;
+ /*
+ * Race between perf_event_set_output() and perf_poll(): perf_poll()
+ * grabs the rb reference but perf_event_set_output() overrides it.
+ * Here is the timeline for two threads T1, T2:
+ * t0: T1, rb = rcu_dereference(event->rb)
+ * t1: T2, old_rb = event->rb
+ * t2: T2, event->rb = new rb
+ * t3: T2, ring_buffer_detach(old_rb)
+ * t4: T1, ring_buffer_attach(rb1)
+ * t5: T1, poll_wait(event->waitq)
+ *
+ * To avoid this problem, we grab mmap_mutex in perf_poll()
+ * thereby ensuring that the assignment of the new ring buffer
+ * and the detachment of the old buffer appear atomic to perf_poll()
+ */
+ mutex_lock(&event->mmap_mutex);
+
rcu_read_lock();
rb = rcu_dereference(event->rb);
- if (rb)
+ if (rb) {
+ ring_buffer_attach(event, rb);
events = atomic_xchg(&rb->poll, 0);
+ }
rcu_read_unlock();
+ mutex_unlock(&event->mmap_mutex);
+
poll_wait(file, &event->waitq, wait);
return events;
@@ -3496,6 +3521,53 @@ unlock:
return ret;
}
+static void ring_buffer_attach(struct perf_event *event,
+ struct ring_buffer *rb)
+{
+ unsigned long flags;
+
+ if (!list_empty(&event->rb_entry))
+ return;
+
+ spin_lock_irqsave(&rb->event_lock, flags);
+ if (!list_empty(&event->rb_entry))
+ goto unlock;
+
+ list_add(&event->rb_entry, &rb->event_list);
+unlock:
+ spin_unlock_irqrestore(&rb->event_lock, flags);
+}
+
+static void ring_buffer_detach(struct perf_event *event,
+ struct ring_buffer *rb)
+{
+ unsigned long flags;
+
+ if (list_empty(&event->rb_entry))
+ return;
+
+ spin_lock_irqsave(&rb->event_lock, flags);
+ list_del_init(&event->rb_entry);
+ wake_up_all(&event->waitq);
+ spin_unlock_irqrestore(&rb->event_lock, flags);
+}
+
+static void ring_buffer_wakeup(struct perf_event *event)
+{
+ struct ring_buffer *rb;
+
+ rcu_read_lock();
+ rb = rcu_dereference(event->rb);
+ if (!rb)
+ goto unlock;
+
+ list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
+ wake_up_all(&event->waitq);
+
+unlock:
+ rcu_read_unlock();
+}
+
static void rb_free_rcu(struct rcu_head *rcu_head)
{
struct ring_buffer *rb;
@@ -3521,9 +3593,19 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
static void ring_buffer_put(struct ring_buffer *rb)
{
+ struct perf_event *event, *n;
+ unsigned long flags;
+
if (!atomic_dec_and_test(&rb->refcount))
return;
+ spin_lock_irqsave(&rb->event_lock, flags);
+ list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
+ list_del_init(&event->rb_entry);
+ wake_up_all(&event->waitq);
+ }
+ spin_unlock_irqrestore(&rb->event_lock, flags);
+
call_rcu(&rb->rcu_head, rb_free_rcu);
}
@@ -3546,6 +3628,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
vma->vm_mm->pinned_vm -= event->mmap_locked;
rcu_assign_pointer(event->rb, NULL);
+ ring_buffer_detach(event, rb);
mutex_unlock(&event->mmap_mutex);
ring_buffer_put(rb);
@@ -3700,7 +3783,7 @@ static const struct file_operations perf_fops = {
void perf_event_wakeup(struct perf_event *event)
{
- wake_up_all(&event->waitq);
+ ring_buffer_wakeup(event);
if (event->pending_kill) {
kill_fasync(&event->fasync, SIGIO, event->pending_kill);
@@ -5822,6 +5905,8 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
INIT_LIST_HEAD(&event->group_entry);
INIT_LIST_HEAD(&event->event_entry);
INIT_LIST_HEAD(&event->sibling_list);
+ INIT_LIST_HEAD(&event->rb_entry);
+
init_waitqueue_head(&event->waitq);
init_irq_work(&event->pending, perf_pending_event);
@@ -6028,6 +6113,8 @@ set:
old_rb = event->rb;
rcu_assign_pointer(event->rb, rb);
+ if (old_rb)
+ ring_buffer_detach(event, old_rb);
ret = 0;
unlock:
mutex_unlock(&event->mmap_mutex);
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 09097dd8116c..64568a699375 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -22,6 +22,9 @@ struct ring_buffer {
local_t lost; /* nr records lost */
long watermark; /* wakeup watermark */
+ /* poll crap */
+ spinlock_t event_lock;
+ struct list_head event_list;
struct perf_event_mmap_page *user_page;
void *data_pages[0];
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index a2a29205cc0f..7f3011c6b57f 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -209,6 +209,9 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
rb->writable = 1;
atomic_set(&rb->refcount, 1);
+
+ INIT_LIST_HEAD(&rb->event_list);
+ spin_lock_init(&rb->event_lock);
}
#ifndef CONFIG_PERF_USE_VMALLOC
diff --git a/kernel/exit.c b/kernel/exit.c
index d0b7d988f873..e6e01b959a0e 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1540,8 +1540,15 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
}
/* dead body doesn't have much to contribute */
- if (p->exit_state == EXIT_DEAD)
+ if (unlikely(p->exit_state == EXIT_DEAD)) {
+ /*
+ * But do not ignore this task until the tracer does
+ * wait_task_zombie()->do_notify_parent().
+ */
+ if (likely(!ptrace) && unlikely(ptrace_reparented(p)))
+ wo->notask_error = 0;
return 0;
+ }
/* slay zombie? */
if (p->exit_state == EXIT_ZOMBIE) {
diff --git a/kernel/fork.c b/kernel/fork.c
index ba0d17261329..da4a6a10d088 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -162,7 +162,6 @@ static void account_kernel_stack(struct thread_info *ti, int account)
void free_task(struct task_struct *tsk)
{
- prop_local_destroy_single(&tsk->dirties);
account_kernel_stack(tsk->stack, -1);
free_thread_info(tsk->stack);
rt_mutex_debug_task_free(tsk);
@@ -274,10 +273,6 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
tsk->stack = ti;
- err = prop_local_init_single(&tsk->dirties);
- if (err)
- goto out;
-
setup_thread_stack(tsk, orig);
clear_user_return_notifier(tsk);
clear_tsk_need_resched(tsk);
diff --git a/kernel/futex.c b/kernel/futex.c
index ea87f4d2f455..1614be20173d 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -314,17 +314,29 @@ again:
#endif
lock_page(page_head);
+
+ /*
+ * If page_head->mapping is NULL, then it cannot be a PageAnon
+ * page; but it might be the ZERO_PAGE or in the gate area or
+ * in a special mapping (all cases which we are happy to fail);
+ * or it may have been a good file page when get_user_pages_fast
+ * found it, but truncated or holepunched or subjected to
+ * invalidate_complete_page2 before we got the page lock (also
+ * cases which we are happy to fail). And we hold a reference,
+ * so refcount care in invalidate_complete_page's remove_mapping
+ * prevents drop_caches from setting mapping to NULL beneath us.
+ *
+ * The case we do have to guard against is when memory pressure made
+ * shmem_writepage move it from filecache to swapcache beneath us:
+ * an unlikely race, but we do need to retry for page_head->mapping.
+ */
if (!page_head->mapping) {
+ int shmem_swizzled = PageSwapCache(page_head);
unlock_page(page_head);
put_page(page_head);
- /*
- * ZERO_PAGE pages don't have a mapping. Avoid a busy loop
- * trying to find one. RW mapping would have COW'd (and thus
- * have a mapping) so this page is RO and won't ever change.
- */
- if ((page_head == ZERO_PAGE(address)))
- return -EFAULT;
- goto again;
+ if (shmem_swizzled)
+ goto again;
+ return -EFAULT;
}
/*
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 422e567eecf6..ae34bf51682b 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -885,10 +885,13 @@ static void __remove_hrtimer(struct hrtimer *timer,
struct hrtimer_clock_base *base,
unsigned long newstate, int reprogram)
{
+ struct timerqueue_node *next_timer;
if (!(timer->state & HRTIMER_STATE_ENQUEUED))
goto out;
- if (&timer->node == timerqueue_getnext(&base->active)) {
+ next_timer = timerqueue_getnext(&base->active);
+ timerqueue_del(&base->active, &timer->node);
+ if (&timer->node == next_timer) {
#ifdef CONFIG_HIGH_RES_TIMERS
/* Reprogram the clock event device. if enabled */
if (reprogram && hrtimer_hres_active()) {
@@ -901,7 +904,6 @@ static void __remove_hrtimer(struct hrtimer *timer,
}
#endif
}
- timerqueue_del(&base->active, &timer->node);
if (!timerqueue_getnext(&base->active))
base->cpu_base->active_bases &= ~(1 << base->index);
out:
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 8b1748d0172c..2e48ec0c2e91 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -74,11 +74,17 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
/*
* Ensure the task is not frozen.
- * Also, when a freshly created task is scheduled once, changes
- * its state to TASK_UNINTERRUPTIBLE without having ever been
- * switched out once, it musn't be checked.
+ * Also, skip vfork and any other user process that freezer should skip.
*/
- if (unlikely(t->flags & PF_FROZEN || !switch_count))
+ if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))
+ return;
+
+ /*
+ * When a freshly created task is scheduled once and changes its state to
+ * TASK_UNINTERRUPTIBLE without ever having been switched out, it
+ * mustn't be checked.
+ */
+ if (unlikely(!switch_count))
return;
if (switch_count != t->last_switch_count) {
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 67ce837ae52c..1da999f5e746 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -623,8 +623,9 @@ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
static int irq_wait_for_interrupt(struct irqaction *action)
{
+ set_current_state(TASK_INTERRUPTIBLE);
+
while (!kthread_should_stop()) {
- set_current_state(TASK_INTERRUPTIBLE);
if (test_and_clear_bit(IRQTF_RUNTHREAD,
&action->thread_flags)) {
@@ -632,7 +633,9 @@ static int irq_wait_for_interrupt(struct irqaction *action)
return 0;
}
schedule();
+ set_current_state(TASK_INTERRUPTIBLE);
}
+ __set_current_state(TASK_RUNNING);
return -1;
}
@@ -1596,7 +1599,7 @@ int request_percpu_irq(unsigned int irq, irq_handler_t handler,
return -ENOMEM;
action->handler = handler;
- action->flags = IRQF_PERCPU;
+ action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
action->name = devname;
action->percpu_dev_id = dev_id;
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index aa57d5da18c1..dc813a948be2 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -84,7 +84,9 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
*/
action = desc->action;
if (!action || !(action->flags & IRQF_SHARED) ||
- (action->flags & __IRQF_TIMER) || !action->next)
+ (action->flags & __IRQF_TIMER) ||
+ (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
+ !action->next)
goto out;
/* Already running on another processor */
@@ -115,7 +117,7 @@ static int misrouted_irq(int irq)
struct irq_desc *desc;
int i, ok = 0;
- if (atomic_inc_return(&irq_poll_active) == 1)
+ if (atomic_inc_return(&irq_poll_active) != 1)
goto out;
irq_poll_cpu = smp_processor_id();
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index bbdfe2a462a0..66ff7109f697 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -66,8 +66,9 @@ void jump_label_inc(struct jump_label_key *key)
return;
jump_label_lock();
- if (atomic_add_return(1, &key->enabled) == 1)
+ if (atomic_read(&key->enabled) == 0)
jump_label_update(key, JUMP_LABEL_ENABLE);
+ atomic_inc(&key->enabled);
jump_label_unlock();
}
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index e69434b070da..e69d633d6aa6 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -44,6 +44,7 @@
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
+#include <linux/kmemcheck.h>
#include <asm/sections.h>
@@ -430,6 +431,7 @@ unsigned int max_lockdep_depth;
* about it later on, in lockdep_info().
*/
static int lockdep_init_error;
+static const char *lock_init_error;
static unsigned long lockdep_init_trace_data[20];
static struct stack_trace lockdep_init_trace = {
.max_entries = ARRAY_SIZE(lockdep_init_trace_data),
@@ -567,11 +569,12 @@ static void lockdep_print_held_locks(struct task_struct *curr)
}
}
-static void print_kernel_version(void)
+static void print_kernel_ident(void)
{
- printk("%s %.*s\n", init_utsname()->release,
+ printk("%s %.*s %s\n", init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
+ init_utsname()->version,
+ print_tainted());
}
static int very_verbose(struct lock_class *class)
@@ -655,6 +658,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
if (unlikely(!lockdep_initialized)) {
lockdep_init();
lockdep_init_error = 1;
+ lock_init_error = lock->name;
save_stack_trace(&lockdep_init_trace);
}
#endif
@@ -722,7 +726,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
class = look_up_lock_class(lock, subclass);
if (likely(class))
- return class;
+ goto out_set_class_cache;
/*
* Debug-check: all keys must be persistent!
@@ -807,6 +811,7 @@ out_unlock_set:
graph_unlock();
raw_local_irq_restore(flags);
+out_set_class_cache:
if (!subclass || force)
lock->class_cache[0] = class;
else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
@@ -1148,7 +1153,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
printk("\n");
printk("======================================================\n");
printk("[ INFO: possible circular locking dependency detected ]\n");
- print_kernel_version();
+ print_kernel_ident();
printk("-------------------------------------------------------\n");
printk("%s/%d is trying to acquire lock:\n",
curr->comm, task_pid_nr(curr));
@@ -1487,7 +1492,7 @@ print_bad_irq_dependency(struct task_struct *curr,
printk("======================================================\n");
printk("[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
irqclass, irqclass);
- print_kernel_version();
+ print_kernel_ident();
printk("------------------------------------------------------\n");
printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
curr->comm, task_pid_nr(curr),
@@ -1716,7 +1721,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
printk("\n");
printk("=============================================\n");
printk("[ INFO: possible recursive locking detected ]\n");
- print_kernel_version();
+ print_kernel_ident();
printk("---------------------------------------------\n");
printk("%s/%d is trying to acquire lock:\n",
curr->comm, task_pid_nr(curr));
@@ -2223,7 +2228,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
printk("\n");
printk("=================================\n");
printk("[ INFO: inconsistent lock state ]\n");
- print_kernel_version();
+ print_kernel_ident();
printk("---------------------------------\n");
printk("inconsistent {%s} -> {%s} usage.\n",
@@ -2288,7 +2293,7 @@ print_irq_inversion_bug(struct task_struct *curr,
printk("\n");
printk("=========================================================\n");
printk("[ INFO: possible irq lock inversion dependency detected ]\n");
- print_kernel_version();
+ print_kernel_ident();
printk("---------------------------------------------------------\n");
printk("%s/%d just changed the state of lock:\n",
curr->comm, task_pid_nr(curr));
@@ -2948,7 +2953,12 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
void lockdep_init_map(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass)
{
- memset(lock, 0, sizeof(*lock));
+ int i;
+
+ kmemcheck_mark_initialized(lock, sizeof(*lock));
+
+ for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+ lock->class_cache[i] = NULL;
#ifdef CONFIG_LOCK_STAT
lock->cpu = raw_smp_processor_id();
@@ -3169,6 +3179,7 @@ print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
printk("\n");
printk("=====================================\n");
printk("[ BUG: bad unlock balance detected! ]\n");
+ print_kernel_ident();
printk("-------------------------------------\n");
printk("%s/%d is trying to release lock (",
curr->comm, task_pid_nr(curr));
@@ -3613,6 +3624,7 @@ print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
printk("\n");
printk("=================================\n");
printk("[ BUG: bad contention detected! ]\n");
+ print_kernel_ident();
printk("---------------------------------\n");
printk("%s/%d is trying to contend lock (",
curr->comm, task_pid_nr(curr));
@@ -3968,7 +3980,8 @@ void __init lockdep_info(void)
#ifdef CONFIG_DEBUG_LOCKDEP
if (lockdep_init_error) {
- printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
+ printk("WARNING: lockdep init error! lock-%s was acquired"
+ "before lockdep_init\n", lock_init_error);
printk("Call stack leading to lockdep invocation was:\n");
print_stack_trace(&lockdep_init_trace, 0);
}
@@ -3987,6 +4000,7 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
printk("\n");
printk("=========================\n");
printk("[ BUG: held lock freed! ]\n");
+ print_kernel_ident();
printk("-------------------------\n");
printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
@@ -4044,6 +4058,7 @@ static void print_held_locks_bug(struct task_struct *curr)
printk("\n");
printk("=====================================\n");
printk("[ BUG: lock held at task exit time! ]\n");
+ print_kernel_ident();
printk("-------------------------------------\n");
printk("%s/%d is exiting with locks still held!\n",
curr->comm, task_pid_nr(curr));
@@ -4141,6 +4156,7 @@ void lockdep_sys_exit(void)
printk("\n");
printk("================================================\n");
printk("[ BUG: lock held when returning to user space! ]\n");
+ print_kernel_ident();
printk("------------------------------------------------\n");
printk("%s/%d is leaving the kernel with locks still held!\n",
curr->comm, curr->pid);
@@ -4160,6 +4176,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
printk("\n");
printk("===============================\n");
printk("[ INFO: suspicious RCU usage. ]\n");
+ print_kernel_ident();
printk("-------------------------------\n");
printk("%s:%d %s!\n", file, line, s);
printk("\nother info that might help us debug this:\n\n");
diff --git a/kernel/panic.c b/kernel/panic.c
index b26593604214..3458469eb7c3 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -237,11 +237,20 @@ void add_taint(unsigned flag)
* Can't trust the integrity of the kernel anymore.
* We don't call directly debug_locks_off() because the issue
* is not necessarily serious enough to set oops_in_progress to 1
- * Also we want to keep up lockdep for staging development and
- * post-warning case.
+ * Also we want to keep up lockdep for staging/out-of-tree
+ * development and post-warning case.
*/
- if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off())
- printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
+ switch (flag) {
+ case TAINT_CRAP:
+ case TAINT_OOT_MODULE:
+ case TAINT_WARN:
+ case TAINT_FIRMWARE_WORKAROUND:
+ break;
+
+ default:
+ if (__debug_locks_off())
+ printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
+ }
set_bit(flag, &tainted_mask);
}
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index b4511b6d3ef9..a6b0503574ee 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -55,6 +55,8 @@ enum {
static int hibernation_mode = HIBERNATION_SHUTDOWN;
+static bool freezer_test_done;
+
static const struct platform_hibernation_ops *hibernation_ops;
/**
@@ -345,11 +347,24 @@ int hibernation_snapshot(int platform_mode)
error = freeze_kernel_threads();
if (error)
- goto Close;
+ goto Cleanup;
+
+ if (hibernation_test(TEST_FREEZER) ||
+ hibernation_testmode(HIBERNATION_TESTPROC)) {
+
+ /*
+ * Indicate to the caller that we are returning due to a
+ * successful freezer test.
+ */
+ freezer_test_done = true;
+ goto Cleanup;
+ }
error = dpm_prepare(PMSG_FREEZE);
- if (error)
- goto Complete_devices;
+ if (error) {
+ dpm_complete(msg);
+ goto Cleanup;
+ }
suspend_console();
pm_restrict_gfp_mask();
@@ -378,8 +393,6 @@ int hibernation_snapshot(int platform_mode)
pm_restore_gfp_mask();
resume_console();
-
- Complete_devices:
dpm_complete(msg);
Close:
@@ -389,6 +402,10 @@ int hibernation_snapshot(int platform_mode)
Recover_platform:
platform_recover(platform_mode);
goto Resume_devices;
+
+ Cleanup:
+ swsusp_free();
+ goto Close;
}
/**
@@ -641,15 +658,13 @@ int hibernate(void)
if (error)
goto Finish;
- if (hibernation_test(TEST_FREEZER))
- goto Thaw;
-
- if (hibernation_testmode(HIBERNATION_TESTPROC))
- goto Thaw;
-
error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
if (error)
goto Thaw;
+ if (freezer_test_done) {
+ freezer_test_done = false;
+ goto Thaw;
+ }
if (in_suspend) {
unsigned int flags = 0;
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 71f49fe4377e..36e0f0903c32 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -290,13 +290,14 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
break;
}
- if (state < PM_SUSPEND_MAX && *s)
+ if (state < PM_SUSPEND_MAX && *s) {
error = enter_state(state);
if (error) {
suspend_stats.fail++;
dpm_save_failed_errno(error);
} else
suspend_stats.success++;
+ }
#endif
Exit:
diff --git a/kernel/printk.c b/kernel/printk.c
index dfd8f73dcb76..989e4a52da76 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -199,7 +199,7 @@ void __init setup_log_buf(int early)
unsigned long mem;
mem = memblock_alloc(new_log_buf_len, PAGE_SIZE);
- if (mem == MEMBLOCK_ERROR)
+ if (!mem)
return;
new_log_buf = __va(mem);
} else {
@@ -1292,10 +1292,11 @@ again:
raw_spin_lock(&logbuf_lock);
if (con_start != log_end)
retry = 1;
+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
if (retry && console_trylock())
goto again;
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
if (wake_klogd)
wake_up_klogd();
}
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 24d04477b257..78ab24a7b0e4 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -96,9 +96,20 @@ void __ptrace_unlink(struct task_struct *child)
*/
if (!(child->flags & PF_EXITING) &&
(child->signal->flags & SIGNAL_STOP_STOPPED ||
- child->signal->group_stop_count))
+ child->signal->group_stop_count)) {
child->jobctl |= JOBCTL_STOP_PENDING;
+ /*
+ * This is only possible if this thread was cloned by the
+ * traced task running in the stopped group; set the signal
+ * for future reports.
+ * FIXME: we should change ptrace_init_task() to handle this
+ * case.
+ */
+ if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
+ child->jobctl |= SIGSTOP;
+ }
+
/*
* If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
* @child in the butt. Note that @resume should be used iff @child
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index 8eafd1bd273e..16502d3a71c8 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -101,6 +101,7 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
printk("\n============================================\n");
printk( "[ BUG: circular locking deadlock detected! ]\n");
+ printk("%s\n", print_tainted());
printk( "--------------------------------------------\n");
printk("%s/%d is deadlocking current task %s/%d\n\n",
task->comm, task_pid_nr(task),
diff --git a/kernel/sched.c b/kernel/sched.c
index 0e9344a71be3..d6b149ccf925 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -71,6 +71,7 @@
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
+#include <linux/init_task.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
@@ -4810,6 +4811,9 @@ EXPORT_SYMBOL(wait_for_completion);
* This waits for either a completion of a specific task to be signaled or for a
* specified timeout to expire. The timeout is in jiffies. It is not
* interruptible.
+ *
+ * The return value is 0 if timed out, and positive (at least 1, or number of
+ * jiffies left till timeout) if completed.
*/
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
@@ -4824,6 +4828,8 @@ EXPORT_SYMBOL(wait_for_completion_timeout);
*
* This waits for completion of a specific task to be signaled. It is
* interruptible.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if completed.
*/
int __sched wait_for_completion_interruptible(struct completion *x)
{
@@ -4841,6 +4847,9 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
*
* This waits for either a completion of a specific task to be signaled or for a
* specified timeout to expire. It is interruptible. The timeout is in jiffies.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
+ * positive (at least 1, or number of jiffies left till timeout) if completed.
*/
long __sched
wait_for_completion_interruptible_timeout(struct completion *x,
@@ -4856,6 +4865,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
*
* This waits to be signaled for completion of a specific task. It can be
* interrupted by a kill signal.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if completed.
*/
int __sched wait_for_completion_killable(struct completion *x)
{
@@ -4874,6 +4885,9 @@ EXPORT_SYMBOL(wait_for_completion_killable);
* This waits for either a completion of a specific task to be
* signaled or for a specified timeout to expire. It can be
* interrupted by a kill signal. The timeout is in jiffies.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
+ * positive (at least 1, or number of jiffies left till timeout) if completed.
*/
long __sched
wait_for_completion_killable_timeout(struct completion *x,
@@ -6099,6 +6113,9 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
*/
idle->sched_class = &idle_sched_class;
ftrace_graph_init_idle_task(idle, cpu);
+#if defined(CONFIG_SMP)
+ sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
+#endif
}
/*
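
As a usage sketch for the return-value documentation added above (the completion, timeout and function here are hypothetical, not taken from this patch):

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    static DECLARE_COMPLETION(demo_done);

    /* Wait up to 100ms for demo_done to be completed by some other context. */
    static int demo_wait(void)
    {
        unsigned long left;

        /* 0 => timed out; >= 1 => completed, with 'left' jiffies to spare */
        left = wait_for_completion_timeout(&demo_done, msecs_to_jiffies(100));
        if (!left)
            return -ETIMEDOUT;
        return 0;
    }
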
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5c9e67923b7c..8a39fa3e3c6c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -772,19 +772,32 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
list_del_leaf_cfs_rq(cfs_rq);
}
+static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+{
+ long tg_weight;
+
+ /*
+ * Use this CPU's actual weight instead of the last load_contribution
+ * to gain a more accurate current total weight. See
+ * update_cfs_rq_load_contribution().
+ */
+ tg_weight = atomic_read(&tg->load_weight);
+ tg_weight -= cfs_rq->load_contribution;
+ tg_weight += cfs_rq->load.weight;
+
+ return tg_weight;
+}
+
static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
- long load_weight, load, shares;
+ long tg_weight, load, shares;
+ tg_weight = calc_tg_weight(tg, cfs_rq);
load = cfs_rq->load.weight;
- load_weight = atomic_read(&tg->load_weight);
- load_weight += load;
- load_weight -= cfs_rq->load_contribution;
-
shares = (tg->shares * load);
- if (load_weight)
- shares /= load_weight;
+ if (tg_weight)
+ shares /= tg_weight;
if (shares < MIN_SHARES)
shares = MIN_SHARES;
@@ -1743,7 +1756,7 @@ static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
- if (!cfs_rq->runtime_enabled || !cfs_rq->nr_running)
+ if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
return;
__return_cfs_rq_runtime(cfs_rq);
@@ -2036,36 +2049,100 @@ static void task_waking_fair(struct task_struct *p)
* Adding load to a group doesn't make a group heavier, but can cause movement
* of group shares between cpus. Assuming the shares were perfectly aligned one
* can calculate the shift in shares.
+ *
+ * Calculate the effective load difference if @wl is added (subtracted) to @tg
+ * on this @cpu and results in a total addition (subtraction) of @wg to the
+ * total group weight.
+ *
+ * Given a runqueue weight distribution (rw_i) we can compute a shares
+ * distribution (s_i) using:
+ *
+ * s_i = rw_i / \Sum rw_j (1)
+ *
+ * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
+ * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
+ * shares distribution (s_i):
+ *
+ * rw_i = { 2, 4, 1, 0 }
+ * s_i = { 2/7, 4/7, 1/7, 0 }
+ *
+ * As per wake_affine() we're interested in the load of two CPUs (the CPU the
+ * task used to run on and the CPU the waker is running on), we need to
+ * compute the effect of waking a task on either CPU and, in case of a sync
+ * wakeup, compute the effect of the current task going to sleep.
+ *
+ * So for a change of @wl to the local @cpu with an overall group weight change
+ * of @wg we can compute the new shares distribution (s'_i) using:
+ *
+ * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
+ *
+ * Suppose we're interested in CPUs 0 and 1, and want to compute the load
+ * differences in waking a task to CPU 0. The additional task changes the
+ * weight and shares distributions like:
+ *
+ * rw'_i = { 3, 4, 1, 0 }
+ * s'_i = { 3/8, 4/8, 1/8, 0 }
+ *
+ * We can then compute the difference in effective weight by using:
+ *
+ * dw_i = S * (s'_i - s_i) (3)
+ *
+ * Where 'S' is the group weight as seen by its parent.
+ *
+ * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
+ * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
+ * 4/7) times the weight of the group.
*/
static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
{
struct sched_entity *se = tg->se[cpu];
- if (!tg->parent)
+ if (!tg->parent) /* the trivial, non-cgroup case */
return wl;
for_each_sched_entity(se) {
- long lw, w;
+ long w, W;
tg = se->my_q->tg;
- w = se->my_q->load.weight;
- /* use this cpu's instantaneous contribution */
- lw = atomic_read(&tg->load_weight);
- lw -= se->my_q->load_contribution;
- lw += w + wg;
+ /*
+ * W = @wg + \Sum rw_j
+ */
+ W = wg + calc_tg_weight(tg, se->my_q);
- wl += w;
+ /*
+ * w = rw_i + @wl
+ */
+ w = se->my_q->load.weight + wl;
- if (lw > 0 && wl < lw)
- wl = (wl * tg->shares) / lw;
+ /*
+ * wl = S * s'_i; see (2)
+ */
+ if (W > 0 && w < W)
+ wl = (w * tg->shares) / W;
else
wl = tg->shares;
- /* zero point is MIN_SHARES */
+ /*
+ * Per the above, wl is the new se->load.weight value; since
+ * those are clipped to [MIN_SHARES, ...) do so now. See
+ * calc_cfs_shares().
+ */
if (wl < MIN_SHARES)
wl = MIN_SHARES;
+
+ /*
+ * wl = dw_i = S * (s'_i - s_i); see (3)
+ */
wl -= se->load.weight;
+
+ /*
+ * Recursively apply this logic to all parent groups to compute
+ * the final effective load change on the root group. Since
+ * only the @tg group gets extra weight, all parent groups can
+ * only redistribute existing shares. @wl is the shift in shares
+ * resulting from this level per the above.
+ */
wg = 0;
}
@@ -2249,7 +2326,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
int cpu = smp_processor_id();
int prev_cpu = task_cpu(p);
struct sched_domain *sd;
- int i;
+ struct sched_group *sg;
+ int i, smt = 0;
/*
* If the task is going to be woken-up on this cpu and if it is
@@ -2269,25 +2347,40 @@ static int select_idle_sibling(struct task_struct *p, int target)
* Otherwise, iterate the domains and find an eligible idle cpu.
*/
rcu_read_lock();
+again:
for_each_domain(target, sd) {
+ if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
+ continue;
+
+ if (smt && !(sd->flags & SD_SHARE_CPUPOWER))
+ break;
+
if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
break;
- for_each_cpu_and(i, sched_domain_span(sd), tsk_cpus_allowed(p)) {
- if (idle_cpu(i)) {
- target = i;
- break;
+ sg = sd->groups;
+ do {
+ if (!cpumask_intersects(sched_group_cpus(sg),
+ tsk_cpus_allowed(p)))
+ goto next;
+
+ for_each_cpu(i, sched_group_cpus(sg)) {
+ if (!idle_cpu(i))
+ goto next;
}
- }
- /*
- * Lets stop looking for an idle sibling when we reached
- * the domain that spans the current cpu and prev_cpu.
- */
- if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
- cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
- break;
+ target = cpumask_first_and(sched_group_cpus(sg),
+ tsk_cpus_allowed(p));
+ goto done;
+next:
+ sg = sg->next;
+ } while (sg != sd->groups);
+ }
+ if (!smt) {
+ smt = 1;
+ goto again;
}
+done:
rcu_read_unlock();
return target;
@@ -3511,7 +3604,7 @@ static bool update_sd_pick_busiest(struct sched_domain *sd,
}
/**
- * update_sd_lb_stats - Update sched_group's statistics for load balancing.
+ * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
* @sd: sched_domain whose statistics are to be updated.
* @this_cpu: Cpu for which load balance is currently performed.
* @idle: Idle status of this_cpu
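
The rw_i = {2, 4, 1, 0} example in the effective_load() comment above can be checked numerically. This throwaway userspace program (not kernel code; S = 56 is chosen only so the deltas come out whole) reproduces the quoted +5/56 and -4/56 of the group weight for CPUs 0 and 1:

    #include <stdio.h>

    int main(void)
    {
        double rw[4] = { 2, 4, 1, 0 };  /* rw_i: per-cpu runqueue weights     */
        double S = 56;                  /* group weight as seen by its parent */
        double wl = 1;                  /* weight added on CPU 0              */
        double wg = 1;                  /* resulting change in total weight   */
        double sum = 0;
        int i;

        for (i = 0; i < 4; i++)
            sum += rw[i];

        for (i = 0; i < 4; i++) {
            double s  = rw[i] / sum;                              /* (1) */
            double sp = (rw[i] + (i == 0 ? wl : 0)) / (sum + wg); /* (2) */
            printf("cpu%d: dw_i = %+g\n", i, S * (sp - s));       /* (3) */
        }
        return 0;
    }

Running it prints +5 for CPU 0 and -4 for CPU 1, matching the fractions worked out in the comment.
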
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index efa0a7b75dde..84802245abd2 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -67,3 +67,4 @@ SCHED_FEAT(NONTASK_POWER, 1)
SCHED_FEAT(TTWU_QUEUE, 1)
SCHED_FEAT(FORCE_SD_OVERLAP, 0)
+SCHED_FEAT(RT_RUNTIME_SHARE, 1)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 056cbd2e2a27..583a1368afe6 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -560,6 +560,9 @@ static int balance_runtime(struct rt_rq *rt_rq)
{
int more = 0;
+ if (!sched_feat(RT_RUNTIME_SHARE))
+ return more;
+
if (rt_rq->rt_time > rt_rq->rt_runtime) {
raw_spin_unlock(&rt_rq->rt_runtime_lock);
more = do_balance_runtime(rt_rq);
diff --git a/kernel/signal.c b/kernel/signal.c
index b3f78d09a105..206551563cce 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1994,8 +1994,6 @@ static bool do_signal_stop(int signr)
*/
if (!(sig->flags & SIGNAL_STOP_STOPPED))
sig->group_exit_code = signr;
- else
- WARN_ON_ONCE(!current->ptrace);
sig->group_stop_count = 0;
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 6318b511afa1..a650694883a1 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1354,7 +1354,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
fput(file);
out_putname:
- putname(pathname);
+ __putname(pathname);
out:
return result;
}
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index c436e790b21b..8a46f5d64504 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -195,7 +195,7 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
struct alarm *alarm;
ktime_t expired = next->expires;
- if (expired.tv64 >= now.tv64)
+ if (expired.tv64 > now.tv64)
break;
alarm = container_of(next, struct alarm, node);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index cf52fda2e096..d3ad022136e5 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -492,6 +492,22 @@ void clocksource_touch_watchdog(void)
}
/**
+ * clocksource_max_adjustment- Returns max adjustment amount
+ * @cs: Pointer to clocksource
+ *
+ */
+static u32 clocksource_max_adjustment(struct clocksource *cs)
+{
+ u64 ret;
+ /*
+ * We won't try to correct for more than 11% adjustments (110,000 ppm).
+ */
+ ret = (u64)cs->mult * 11;
+ do_div(ret,100);
+ return (u32)ret;
+}
+
+/**
* clocksource_max_deferment - Returns max time the clocksource can be deferred
* @cs: Pointer to clocksource
*
@@ -503,25 +519,28 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
/*
* Calculate the maximum number of cycles that we can pass to the
* cyc2ns function without overflowing a 64-bit signed result. The
- * maximum number of cycles is equal to ULLONG_MAX/cs->mult which
- * is equivalent to the below.
- * max_cycles < (2^63)/cs->mult
- * max_cycles < 2^(log2((2^63)/cs->mult))
- * max_cycles < 2^(log2(2^63) - log2(cs->mult))
- * max_cycles < 2^(63 - log2(cs->mult))
- * max_cycles < 1 << (63 - log2(cs->mult))
+ * maximum number of cycles is equal to ULLONG_MAX/(cs->mult+cs->maxadj)
+ * which is equivalent to the below.
+ * max_cycles < (2^63)/(cs->mult + cs->maxadj)
+ * max_cycles < 2^(log2((2^63)/(cs->mult + cs->maxadj)))
+ * max_cycles < 2^(log2(2^63) - log2(cs->mult + cs->maxadj))
+ * max_cycles < 2^(63 - log2(cs->mult + cs->maxadj))
+ * max_cycles < 1 << (63 - log2(cs->mult + cs->maxadj))
* Please note that we add 1 to the result of the log2 to account for
* any rounding errors, ensure the above inequality is satisfied and
* no overflow will occur.
*/
- max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1));
+ max_cycles = 1ULL << (63 - (ilog2(cs->mult + cs->maxadj) + 1));
/*
* The actual maximum number of cycles we can defer the clocksource is
* determined by the minimum of max_cycles and cs->mask.
+ * Note: Here we subtract the maxadj to make sure we don't sleep for
+ * too long if there's a large negative adjustment.
*/
max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
- max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift);
+ max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult - cs->maxadj,
+ cs->shift);
/*
* To ensure that the clocksource does not wrap whilst we are idle,
@@ -529,7 +548,7 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
* note a margin of 12.5% is used because this can be computed with
* a shift, versus say 10% which would require division.
*/
- return max_nsecs - (max_nsecs >> 5);
+ return max_nsecs - (max_nsecs >> 3);
}
#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
@@ -628,7 +647,7 @@ static void clocksource_enqueue(struct clocksource *cs)
/**
* __clocksource_updatefreq_scale - Used update clocksource with new freq
- * @t: clocksource to be registered
+ * @cs: clocksource to be registered
* @scale: Scale factor multiplied against freq to get clocksource hz
* @freq: clocksource frequency (cycles per second) divided by scale
*
@@ -640,7 +659,6 @@ static void clocksource_enqueue(struct clocksource *cs)
void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
u64 sec;
-
/*
* Calc the maximum number of seconds which we can run before
* wrapping around. For clocksources which have a mask > 32bit
@@ -651,7 +669,7 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
* ~ 0.06ppm granularity for NTP. We apply the same 12.5%
* margin as we do in clocksource_max_deferment()
*/
- sec = (cs->mask - (cs->mask >> 5));
+ sec = (cs->mask - (cs->mask >> 3));
do_div(sec, freq);
do_div(sec, scale);
if (!sec)
@@ -661,13 +679,27 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
NSEC_PER_SEC / scale, sec * scale);
+
+ /*
+ * For clocksources with large mults, reduce mult/shift to avoid
+ * overflow. Since mult may be adjusted by ntp, add an extra safety
+ * margin.
+ */
+ cs->maxadj = clocksource_max_adjustment(cs);
+ while ((cs->mult + cs->maxadj < cs->mult)
+ || (cs->mult - cs->maxadj > cs->mult)) {
+ cs->mult >>= 1;
+ cs->shift--;
+ cs->maxadj = clocksource_max_adjustment(cs);
+ }
+
cs->max_idle_ns = clocksource_max_deferment(cs);
}
EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
/**
* __clocksource_register_scale - Used to install new clocksources
- * @t: clocksource to be registered
+ * @cs: clocksource to be registered
* @scale: Scale factor multiplied against freq to get clocksource hz
* @freq: clocksource frequency (cycles per second) divided by scale
*
@@ -695,12 +727,18 @@ EXPORT_SYMBOL_GPL(__clocksource_register_scale);
/**
* clocksource_register - Used to install new clocksources
- * @t: clocksource to be registered
+ * @cs: clocksource to be registered
*
* Returns -EBUSY if registration fails, zero otherwise.
*/
int clocksource_register(struct clocksource *cs)
{
+ /* calculate max adjustment for given mult/shift */
+ cs->maxadj = clocksource_max_adjustment(cs);
+ WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
+ "Clocksource %s might overflow on 11%% adjustment\n",
+ cs->name);
+
/* calculate max idle time permitted for this clocksource */
cs->max_idle_ns = clocksource_max_deferment(cs);
@@ -723,6 +761,8 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating)
/**
* clocksource_change_rating - Change the rating of a registered clocksource
+ * @cs: clocksource to be changed
+ * @rating: new rating
*/
void clocksource_change_rating(struct clocksource *cs, int rating)
{
@@ -734,6 +774,7 @@ EXPORT_SYMBOL(clocksource_change_rating);
/**
* clocksource_unregister - remove a registered clocksource
+ * @cs: clocksource to be unregistered
*/
void clocksource_unregister(struct clocksource *cs)
{
@@ -749,6 +790,7 @@ EXPORT_SYMBOL(clocksource_unregister);
/**
* sysfs_show_current_clocksources - sysfs interface for current clocksource
* @dev: unused
+ * @attr: unused
* @buf: char buffer to be filled with clocksource list
*
* Provides sysfs interface for listing current clocksource.
@@ -769,6 +811,7 @@ sysfs_show_current_clocksources(struct sys_device *dev,
/**
* sysfs_override_clocksource - interface for manually overriding clocksource
* @dev: unused
+ * @attr: unused
* @buf: name of override clocksource
* @count: length of buffer
*
@@ -804,6 +847,7 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
/**
* sysfs_show_available_clocksources - sysfs interface for listing clocksource
* @dev: unused
+ * @attr: unused
* @buf: char buffer to be filled with clocksource list
*
* Provides sysfs interface for listing registered clocksources
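
The two new calculations above are plain integer arithmetic and can be sanity-checked in userspace; the mult and idle interval below are made-up values, not taken from any real clocksource. Note that the existing comment already claimed a 12.5% margin, which only the new x >> 3 actually provides (the old x >> 5 gave 3.125%):

    #include <stdio.h>
    #include <stdint.h>

    /* mirrors clocksource_max_adjustment(): at most ~11% (110,000 ppm) */
    static uint32_t max_adjustment(uint32_t mult)
    {
        return (uint64_t)mult * 11 / 100;
    }

    int main(void)
    {
        uint32_t mult = 4194304;                      /* hypothetical mult   */
        uint64_t max_nsecs = 3600ULL * 1000000000ULL; /* say, one hour in ns */

        printf("maxadj           = %u\n", max_adjustment(mult));
        printf("old margin (>>5) = %llu ns\n",
               (unsigned long long)(max_nsecs - (max_nsecs >> 5)));
        printf("new margin (>>3) = %llu ns\n",
               (unsigned long long)(max_nsecs - (max_nsecs >> 3)));
        return 0;
    }
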
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f954282d9a82..fd4a7b1625a2 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -71,7 +71,7 @@ int tick_check_broadcast_device(struct clock_event_device *dev)
(dev->features & CLOCK_EVT_FEAT_C3STOP))
return 0;
- clockevents_exchange_device(NULL, dev);
+ clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
tick_broadcast_device.evtdev = dev;
if (!cpumask_empty(tick_get_broadcast_mask()))
tick_broadcast_start_periodic(dev);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 2b021b0e8507..237841378c03 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -249,6 +249,8 @@ ktime_t ktime_get(void)
secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
nsecs += timekeeping_get_ns();
+ /* If arch requires, add in gettimeoffset() */
+ nsecs += arch_gettimeoffset();
} while (read_seqretry(&xtime_lock, seq));
/*
@@ -280,6 +282,8 @@ void ktime_get_ts(struct timespec *ts)
*ts = xtime;
tomono = wall_to_monotonic;
nsecs = timekeeping_get_ns();
+ /* If arch requires, add in gettimeoffset() */
+ nsecs += arch_gettimeoffset();
} while (read_seqretry(&xtime_lock, seq));
@@ -802,14 +806,44 @@ static void timekeeping_adjust(s64 offset)
s64 error, interval = timekeeper.cycle_interval;
int adj;
+ /*
+ * The point of this is to check if the error is greater than half
+ * an interval.
+ *
+ * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
+ *
+ * Note we subtract one in the shift, so that error is really error*2.
+ * This "saves" dividing (shifting) interval twice, but keeps the
+ * (error > interval) comparison as still measuring if error is
+ * larger than half an interval.
+ *
+ * Note: It does not "save" on aggravation when reading the code.
+ */
error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
if (error > interval) {
+ /*
+ * We now divide error by 4 (via shift), which checks if
+ * the error is greater than twice the interval.
+ * If it is greater, we need a bigadjust; if it's smaller,
+ * we can adjust by 1.
+ */
error >>= 2;
+ /*
+ * XXX - In update_wall_time, we round up to the next
+ * nanosecond, and store the amount rounded up into
+ * the error. This causes the likely below to be unlikely.
+ *
+ * The proper fix is to avoid rounding up by using
+ * the high precision timekeeper.xtime_nsec instead of
+ * xtime.tv_nsec everywhere. Fixing this will take some
+ * time.
+ */
if (likely(error <= interval))
adj = 1;
else
adj = timekeeping_bigadjust(error, &interval, &offset);
} else if (error < -interval) {
+ /* See comment above, this is just switched for the negative */
error >>= 2;
if (likely(error >= -interval)) {
adj = -1;
@@ -817,9 +851,65 @@ static void timekeeping_adjust(s64 offset)
offset = -offset;
} else
adj = timekeeping_bigadjust(error, &interval, &offset);
- } else
+ } else /* No adjustment needed */
return;
+ WARN_ONCE(timekeeper.clock->maxadj &&
+ (timekeeper.mult + adj > timekeeper.clock->mult +
+ timekeeper.clock->maxadj),
+ "Adjusting %s more then 11%% (%ld vs %ld)\n",
+ timekeeper.clock->name, (long)timekeeper.mult + adj,
+ (long)timekeeper.clock->mult +
+ timekeeper.clock->maxadj);
+ /*
+ * So the following can be confusing.
+ *
+ * To keep things simple, lets assume adj == 1 for now.
+ *
+ * When adj != 1, remember that the interval and offset values
+ * have been appropriately scaled so the math is the same.
+ *
+ * The basic idea here is that we're increasing the multiplier
+ * by one, this causes the xtime_interval to be incremented by
+ * one cycle_interval. This is because:
+ * xtime_interval = cycle_interval * mult
+ * So if mult is being incremented by one:
+ * xtime_interval = cycle_interval * (mult + 1)
+ * Its the same as:
+ * xtime_interval = (cycle_interval * mult) + cycle_interval
+ * Which can be shortened to:
+ * xtime_interval += cycle_interval
+ *
+ * So offset stores the non-accumulated cycles. Thus the current
+ * time (in shifted nanoseconds) is:
+ * now = (offset * adj) + xtime_nsec
+ * Now, even though we're adjusting the clock frequency, we have
+ * to keep time consistent. In other words, we can't jump back
+ * in time, and we also want to avoid jumping forward in time.
+ *
+ * So given the same offset value, we need the time to be the same
+ * both before and after the freq adjustment.
+ * now = (offset * adj_1) + xtime_nsec_1
+ * now = (offset * adj_2) + xtime_nsec_2
+ * So:
+ * (offset * adj_1) + xtime_nsec_1 =
+ * (offset * adj_2) + xtime_nsec_2
+ * And we know:
+ * adj_2 = adj_1 + 1
+ * So:
+ * (offset * adj_1) + xtime_nsec_1 =
+ * (offset * (adj_1+1)) + xtime_nsec_2
+ * (offset * adj_1) + xtime_nsec_1 =
+ * (offset * adj_1) + offset + xtime_nsec_2
+ * Canceling the sides:
+ * xtime_nsec_1 = offset + xtime_nsec_2
+ * Which gives us:
+ * xtime_nsec_2 = xtime_nsec_1 - offset
+ * Which simplifies to:
+ * xtime_nsec -= offset
+ *
+ * XXX - TODO: Doc ntp_error calculation.
+ */
timekeeper.mult += adj;
timekeeper.xtime_interval += interval;
timekeeper.xtime_nsec -= offset;
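
The derivation above boils down to keeping offset * mult + xtime_nsec constant across the frequency bump; with adj == 1 (so offset needs no scaling) a quick check with arbitrary numbers, outside the kernel, makes this concrete:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int64_t offset = 1000;        /* non-accumulated clocksource cycles */
        int64_t mult = 7;             /* current multiplier                 */
        int64_t adj = 1;              /* frequency adjustment               */
        int64_t xtime_nsec = 123456;  /* accumulated shifted nanoseconds    */

        int64_t now_before = offset * mult + xtime_nsec;
        int64_t now_after  = offset * (mult + adj) + (xtime_nsec - offset);

        /* both print 130456: time neither jumps back nor forward */
        printf("now before adjustment: %lld\n", (long long)now_before);
        printf("now after  adjustment: %lld\n", (long long)now_after);
        return 0;
    }
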
diff --git a/kernel/timer.c b/kernel/timer.c
index dbaa62422b13..a297ffcf888e 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -427,6 +427,12 @@ static int timer_fixup_init(void *addr, enum debug_obj_state state)
}
}
+/* Stub timer callback for improperly used timers. */
+static void stub_timer(unsigned long data)
+{
+ WARN_ON(1);
+}
+
/*
* fixup_activate is called when:
* - an active object is activated
@@ -450,7 +456,8 @@ static int timer_fixup_activate(void *addr, enum debug_obj_state state)
debug_object_activate(timer, &timer_debug_descr);
return 0;
} else {
- WARN_ON_ONCE(1);
+ setup_timer(timer, stub_timer, 0);
+ return 1;
}
return 0;
@@ -480,12 +487,40 @@ static int timer_fixup_free(void *addr, enum debug_obj_state state)
}
}
+/*
+ * fixup_assert_init is called when:
+ * - an untracked/uninit-ed object is found
+ */
+static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
+{
+ struct timer_list *timer = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_NOTAVAILABLE:
+ if (timer->entry.prev == TIMER_ENTRY_STATIC) {
+ /*
+ * This is not really a fixup. The timer was
+ * statically initialized. We just make sure that it
+ * is tracked in the object tracker.
+ */
+ debug_object_init(timer, &timer_debug_descr);
+ return 0;
+ } else {
+ setup_timer(timer, stub_timer, 0);
+ return 1;
+ }
+ default:
+ return 0;
+ }
+}
+
static struct debug_obj_descr timer_debug_descr = {
- .name = "timer_list",
- .debug_hint = timer_debug_hint,
- .fixup_init = timer_fixup_init,
- .fixup_activate = timer_fixup_activate,
- .fixup_free = timer_fixup_free,
+ .name = "timer_list",
+ .debug_hint = timer_debug_hint,
+ .fixup_init = timer_fixup_init,
+ .fixup_activate = timer_fixup_activate,
+ .fixup_free = timer_fixup_free,
+ .fixup_assert_init = timer_fixup_assert_init,
};
static inline void debug_timer_init(struct timer_list *timer)
@@ -508,6 +543,11 @@ static inline void debug_timer_free(struct timer_list *timer)
debug_object_free(timer, &timer_debug_descr);
}
+static inline void debug_timer_assert_init(struct timer_list *timer)
+{
+ debug_object_assert_init(timer, &timer_debug_descr);
+}
+
static void __init_timer(struct timer_list *timer,
const char *name,
struct lock_class_key *key);
@@ -531,6 +571,7 @@ EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
+static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif
static inline void debug_init(struct timer_list *timer)
@@ -552,6 +593,11 @@ static inline void debug_deactivate(struct timer_list *timer)
trace_timer_cancel(timer);
}
+static inline void debug_assert_init(struct timer_list *timer)
+{
+ debug_timer_assert_init(timer);
+}
+
static void __init_timer(struct timer_list *timer,
const char *name,
struct lock_class_key *key)
@@ -902,6 +948,8 @@ int del_timer(struct timer_list *timer)
unsigned long flags;
int ret = 0;
+ debug_assert_init(timer);
+
timer_stats_timer_clear_start_info(timer);
if (timer_pending(timer)) {
base = lock_timer_base(timer, &flags);
@@ -932,6 +980,8 @@ int try_to_del_timer_sync(struct timer_list *timer)
unsigned long flags;
int ret = -1;
+ debug_assert_init(timer);
+
base = lock_timer_base(timer, &flags);
if (base->running_timer == timer)
@@ -1368,7 +1418,7 @@ SYSCALL_DEFINE0(getppid)
int pid;
rcu_read_lock();
- pid = task_tgid_vnr(current->real_parent);
+ pid = task_tgid_vnr(rcu_dereference(current->real_parent));
rcu_read_unlock();
return pid;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 900b409543db..b1e8943fed1d 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -152,7 +152,6 @@ void clear_ftrace_function(void)
ftrace_pid_function = ftrace_stub;
}
-#undef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
* For those archs that do not test ftrace_trace_stop in their
@@ -1212,7 +1211,9 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
if (!src->count) {
free_ftrace_hash_rcu(*dst);
rcu_assign_pointer(*dst, EMPTY_HASH);
- return 0;
+ /* still need to update the function records */
+ ret = 0;
+ goto out;
}
/*
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 581876f9f387..c212a7f934ec 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1078,7 +1078,6 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
/* First see if we did not already create this dir */
list_for_each_entry(system, &event_subsystems, list) {
if (strcmp(system->name, name) == 0) {
- __get_system(system);
system->nr_events++;
return system->entry;
}
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 816d3d074979..95dc31efd6dd 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1649,7 +1649,9 @@ static int replace_system_preds(struct event_subsystem *system,
*/
err = replace_preds(call, NULL, ps, filter_string, true);
if (err)
- goto fail;
+ call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
+ else
+ call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
}
list_for_each_entry(call, &ftrace_events, list) {
@@ -1658,6 +1660,9 @@ static int replace_system_preds(struct event_subsystem *system,
if (strcmp(call->class->system, system->name) != 0)
continue;
+ if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)
+ continue;
+
filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
if (!filter_item)
goto fail_mem;
@@ -1686,7 +1691,7 @@ static int replace_system_preds(struct event_subsystem *system,
* replace the filter for the call.
*/
filter = call->filter;
- call->filter = filter_item->filter;
+ rcu_assign_pointer(call->filter, filter_item->filter);
filter_item->filter = filter;
fail = false;
@@ -1741,7 +1746,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
filter = call->filter;
if (!filter)
goto out_unlock;
- call->filter = NULL;
+ RCU_INIT_POINTER(call->filter, NULL);
/* Make sure the filter is not being used */
synchronize_sched();
__free_filter(filter);
@@ -1782,7 +1787,7 @@ out:
* string
*/
tmp = call->filter;
- call->filter = filter;
+ rcu_assign_pointer(call->filter, filter);
if (tmp) {
/* Make sure the call is done with the filter */
synchronize_sched();
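
For reference, the filter swap above follows the standard RCU publish-then-retire shape; a generic sketch of that shape (the helper name is hypothetical, only the rcu_assign_pointer()/synchronize_sched() usage mirrors the patch):

    /* Hypothetical helper showing the shape used above: publish a new filter
     * with rcu_assign_pointer(), then retire the old one only after
     * synchronize_sched() guarantees no reader still dereferences it.
     */
    static int demo_swap_filter(struct ftrace_event_call *call,
                                struct event_filter *new)
    {
        struct event_filter *old = call->filter;  /* writers hold event_mutex */

        rcu_assign_pointer(call->filter, new);    /* publish initialized obj  */
        synchronize_sched();                      /* wait for old readers     */
        __free_filter(old);                       /* now safe to free         */
        return 0;
    }
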
diff --git a/kernel/wait.c b/kernel/wait.c
index 26fa7797f90f..7fdd9eaca2c3 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -10,10 +10,10 @@
#include <linux/wait.h>
#include <linux/hash.h>
-void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *key)
+void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key)
{
spin_lock_init(&q->lock);
- lockdep_set_class(&q->lock, key);
+ lockdep_set_class_and_name(&q->lock, key, name);
INIT_LIST_HEAD(&q->task_list);
}