Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c            39
-rw-r--r--  kernel/cpu.c               74
-rw-r--r--  kernel/cpuset.c             9
-rw-r--r--  kernel/crash_dump.c        11
-rw-r--r--  kernel/debug/gdbstub.c     12
-rw-r--r--  kernel/events/core.c      103
-rw-r--r--  kernel/exit.c               2
-rw-r--r--  kernel/fork.c              13
-rw-r--r--  kernel/freezer.c            2
-rw-r--r--  kernel/irq/irqdomain.c     12
-rw-r--r--  kernel/kexec.c             41
-rw-r--r--  kernel/module.c            11
-rw-r--r--  kernel/panic.c              2
-rw-r--r--  kernel/power/qos.c          3
-rw-r--r--  kernel/printk.c            10
-rw-r--r--  kernel/sched.c              2
-rw-r--r--  kernel/stop_machine.c      22
-rw-r--r--  kernel/sys.c                2
-rw-r--r--  kernel/sys_ni.c             4
-rw-r--r--  kernel/sysctl.c            17
-rw-r--r--  kernel/utsname_sysctl.c    23
-rw-r--r--  kernel/watchdog.c           4
22 files changed, 242 insertions, 176 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 453100a4159d..d9d5648f3cdc 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2027,7 +2027,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
goto out_free_group_list;
/* prevent changes to the threadgroup list while we take a snapshot. */
- rcu_read_lock();
+ read_lock(&tasklist_lock);
if (!thread_group_leader(leader)) {
/*
* a race with de_thread from another thread's exec() may strip
@@ -2036,7 +2036,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
* throw this task away and try again (from cgroup_procs_write);
* this is "double-double-toil-and-trouble-check locking".
*/
- rcu_read_unlock();
+ read_unlock(&tasklist_lock);
retval = -EAGAIN;
goto out_free_group_list;
}
@@ -2057,7 +2057,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
} while_each_thread(leader, tsk);
/* remember the number of threads in the array for later. */
group_size = i;
- rcu_read_unlock();
+ read_unlock(&tasklist_lock);
/*
* step 1: check that we can legitimately attach to the cgroup.
@@ -2135,14 +2135,17 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
oldcgrp = task_cgroup_from_root(tsk, root);
if (cgrp == oldcgrp)
continue;
- /* attach each task to each subsystem */
- for_each_subsys(root, ss) {
- if (ss->attach_task)
- ss->attach_task(cgrp, tsk);
- }
/* if the thread is PF_EXITING, it can just get skipped. */
retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, true);
- BUG_ON(retval != 0 && retval != -ESRCH);
+ if (retval == 0) {
+ /* attach each task to each subsystem */
+ for_each_subsys(root, ss) {
+ if (ss->attach_task)
+ ss->attach_task(cgrp, tsk);
+ }
+ } else {
+ BUG_ON(retval != -ESRCH);
+ }
}
/* nothing is sensitive to fork() after this point. */
@@ -4880,9 +4883,9 @@ void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
rcu_assign_pointer(id->css, NULL);
rcu_assign_pointer(css->id, NULL);
- spin_lock(&ss->id_lock);
+ write_lock(&ss->id_lock);
idr_remove(&ss->idr, id->id);
- spin_unlock(&ss->id_lock);
+ write_unlock(&ss->id_lock);
kfree_rcu(id, rcu_head);
}
EXPORT_SYMBOL_GPL(free_css_id);
@@ -4908,10 +4911,10 @@ static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
error = -ENOMEM;
goto err_out;
}
- spin_lock(&ss->id_lock);
+ write_lock(&ss->id_lock);
/* Don't use 0. allocates an ID of 1-65535 */
error = idr_get_new_above(&ss->idr, newid, 1, &myid);
- spin_unlock(&ss->id_lock);
+ write_unlock(&ss->id_lock);
/* Returns error when there are no free spaces for new ID.*/
if (error) {
@@ -4926,9 +4929,9 @@ static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
return newid;
remove_idr:
error = -ENOSPC;
- spin_lock(&ss->id_lock);
+ write_lock(&ss->id_lock);
idr_remove(&ss->idr, myid);
- spin_unlock(&ss->id_lock);
+ write_unlock(&ss->id_lock);
err_out:
kfree(newid);
return ERR_PTR(error);
@@ -4940,7 +4943,7 @@ static int __init_or_module cgroup_init_idr(struct cgroup_subsys *ss,
{
struct css_id *newid;
- spin_lock_init(&ss->id_lock);
+ rwlock_init(&ss->id_lock);
idr_init(&ss->idr);
newid = get_new_cssid(ss, 0);
@@ -5035,9 +5038,9 @@ css_get_next(struct cgroup_subsys *ss, int id,
* scan next entry from bitmap(tree), tmpid is updated after
* idr_get_next().
*/
- spin_lock(&ss->id_lock);
+ read_lock(&ss->id_lock);
tmp = idr_get_next(&ss->idr, &tmpid);
- spin_unlock(&ss->id_lock);
+ read_unlock(&ss->id_lock);
if (!tmp)
break;
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 6a81ca906a06..563f13609470 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -15,6 +15,7 @@
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
+#include <linux/suspend.h>
#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
@@ -476,6 +477,79 @@ static int alloc_frozen_cpus(void)
return 0;
}
core_initcall(alloc_frozen_cpus);
+
+/*
+ * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
+ * hotplug when tasks are about to be frozen. Also, don't allow the freezer
+ * to continue until any currently running CPU hotplug operation gets
+ * completed.
+ * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
+ * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
+ * CPU hotplug path and released only after it is complete. Thus, we
+ * (and hence the freezer) will block here until any currently running CPU
+ * hotplug operation gets completed.
+ */
+void cpu_hotplug_disable_before_freeze(void)
+{
+ cpu_maps_update_begin();
+ cpu_hotplug_disabled = 1;
+ cpu_maps_update_done();
+}
+
+
+/*
+ * When tasks have been thawed, re-enable regular CPU hotplug (which had been
+ * disabled while beginning to freeze tasks).
+ */
+void cpu_hotplug_enable_after_thaw(void)
+{
+ cpu_maps_update_begin();
+ cpu_hotplug_disabled = 0;
+ cpu_maps_update_done();
+}
+
+/*
+ * When callbacks for CPU hotplug notifications are being executed, we must
+ * ensure that the state of the system with respect to the tasks being frozen
+ * or not, as reported by the notification, remains unchanged *throughout the
+ * duration* of the execution of the callbacks.
+ * Hence we need to prevent the freezer from racing with regular CPU hotplug.
+ *
+ * This synchronization is implemented by mutually excluding regular CPU
+ * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
+ * Hibernate notifications.
+ */
+static int
+cpu_hotplug_pm_callback(struct notifier_block *nb,
+ unsigned long action, void *ptr)
+{
+ switch (action) {
+
+ case PM_SUSPEND_PREPARE:
+ case PM_HIBERNATION_PREPARE:
+ cpu_hotplug_disable_before_freeze();
+ break;
+
+ case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+ cpu_hotplug_enable_after_thaw();
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+
+int cpu_hotplug_pm_sync_init(void)
+{
+ pm_notifier(cpu_hotplug_pm_callback, 0);
+ return 0;
+}
+core_initcall(cpu_hotplug_pm_sync_init);
+
#endif /* CONFIG_PM_SLEEP_SMP */
/**
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index d970fb508e34..9fe58c46a426 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -949,6 +949,8 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
static void cpuset_change_task_nodemask(struct task_struct *tsk,
nodemask_t *newmems)
{
+ bool masks_disjoint = !nodes_intersects(*newmems, tsk->mems_allowed);
+
repeat:
/*
* Allow tasks that have access to memory reserves because they have
@@ -963,7 +965,6 @@ repeat:
nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
-
/*
* ensure checking ->mems_allowed_change_disable after setting all new
* allowed nodes.
@@ -980,9 +981,11 @@ repeat:
/*
* Allocation of memory is very fast, we needn't sleep when waiting
- * for the read-side.
+ * for the read-side. No wait is necessary, however, if at least one
+ * node remains unchanged.
*/
- while (ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
+ while (masks_disjoint &&
+ ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
task_unlock(tsk);
if (!task_curr(tsk))
yield();
diff --git a/kernel/crash_dump.c b/kernel/crash_dump.c
index 20e699265cef..c766ee54c0b1 100644
--- a/kernel/crash_dump.c
+++ b/kernel/crash_dump.c
@@ -20,8 +20,15 @@ unsigned long saved_max_pfn;
unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
/*
+ * stores the size of elf header of crash image
+ */
+unsigned long long elfcorehdr_size;
+
+/*
* elfcorehdr= specifies the location of elf core header stored by the crashed
* kernel. This option will be passed by kexec loader to the capture kernel.
+ *
+ * Syntax: elfcorehdr=[size[KMG]@]offset[KMG]
*/
static int __init setup_elfcorehdr(char *arg)
{
@@ -29,6 +36,10 @@ static int __init setup_elfcorehdr(char *arg)
if (!arg)
return -EINVAL;
elfcorehdr_addr = memparse(arg, &end);
+ if (*end == '@') {
+ elfcorehdr_size = elfcorehdr_addr;
+ elfcorehdr_addr = memparse(end + 1, &end);
+ }
return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c
index 34872482315e..c22d8c28ad84 100644
--- a/kernel/debug/gdbstub.c
+++ b/kernel/debug/gdbstub.c
@@ -217,7 +217,7 @@ void gdbstub_msg_write(const char *s, int len)
/* Pack in hex chars */
for (i = 0; i < wcount; i++)
- bufptr = pack_hex_byte(bufptr, s[i]);
+ bufptr = hex_byte_pack(bufptr, s[i]);
*bufptr = '\0';
/* Move up */
@@ -249,7 +249,7 @@ char *kgdb_mem2hex(char *mem, char *buf, int count)
if (err)
return NULL;
while (count > 0) {
- buf = pack_hex_byte(buf, *tmp);
+ buf = hex_byte_pack(buf, *tmp);
tmp++;
count--;
}
@@ -411,14 +411,14 @@ static char *pack_threadid(char *pkt, unsigned char *id)
limit = id + (BUF_THREAD_ID_SIZE / 2);
while (id < limit) {
if (!lzero || *id != 0) {
- pkt = pack_hex_byte(pkt, *id);
+ pkt = hex_byte_pack(pkt, *id);
lzero = 0;
}
id++;
}
if (lzero)
- pkt = pack_hex_byte(pkt, 0);
+ pkt = hex_byte_pack(pkt, 0);
return pkt;
}
@@ -486,7 +486,7 @@ static void gdb_cmd_status(struct kgdb_state *ks)
dbg_remove_all_break();
remcom_out_buffer[0] = 'S';
- pack_hex_byte(&remcom_out_buffer[1], ks->signo);
+ hex_byte_pack(&remcom_out_buffer[1], ks->signo);
}
static void gdb_get_regs_helper(struct kgdb_state *ks)
@@ -954,7 +954,7 @@ int gdb_serial_stub(struct kgdb_state *ks)
/* Reply to host that an exception has occurred */
ptr = remcom_out_buffer;
*ptr++ = 'T';
- ptr = pack_hex_byte(ptr, ks->signo);
+ ptr = hex_byte_pack(ptr, ks->signo);
ptr += strlen(strcpy(ptr, "thread:"));
int_to_threadref(thref, shadow_pid(current->pid));
ptr = pack_threadid(ptr, thref);
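hex_byte_pack() is simply the renamed pack_hex_byte(): it emits one byte as two lowercase hex digits and returns the advanced buffer pointer, which is why the gdbstub call sites above can chain it while building reply packets. A stand-alone sketch of the equivalent behaviour; hex_byte_pack_sketch() is an illustrative reimplementation, not the kernel helper:

#include <stdio.h>

/* Illustrative reimplementation of hex_byte_pack() / pack_hex_byte(). */
static char *hex_byte_pack_sketch(char *buf, unsigned char byte)
{
	static const char hex_asc[] = "0123456789abcdef";

	*buf++ = hex_asc[byte >> 4];	/* high nibble first */
	*buf++ = hex_asc[byte & 0x0f];	/* then low nibble */
	return buf;			/* caller keeps appending here */
}

int main(void)
{
	char pkt[8], *p = pkt;

	p = hex_byte_pack_sketch(p, 0x4f);
	p = hex_byte_pack_sketch(p, 0x0a);
	*p = '\0';
	printf("%s\n", pkt);		/* prints "4f0a" */
	return 0;
}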
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 92b8811f2234..0e8457da6f95 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -30,7 +30,6 @@
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
-#include <linux/suspend.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
@@ -3545,7 +3544,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
struct ring_buffer *rb = event->rb;
atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
- vma->vm_mm->locked_vm -= event->mmap_locked;
+ vma->vm_mm->pinned_vm -= event->mmap_locked;
rcu_assign_pointer(event->rb, NULL);
mutex_unlock(&event->mmap_mutex);
@@ -3626,7 +3625,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
- locked = vma->vm_mm->locked_vm + extra;
+ locked = vma->vm_mm->pinned_vm + extra;
if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
!capable(CAP_IPC_LOCK)) {
@@ -3652,7 +3651,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
atomic_long_add(user_extra, &user->locked_vm);
event->mmap_locked = extra;
event->mmap_user = get_current_user();
- vma->vm_mm->locked_vm += event->mmap_locked;
+ vma->vm_mm->pinned_vm += event->mmap_locked;
unlock:
if (!ret)
@@ -6854,7 +6853,7 @@ static void __cpuinit perf_event_init_cpu(int cpu)
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
mutex_lock(&swhash->hlist_mutex);
- if (swhash->hlist_refcount > 0 && !swhash->swevent_hlist) {
+ if (swhash->hlist_refcount > 0) {
struct swevent_hlist *hlist;
hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
@@ -6943,14 +6942,7 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
- /*
- * Ignore suspend/resume action, the perf_pm_notifier will
- * take care of that.
- */
- if (action & CPU_TASKS_FROZEN)
- return NOTIFY_OK;
-
- switch (action) {
+ switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
case CPU_DOWN_FAILED:
@@ -6969,90 +6961,6 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
return NOTIFY_OK;
}
-static void perf_pm_resume_cpu(void *unused)
-{
- struct perf_cpu_context *cpuctx;
- struct perf_event_context *ctx;
- struct pmu *pmu;
- int idx;
-
- idx = srcu_read_lock(&pmus_srcu);
- list_for_each_entry_rcu(pmu, &pmus, entry) {
- cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
- ctx = cpuctx->task_ctx;
-
- perf_ctx_lock(cpuctx, ctx);
- perf_pmu_disable(cpuctx->ctx.pmu);
-
- cpu_ctx_sched_out(cpuctx, EVENT_ALL);
- if (ctx)
- ctx_sched_out(ctx, cpuctx, EVENT_ALL);
-
- perf_pmu_enable(cpuctx->ctx.pmu);
- perf_ctx_unlock(cpuctx, ctx);
- }
- srcu_read_unlock(&pmus_srcu, idx);
-}
-
-static void perf_pm_suspend_cpu(void *unused)
-{
- struct perf_cpu_context *cpuctx;
- struct perf_event_context *ctx;
- struct pmu *pmu;
- int idx;
-
- idx = srcu_read_lock(&pmus_srcu);
- list_for_each_entry_rcu(pmu, &pmus, entry) {
- cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
- ctx = cpuctx->task_ctx;
-
- perf_ctx_lock(cpuctx, ctx);
- perf_pmu_disable(cpuctx->ctx.pmu);
-
- perf_event_sched_in(cpuctx, ctx, current);
-
- perf_pmu_enable(cpuctx->ctx.pmu);
- perf_ctx_unlock(cpuctx, ctx);
- }
- srcu_read_unlock(&pmus_srcu, idx);
-}
-
-static int perf_resume(void)
-{
- get_online_cpus();
- smp_call_function(perf_pm_resume_cpu, NULL, 1);
- put_online_cpus();
-
- return NOTIFY_OK;
-}
-
-static int perf_suspend(void)
-{
- get_online_cpus();
- smp_call_function(perf_pm_suspend_cpu, NULL, 1);
- put_online_cpus();
-
- return NOTIFY_OK;
-}
-
-static int perf_pm(struct notifier_block *self, unsigned long action, void *ptr)
-{
- switch (action) {
- case PM_POST_HIBERNATION:
- case PM_POST_SUSPEND:
- return perf_resume();
- case PM_HIBERNATION_PREPARE:
- case PM_SUSPEND_PREPARE:
- return perf_suspend();
- default:
- return NOTIFY_DONE;
- }
-}
-
-static struct notifier_block perf_pm_notifier = {
- .notifier_call = perf_pm,
-};
-
void __init perf_event_init(void)
{
int ret;
@@ -7067,7 +6975,6 @@ void __init perf_event_init(void)
perf_tp_register();
perf_cpu_notifier(perf_cpu_notify);
register_reboot_notifier(&perf_reboot_notifier);
- register_pm_notifier(&perf_pm_notifier);
ret = init_hw_breakpoint();
WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
diff --git a/kernel/exit.c b/kernel/exit.c
index 2913b3509d42..d0b7d988f873 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -681,8 +681,6 @@ static void exit_mm(struct task_struct * tsk)
enter_lazy_tlb(mm, current);
/* We don't want this task to be frozen prematurely */
clear_freeze_flag(tsk);
- if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
- atomic_dec(&mm->oom_disable_count);
task_unlock(tsk);
mm_update_next_owner(mm);
mmput(mm);
diff --git a/kernel/fork.c b/kernel/fork.c
index 8e6b6f4fb272..ba0d17261329 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -501,7 +501,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
mm->cached_hole_size = ~0UL;
mm_init_aio(mm);
mm_init_owner(mm, p);
- atomic_set(&mm->oom_disable_count, 0);
if (likely(!mm_alloc_pgd(mm))) {
mm->def_flags = 0;
@@ -816,8 +815,6 @@ good_mm:
/* Initializing for Swap token stuff */
mm->token_priority = 0;
mm->last_interval = 0;
- if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
- atomic_inc(&mm->oom_disable_count);
tsk->mm = mm;
tsk->active_mm = mm;
@@ -1302,6 +1299,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->pdeath_signal = 0;
p->exit_state = 0;
+ p->nr_dirtied = 0;
+ p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
+
/*
* Ok, make it visible to the rest of the system.
* We dont wake it up yet.
@@ -1391,13 +1391,8 @@ bad_fork_cleanup_io:
bad_fork_cleanup_namespaces:
exit_task_namespaces(p);
bad_fork_cleanup_mm:
- if (p->mm) {
- task_lock(p);
- if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
- atomic_dec(&p->mm->oom_disable_count);
- task_unlock(p);
+ if (p->mm)
mmput(p->mm);
- }
bad_fork_cleanup_signal:
if (!(clone_flags & CLONE_THREAD))
free_signal_struct(p->signal);
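The nr_dirtied_pause initialiser above reads as "128 KB expressed in pages": 128 >> (PAGE_SHIFT - 10) divides 128 KB by the page size, so a freshly forked task reaches its first dirty-throttling check after roughly the same amount of dirtied memory regardless of page size. A quick sketch of the arithmetic:

#include <stdio.h>

int main(void)
{
	/* 128 >> (PAGE_SHIFT - 10) == 128 KB / page size, in pages */
	for (int page_shift = 12; page_shift <= 16; page_shift += 2)
		printf("PAGE_SHIFT=%d: check after %d dirtied pages (128 KB)\n",
		       page_shift, 128 >> (page_shift - 10));
	return 0;
}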
diff --git a/kernel/freezer.c b/kernel/freezer.c
index f24aa0005530..7be56c534397 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -67,7 +67,7 @@ static void fake_signal_wake_up(struct task_struct *p)
unsigned long flags;
spin_lock_irqsave(&p->sighand->siglock, flags);
- signal_wake_up(p, 1);
+ signal_wake_up(p, 0);
spin_unlock_irqrestore(&p->sighand->siglock, flags);
}
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index b57a3776de44..200ce832c585 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -20,15 +20,15 @@ static DEFINE_MUTEX(irq_domain_mutex);
void irq_domain_add(struct irq_domain *domain)
{
struct irq_data *d;
- int hwirq;
+ int hwirq, irq;
/*
* This assumes that the irq_domain owner has already allocated
* the irq_descs. This block will be removed when support for dynamic
* allocation of irq_descs is added to irq_domain.
*/
- for (hwirq = 0; hwirq < domain->nr_irq; hwirq++) {
- d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq));
+ irq_domain_for_each_irq(domain, hwirq, irq) {
+ d = irq_get_irq_data(irq);
if (!d) {
WARN(1, "error: assigning domain to non existant irq_desc");
return;
@@ -54,15 +54,15 @@ void irq_domain_add(struct irq_domain *domain)
void irq_domain_del(struct irq_domain *domain)
{
struct irq_data *d;
- int hwirq;
+ int hwirq, irq;
mutex_lock(&irq_domain_mutex);
list_del(&domain->list);
mutex_unlock(&irq_domain_mutex);
/* Clear the irq_domain assignments */
- for (hwirq = 0; hwirq < domain->nr_irq; hwirq++) {
- d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq));
+ irq_domain_for_each_irq(domain, hwirq, irq) {
+ d = irq_get_irq_data(irq);
d->domain = NULL;
}
}
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 296fbc84d659..dc7bc0829286 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -498,7 +498,7 @@ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
while (hole_end <= crashk_res.end) {
unsigned long i;
- if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT)
+ if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
break;
if (hole_end > crashk_res.end)
break;
@@ -999,6 +999,7 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
kimage_free(xchg(&kexec_crash_image, NULL));
result = kimage_crash_alloc(&image, entry,
nr_segments, segments);
+ crash_map_reserved_pages();
}
if (result)
goto out;
@@ -1015,6 +1016,8 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
goto out;
}
kimage_terminate(image);
+ if (flags & KEXEC_ON_CRASH)
+ crash_unmap_reserved_pages();
}
/* Install the new kernel, and Uninstall the old */
image = xchg(dest_image, image);
@@ -1026,6 +1029,18 @@ out:
return result;
}
+/*
+ * Add and remove page tables for crashkernel memory
+ *
+ * Provide an empty default implementation here -- architecture
+ * code may override this
+ */
+void __weak crash_map_reserved_pages(void)
+{}
+
+void __weak crash_unmap_reserved_pages(void)
+{}
+
#ifdef CONFIG_COMPAT
asmlinkage long compat_sys_kexec_load(unsigned long entry,
unsigned long nr_segments,
@@ -1134,14 +1149,16 @@ int crash_shrink_memory(unsigned long new_size)
goto unlock;
}
- start = roundup(start, PAGE_SIZE);
- end = roundup(start + new_size, PAGE_SIZE);
+ start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
+ end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
+ crash_map_reserved_pages();
crash_free_reserved_phys_range(end, crashk_res.end);
if ((start == end) && (crashk_res.parent != NULL))
release_resource(&crashk_res);
crashk_res.end = end - 1;
+ crash_unmap_reserved_pages();
unlock:
mutex_unlock(&kexec_mutex);
@@ -1380,24 +1397,23 @@ int __init parse_crashkernel(char *cmdline,
}
-
-void crash_save_vmcoreinfo(void)
+static void update_vmcoreinfo_note(void)
{
- u32 *buf;
+ u32 *buf = vmcoreinfo_note;
if (!vmcoreinfo_size)
return;
-
- vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());
-
- buf = (u32 *)vmcoreinfo_note;
-
buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
vmcoreinfo_size);
-
final_note(buf);
}
+void crash_save_vmcoreinfo(void)
+{
+ vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());
+ update_vmcoreinfo_note();
+}
+
void vmcoreinfo_append_str(const char *fmt, ...)
{
va_list args;
@@ -1483,6 +1499,7 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_NUMBER(PG_swapcache);
arch_crash_save_vmcoreinfo();
+ update_vmcoreinfo_note();
return 0;
}
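The empty __weak bodies act as link-time defaults: generic kexec code always calls crash_map_reserved_pages(), and an architecture that needs to (un)map the crashkernel region supplies a strong definition that the linker prefers. A hedged multi-file sketch of the pattern; the file names and the override body are illustrative, not taken from any architecture:

/* generic.c -- weak fallback, mirroring the empty defaults above */
void __attribute__((weak)) crash_map_reserved_pages(void)
{
	/* nothing to do on architectures without special mappings */
}

/* arch_kexec.c -- hypothetical architecture-specific strong override */
void crash_map_reserved_pages(void)
{
	/* e.g. create temporary page-table entries covering crashk_res */
}

/* caller.c -- generic code just calls the symbol; the linker uses the
 * strong definition when arch_kexec.o is linked in, the weak one
 * otherwise.
 */
void crash_map_reserved_pages(void);

void kexec_crash_load_sketch(void)
{
	crash_map_reserved_pages();
	/* ... copy crash kernel segments into the reserved region ... */
}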
diff --git a/kernel/module.c b/kernel/module.c
index 84205ae1607a..178333c48d1e 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2487,6 +2487,9 @@ static int check_modinfo(struct module *mod, struct load_info *info)
return -ENOEXEC;
}
+ if (!get_modinfo(info, "intree"))
+ add_taint_module(mod, TAINT_OOT_MODULE);
+
if (get_modinfo(info, "staging")) {
add_taint_module(mod, TAINT_CRAP);
printk(KERN_WARNING "%s: module is from the staging directory,"
@@ -2878,8 +2881,7 @@ static struct module *load_module(void __user *umod,
}
/* This has to be done once we're sure module name is unique. */
- if (!mod->taints || mod->taints == (1U<<TAINT_CRAP))
- dynamic_debug_setup(info.debug, info.num_debug);
+ dynamic_debug_setup(info.debug, info.num_debug);
/* Find duplicate symbols */
err = verify_export_symbols(mod);
@@ -2915,8 +2917,7 @@ static struct module *load_module(void __user *umod,
module_bug_cleanup(mod);
ddebug:
- if (!mod->taints || mod->taints == (1U<<TAINT_CRAP))
- dynamic_debug_remove(info.debug);
+ dynamic_debug_remove(info.debug);
unlock:
mutex_unlock(&module_mutex);
synchronize_sched();
@@ -3257,6 +3258,8 @@ static char *module_flags(struct module *mod, char *buf)
buf[bx++] = '(';
if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
buf[bx++] = 'P';
+ else if (mod->taints & (1 << TAINT_OOT_MODULE))
+ buf[bx++] = 'O';
if (mod->taints & (1 << TAINT_FORCED_MODULE))
buf[bx++] = 'F';
if (mod->taints & (1 << TAINT_CRAP))
diff --git a/kernel/panic.c b/kernel/panic.c
index d7bb6974efb5..b26593604214 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -177,6 +177,7 @@ static const struct tnt tnts[] = {
{ TAINT_WARN, 'W', ' ' },
{ TAINT_CRAP, 'C', ' ' },
{ TAINT_FIRMWARE_WORKAROUND, 'I', ' ' },
+ { TAINT_OOT_MODULE, 'O', ' ' },
};
/**
@@ -194,6 +195,7 @@ static const struct tnt tnts[] = {
* 'W' - Taint on warning.
* 'C' - modules from drivers/staging are loaded.
* 'I' - Working around severe firmware bug.
+ * 'O' - Out-of-tree module has been loaded.
*
* The string is overwritten by the next call to print_tainted().
*/
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 2c0a65e27626..56db75147186 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -387,8 +387,7 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp)
pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
filp->private_data = req;
- if (filp->private_data)
- return 0;
+ return 0;
}
return -EPERM;
}
diff --git a/kernel/printk.c b/kernel/printk.c
index b7da18391c38..1455a0d4eedd 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -532,6 +532,9 @@ static int __init ignore_loglevel_setup(char *str)
}
early_param("ignore_loglevel", ignore_loglevel_setup);
+module_param_named(ignore_loglevel, ignore_loglevel, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ignore_loglevel, "ignore loglevel setting, to"
+ "print all kernel messages to the console.");
/*
* Write out chars from start to end - 1 inclusive
@@ -592,9 +595,6 @@ static size_t log_prefix(const char *p, unsigned int *level, char *special)
/* multi digit including the level and facility number */
char *endp = NULL;
- if (p[1] < '0' && p[1] > '9')
- return 0;
-
lev = (simple_strtoul(&p[1], &endp, 10) & 7);
if (endp == NULL || endp[0] != '>')
return 0;
@@ -1108,6 +1108,10 @@ static int __init console_suspend_disable(char *str)
return 1;
}
__setup("no_console_suspend", console_suspend_disable);
+module_param_named(console_suspend, console_suspend_enabled,
+ bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(console_suspend, "suspend console during suspend"
+ " and hibernate operations");
/**
* suspend_console - suspend the console subsystem
diff --git a/kernel/sched.c b/kernel/sched.c
index d87c6e5d4e8c..0e9344a71be3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7087,8 +7087,6 @@ static int __init isolated_cpu_setup(char *str)
__setup("isolcpus=", isolated_cpu_setup);
-#define SD_NODES_PER_DOMAIN 16
-
#ifdef CONFIG_NUMA
/**
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index e78db365fa83..2f194e965715 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -41,6 +41,7 @@ struct cpu_stopper {
};
static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
+static bool stop_machine_initialized = false;
static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
@@ -386,6 +387,8 @@ static int __init cpu_stop_init(void)
cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_ONLINE, bcpu);
register_cpu_notifier(&cpu_stop_cpu_notifier);
+ stop_machine_initialized = true;
+
return 0;
}
early_initcall(cpu_stop_init);
@@ -485,6 +488,25 @@ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
.num_threads = num_online_cpus(),
.active_cpus = cpus };
+ if (!stop_machine_initialized) {
+ /*
+ * Handle the case where stop_machine() is called
+ * early in boot before stop_machine() has been
+ * initialized.
+ */
+ unsigned long flags;
+ int ret;
+
+ WARN_ON_ONCE(smdata.num_threads != 1);
+
+ local_irq_save(flags);
+ hard_irq_disable();
+ ret = (*fn)(data);
+ local_irq_restore(flags);
+
+ return ret;
+ }
+
/* Set the initial state and stop all online cpus. */
set_state(&smdata, STOPMACHINE_PREPARE);
return stop_cpus(cpu_online_mask, stop_machine_cpu_stop, &smdata);
diff --git a/kernel/sys.c b/kernel/sys.c
index d2cb1cda823a..481611fbd079 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1287,6 +1287,7 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
memset(u->nodename + len, 0, sizeof(u->nodename) - len);
errno = 0;
}
+ uts_proc_notify(UTS_PROC_HOSTNAME);
up_write(&uts_sem);
return errno;
}
@@ -1337,6 +1338,7 @@ SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
memset(u->domainname + len, 0, sizeof(u->domainname) - len);
errno = 0;
}
+ uts_proc_notify(UTS_PROC_DOMAINNAME);
up_write(&uts_sem);
return errno;
}
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index a9a5de07c4f1..47bfa16430d7 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -145,6 +145,10 @@ cond_syscall(sys_io_submit);
cond_syscall(sys_io_cancel);
cond_syscall(sys_io_getevents);
cond_syscall(sys_syslog);
+cond_syscall(sys_process_vm_readv);
+cond_syscall(sys_process_vm_writev);
+cond_syscall(compat_sys_process_vm_readv);
+cond_syscall(compat_sys_process_vm_writev);
/* arch-specific weak syscall entries */
cond_syscall(sys_pciconfig_read);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 2d2ecdcc8cdb..ae2719643854 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -57,6 +57,7 @@
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/kmod.h>
+#include <linux/capability.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
@@ -134,6 +135,7 @@ static int minolduid;
static int min_percpu_pagelist_fract = 8;
static int ngroups_max = NGROUPS_MAX;
+static const int cap_last_cap = CAP_LAST_CAP;
#ifdef CONFIG_INOTIFY_USER
#include <linux/inotify.h>
@@ -151,14 +153,6 @@ extern int pwrsw_enabled;
extern int unaligned_enabled;
#endif
-#ifdef CONFIG_S390
-#ifdef CONFIG_MATHEMU
-extern int sysctl_ieee_emulation_warnings;
-#endif
-extern int sysctl_userprocess_debug;
-extern int spin_retry;
-#endif
-
#ifdef CONFIG_IA64
extern int no_unaligned_warning;
extern int unaligned_dump_stack;
@@ -740,6 +734,13 @@ static struct ctl_table kern_table[] = {
.mode = 0444,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "cap_last_cap",
+ .data = (void *)&cap_last_cap,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = proc_dointvec,
+ },
#if defined(CONFIG_LOCKUP_DETECTOR)
{
.procname = "watchdog",
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
index 5a709452ec19..63da38c2d820 100644
--- a/kernel/utsname_sysctl.c
+++ b/kernel/utsname_sysctl.c
@@ -13,6 +13,7 @@
#include <linux/uts.h>
#include <linux/utsname.h>
#include <linux/sysctl.h>
+#include <linux/wait.h>
static void *get_uts(ctl_table *table, int write)
{
@@ -51,12 +52,19 @@ static int proc_do_uts_string(ctl_table *table, int write,
uts_table.data = get_uts(table, write);
r = proc_dostring(&uts_table,write,buffer,lenp, ppos);
put_uts(table, write, uts_table.data);
+
+ if (write)
+ proc_sys_poll_notify(table->poll);
+
return r;
}
#else
#define proc_do_uts_string NULL
#endif
+static DEFINE_CTL_TABLE_POLL(hostname_poll);
+static DEFINE_CTL_TABLE_POLL(domainname_poll);
+
static struct ctl_table uts_kern_table[] = {
{
.procname = "ostype",
@@ -85,6 +93,7 @@ static struct ctl_table uts_kern_table[] = {
.maxlen = sizeof(init_uts_ns.name.nodename),
.mode = 0644,
.proc_handler = proc_do_uts_string,
+ .poll = &hostname_poll,
},
{
.procname = "domainname",
@@ -92,6 +101,7 @@ static struct ctl_table uts_kern_table[] = {
.maxlen = sizeof(init_uts_ns.name.domainname),
.mode = 0644,
.proc_handler = proc_do_uts_string,
+ .poll = &domainname_poll,
},
{}
};
@@ -105,6 +115,19 @@ static struct ctl_table uts_root_table[] = {
{}
};
+#ifdef CONFIG_PROC_SYSCTL
+/*
+ * Notify userspace about a change in a certain entry of uts_kern_table,
+ * identified by the parameter proc.
+ */
+void uts_proc_notify(enum uts_proc proc)
+{
+ struct ctl_table *table = &uts_kern_table[proc];
+
+ proc_sys_poll_notify(table->poll);
+}
+#endif
+
static int __init utsname_sysctl_init(void)
{
register_sysctl_table(uts_root_table);
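Together with the uts_proc_notify() calls added to sethostname()/setdomainname() in kernel/sys.c, the new .poll hooks let user space sleep until the hostname or domainname changes instead of re-reading the files periodically. A hedged user-space sketch; the POLLPRI/POLLERR convention follows the generic procfs sysctl poll support rather than code in this patch:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd = { .events = POLLERR | POLLPRI };
	char buf[65];
	ssize_t n;

	pfd.fd = open("/proc/sys/kernel/hostname", O_RDONLY);
	if (pfd.fd < 0)
		return 1;

	for (;;) {
		if (poll(&pfd, 1, -1) < 0)	/* sleeps until sethostname() */
			break;
		lseek(pfd.fd, 0, SEEK_SET);	/* re-read the new value */
		n = read(pfd.fd, buf, sizeof(buf) - 1);
		if (n < 0)
			break;
		buf[n] = '\0';
		printf("hostname is now: %s", buf);
	}
	close(pfd.fd);
	return 0;
}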
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index d680381b0e9c..1d7bca7f4f52 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -481,6 +481,8 @@ static void watchdog_disable(int cpu)
}
}
+/* sysctl functions */
+#ifdef CONFIG_SYSCTL
static void watchdog_enable_all_cpus(void)
{
int cpu;
@@ -510,8 +512,6 @@ static void watchdog_disable_all_cpus(void)
}
-/* sysctl functions */
-#ifdef CONFIG_SYSCTL
/*
* proc handler for /proc/sys/kernel/nmi_watchdog,watchdog_thresh
*/