Diffstat (limited to 'kernel')
-rw-r--r--  kernel/auditfilter.c | 13
-rw-r--r--  kernel/auditsc.c | 135
-rw-r--r--  kernel/cpuset.c | 2
-rw-r--r--  kernel/exit.c | 3
-rw-r--r--  kernel/fork.c | 20
-rw-r--r--  kernel/futex.c | 21
-rw-r--r--  kernel/hrtimer.c | 15
-rw-r--r--  kernel/irq/proc.c | 10
-rw-r--r--  kernel/kmod.c | 303
-rw-r--r--  kernel/kprobes.c | 9
-rw-r--r--  kernel/ksysfs.c | 28
-rw-r--r--  kernel/lockdep.c | 1497
-rw-r--r--  kernel/lockdep_proc.c | 301
-rw-r--r--  kernel/mutex.c | 8
-rw-r--r--  kernel/nsproxy.c | 2
-rw-r--r--  kernel/posix-timers.c | 2
-rw-r--r--  kernel/power/Kconfig | 29
-rw-r--r--  kernel/power/disk.c | 251
-rw-r--r--  kernel/power/main.c | 108
-rw-r--r--  kernel/power/power.h | 29
-rw-r--r--  kernel/power/process.c | 90
-rw-r--r--  kernel/power/swap.c | 20
-rw-r--r--  kernel/power/user.c | 154
-rw-r--r--  kernel/ptrace.c | 2
-rw-r--r--  kernel/relay.c | 13
-rw-r--r--  kernel/rwsem.c | 8
-rw-r--r--  kernel/sched.c | 33
-rw-r--r--  kernel/signal.c | 10
-rw-r--r--  kernel/spinlock.c | 32
-rw-r--r--  kernel/sys.c | 91
-rw-r--r--  kernel/sysctl.c | 66
-rw-r--r--  kernel/time.c | 101
-rw-r--r--  kernel/time/ntp.c | 69
-rw-r--r--  kernel/time/tick-broadcast.c | 35
-rw-r--r--  kernel/time/tick-common.c | 16
-rw-r--r--  kernel/time/tick-oneshot.c | 15
-rw-r--r--  kernel/time/tick-sched.c | 7
-rw-r--r--  kernel/time/timekeeping.c | 8
-rw-r--r--  kernel/timer.c | 212
-rw-r--r--  kernel/user.c | 2
40 files changed, 2353 insertions, 1417 deletions
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 1bf093dcffe0..359645cff5b2 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -304,7 +304,7 @@ int __init audit_register_class(int class, unsigned *list)
int audit_match_class(int class, unsigned syscall)
{
- if (unlikely(syscall >= AUDIT_BITMASK_SIZE * sizeof(__u32)))
+ if (unlikely(syscall >= AUDIT_BITMASK_SIZE * 32))
return 0;
if (unlikely(class >= AUDIT_SYSCALL_CLASSES || !classes[class]))
return 0;
@@ -456,6 +456,13 @@ static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
case AUDIT_DEVMINOR:
case AUDIT_EXIT:
case AUDIT_SUCCESS:
+ /* bit ops are only useful on syscall args */
+ if (f->op == AUDIT_BIT_MASK ||
+ f->op == AUDIT_BIT_TEST) {
+ err = -EINVAL;
+ goto exit_free;
+ }
+ break;
case AUDIT_ARG0:
case AUDIT_ARG1:
case AUDIT_ARG2:
@@ -1566,6 +1573,10 @@ int audit_comparator(const u32 left, const u32 op, const u32 right)
return (left > right);
case AUDIT_GREATER_THAN_OR_EQUAL:
return (left >= right);
+ case AUDIT_BIT_MASK:
+ return (left & right);
+ case AUDIT_BIT_TEST:
+ return ((left & right) == right);
}
BUG();
return 0;
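The two new comparator cases above give audit filter rules bit-wise matching on syscall arguments: AUDIT_BIT_MASK matches when any bit of the rule value is set in the field, AUDIT_BIT_TEST only when every bit is set. A minimal user-space sketch of those semantics (plain C, not kernel code; the values are illustrative only):

#include <stdio.h>

/* Same predicates as the new audit_comparator() cases. */
static int bit_mask(unsigned int left, unsigned int right)
{
	return (left & right) != 0;		/* any rule bit set? */
}

static int bit_test(unsigned int left, unsigned int right)
{
	return (left & right) == right;		/* all rule bits set? */
}

int main(void)
{
	unsigned int arg = 0x5;		/* illustrative syscall argument */

	printf("mask 0x4 -> %d\n", bit_mask(arg, 0x4));	/* 1: bit 2 is set */
	printf("test 0x6 -> %d\n", bit_test(arg, 0x6));	/* 0: bit 1 is clear */
	return 0;
}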
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index b7640a5f382a..bde1124d5908 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -153,7 +153,7 @@ struct audit_aux_data_execve {
struct audit_aux_data d;
int argc;
int envc;
- char mem[0];
+ struct mm_struct *mm;
};
struct audit_aux_data_socketcall {
@@ -173,12 +173,6 @@ struct audit_aux_data_fd_pair {
int fd[2];
};
-struct audit_aux_data_path {
- struct audit_aux_data d;
- struct dentry *dentry;
- struct vfsmount *mnt;
-};
-
struct audit_aux_data_pids {
struct audit_aux_data d;
pid_t target_pid[AUDIT_AUX_PIDS];
@@ -654,12 +648,6 @@ static inline void audit_free_aux(struct audit_context *context)
struct audit_aux_data *aux;
while ((aux = context->aux)) {
- if (aux->type == AUDIT_AVC_PATH) {
- struct audit_aux_data_path *axi = (void *)aux;
- dput(axi->dentry);
- mntput(axi->mnt);
- }
-
context->aux = aux->next;
kfree(aux);
}
@@ -831,6 +819,55 @@ static int audit_log_pid_context(struct audit_context *context, pid_t pid,
return rc;
}
+static void audit_log_execve_info(struct audit_buffer *ab,
+ struct audit_aux_data_execve *axi)
+{
+ int i;
+ long len, ret;
+ const char __user *p = (const char __user *)axi->mm->arg_start;
+ char *buf;
+
+ if (axi->mm != current->mm)
+ return; /* execve failed, no additional info */
+
+ for (i = 0; i < axi->argc; i++, p += len) {
+ len = strnlen_user(p, MAX_ARG_STRLEN);
+ /*
+ * We just created this mm, if we can't find the strings
+ * we just copied into it something is _very_ wrong. Similar
+ * for strings that are too long, we should not have created
+ * any.
+ */
+ if (!len || len > MAX_ARG_STRLEN) {
+ WARN_ON(1);
+ send_sig(SIGKILL, current, 0);
+ }
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf) {
+ audit_panic("out of memory for argv string\n");
+ break;
+ }
+
+ ret = copy_from_user(buf, p, len);
+ /*
+ * There is no reason for this copy to be short. We just
+ * copied them here, and the mm hasn't been exposed to user-
+ * space yet.
+ */
+ if (!ret) {
+ WARN_ON(1);
+ send_sig(SIGKILL, current, 0);
+ }
+
+ audit_log_format(ab, "a%d=", i);
+ audit_log_untrustedstring(ab, buf);
+ audit_log_format(ab, "\n");
+
+ kfree(buf);
+ }
+}
+
static void audit_log_exit(struct audit_context *context, struct task_struct *tsk)
{
int i, call_panic = 0;
@@ -946,7 +983,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
case AUDIT_IPC: {
struct audit_aux_data_ipcctl *axi = (void *)aux;
audit_log_format(ab,
- "ouid=%u ogid=%u mode=%x",
+ "ouid=%u ogid=%u mode=%#o",
axi->uid, axi->gid, axi->mode);
if (axi->osid != 0) {
char *ctx = NULL;
@@ -965,19 +1002,13 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
case AUDIT_IPC_SET_PERM: {
struct audit_aux_data_ipcctl *axi = (void *)aux;
audit_log_format(ab,
- "qbytes=%lx ouid=%u ogid=%u mode=%x",
+ "qbytes=%lx ouid=%u ogid=%u mode=%#o",
axi->qbytes, axi->uid, axi->gid, axi->mode);
break; }
case AUDIT_EXECVE: {
struct audit_aux_data_execve *axi = (void *)aux;
- int i;
- const char *p;
- for (i = 0, p = axi->mem; i < axi->argc; i++) {
- audit_log_format(ab, "a%d=", i);
- p = audit_log_untrustedstring(ab, p);
- audit_log_format(ab, "\n");
- }
+ audit_log_execve_info(ab, axi);
break; }
case AUDIT_SOCKETCALL: {
@@ -995,11 +1026,6 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
audit_log_hex(ab, axs->a, axs->len);
break; }
- case AUDIT_AVC_PATH: {
- struct audit_aux_data_path *axi = (void *)aux;
- audit_log_d_path(ab, "path=", axi->dentry, axi->mnt);
- break; }
-
case AUDIT_FD_PAIR: {
struct audit_aux_data_fd_pair *axs = (void *)aux;
audit_log_format(ab, "fd0=%d fd1=%d", axs->fd[0], axs->fd[1]);
@@ -1821,32 +1847,31 @@ int __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode
return 0;
}
+int audit_argv_kb = 32;
+
int audit_bprm(struct linux_binprm *bprm)
{
struct audit_aux_data_execve *ax;
struct audit_context *context = current->audit_context;
- unsigned long p, next;
- void *to;
if (likely(!audit_enabled || !context || context->dummy))
return 0;
- ax = kmalloc(sizeof(*ax) + PAGE_SIZE * MAX_ARG_PAGES - bprm->p,
- GFP_KERNEL);
+ /*
+ * Even though the stack code doesn't limit the arg+env size any more,
+ * the audit code requires that _all_ arguments be logged in a single
+ * netlink skb. Hence cap it :-(
+ */
+ if (bprm->argv_len > (audit_argv_kb << 10))
+ return -E2BIG;
+
+ ax = kmalloc(sizeof(*ax), GFP_KERNEL);
if (!ax)
return -ENOMEM;
ax->argc = bprm->argc;
ax->envc = bprm->envc;
- for (p = bprm->p, to = ax->mem; p < MAX_ARG_PAGES*PAGE_SIZE; p = next) {
- struct page *page = bprm->page[p / PAGE_SIZE];
- void *kaddr = kmap(page);
- next = (p + PAGE_SIZE) & ~(PAGE_SIZE - 1);
- memcpy(to, kaddr + (p & (PAGE_SIZE - 1)), next - p);
- to += next - p;
- kunmap(page);
- }
-
+ ax->mm = bprm->mm;
ax->d.type = AUDIT_EXECVE;
ax->d.next = context->aux;
context->aux = (void *)ax;
@@ -1949,36 +1974,6 @@ void __audit_ptrace(struct task_struct *t)
}
/**
- * audit_avc_path - record the granting or denial of permissions
- * @dentry: dentry to record
- * @mnt: mnt to record
- *
- * Returns 0 for success or NULL context or < 0 on error.
- *
- * Called from security/selinux/avc.c::avc_audit()
- */
-int audit_avc_path(struct dentry *dentry, struct vfsmount *mnt)
-{
- struct audit_aux_data_path *ax;
- struct audit_context *context = current->audit_context;
-
- if (likely(!context))
- return 0;
-
- ax = kmalloc(sizeof(*ax), GFP_ATOMIC);
- if (!ax)
- return -ENOMEM;
-
- ax->dentry = dget(dentry);
- ax->mnt = mntget(mnt);
-
- ax->d.type = AUDIT_AVC_PATH;
- ax->d.next = context->aux;
- context->aux = (void *)ax;
- return 0;
-}
-
-/**
* audit_signal_info - record signal info for shutting down audit subsystem
* @sig: signal value
* @t: task being signaled
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index b4796d850140..57e6448b171e 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -516,7 +516,7 @@ static void cpuset_release_agent(const char *pathbuf)
envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
envp[i] = NULL;
- call_usermodehelper(argv[0], argv, envp, 0);
+ call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
kfree(pathbuf);
}
diff --git a/kernel/exit.c b/kernel/exit.c
index e8af8d0c2483..464c2b172f07 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -45,6 +45,7 @@
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
+#include <linux/freezer.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -594,6 +595,8 @@ static void exit_mm(struct task_struct * tsk)
tsk->mm = NULL;
up_read(&mm->mmap_sem);
enter_lazy_tlb(mm, current);
+ /* We don't want this task to be frozen prematurely */
+ clear_freeze_flag(tsk);
task_unlock(tsk);
mmput(mm);
}
diff --git a/kernel/fork.c b/kernel/fork.c
index ba39bdb2a7b8..7332e236d367 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -137,7 +137,7 @@ void __init fork_init(unsigned long mempages)
/* create a slab on which task_structs can be allocated */
task_struct_cachep =
kmem_cache_create("task_struct", sizeof(struct task_struct),
- ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL, NULL);
+ ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
#endif
/*
@@ -334,6 +334,8 @@ static struct mm_struct * mm_init(struct mm_struct * mm)
atomic_set(&mm->mm_count, 1);
init_rwsem(&mm->mmap_sem);
INIT_LIST_HEAD(&mm->mmlist);
+ mm->flags = (current->mm) ? current->mm->flags
+ : MMF_DUMP_FILTER_DEFAULT;
mm->core_waiters = 0;
mm->nr_ptes = 0;
set_mm_counter(mm, file_rss, 0);
@@ -1444,22 +1446,22 @@ void __init proc_caches_init(void)
sighand_cachep = kmem_cache_create("sighand_cache",
sizeof(struct sighand_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
- sighand_ctor, NULL);
+ sighand_ctor);
signal_cachep = kmem_cache_create("signal_cache",
sizeof(struct signal_struct), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
- files_cachep = kmem_cache_create("files_cache",
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ files_cachep = kmem_cache_create("files_cache",
sizeof(struct files_struct), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
- fs_cachep = kmem_cache_create("fs_cache",
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ fs_cachep = kmem_cache_create("fs_cache",
sizeof(struct fs_struct), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
vm_area_cachep = kmem_cache_create("vm_area_struct",
sizeof(struct vm_area_struct), 0,
- SLAB_PANIC, NULL, NULL);
+ SLAB_PANIC, NULL);
mm_cachep = kmem_cache_create("mm_struct",
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
}
/*
diff --git a/kernel/futex.c b/kernel/futex.c
index 5c3f45d07c53..a12425051ee9 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -346,15 +346,20 @@ static int futex_handle_fault(unsigned long address,
vma = find_vma(mm, address);
if (vma && address >= vma->vm_start &&
(vma->vm_flags & VM_WRITE)) {
- switch (handle_mm_fault(mm, vma, address, 1)) {
- case VM_FAULT_MINOR:
- ret = 0;
- current->min_flt++;
- break;
- case VM_FAULT_MAJOR:
+ int fault;
+ fault = handle_mm_fault(mm, vma, address, 1);
+ if (unlikely((fault & VM_FAULT_ERROR))) {
+#if 0
+ /* XXX: let's do this when we verify it is OK */
+ if (ret & VM_FAULT_OOM)
+ ret = -ENOMEM;
+#endif
+ } else {
ret = 0;
- current->maj_flt++;
- break;
+ if (fault & VM_FAULT_MAJOR)
+ current->maj_flt++;
+ else
+ current->min_flt++;
}
}
if (!fshared)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 72d034258ba1..eb1ddebd2c04 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -558,7 +558,8 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
*/
static int hrtimer_switch_to_hres(void)
{
- struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
+ int cpu = smp_processor_id();
+ struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
unsigned long flags;
if (base->hres_active)
@@ -568,6 +569,8 @@ static int hrtimer_switch_to_hres(void)
if (tick_init_highres()) {
local_irq_restore(flags);
+ printk(KERN_WARNING "Could not switch to high resolution "
+ "mode on CPU %d\n", cpu);
return 0;
}
base->hres_active = 1;
@@ -683,6 +686,7 @@ static void enqueue_hrtimer(struct hrtimer *timer,
struct rb_node **link = &base->active.rb_node;
struct rb_node *parent = NULL;
struct hrtimer *entry;
+ int leftmost = 1;
/*
* Find the right place in the rbtree:
@@ -694,18 +698,19 @@ static void enqueue_hrtimer(struct hrtimer *timer,
* We dont care about collisions. Nodes with
* the same expiry time stay together.
*/
- if (timer->expires.tv64 < entry->expires.tv64)
+ if (timer->expires.tv64 < entry->expires.tv64) {
link = &(*link)->rb_left;
- else
+ } else {
link = &(*link)->rb_right;
+ leftmost = 0;
+ }
}
/*
* Insert the timer to the rbtree and check whether it
* replaces the first pending timer
*/
- if (!base->first || timer->expires.tv64 <
- rb_entry(base->first, struct hrtimer, node)->expires.tv64) {
+ if (leftmost) {
/*
* Reprogram the clock event device. When the timer is already
* expired hrtimer_enqueue_reprogram has either called the
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index b4f1674fca79..50b81b98046a 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -19,7 +19,15 @@ static struct proc_dir_entry *root_irq_dir;
static int irq_affinity_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
- int len = cpumask_scnprintf(page, count, irq_desc[(long)data].affinity);
+ struct irq_desc *desc = irq_desc + (long)data;
+ cpumask_t *mask = &desc->affinity;
+ int len;
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+ if (desc->status & IRQ_MOVE_PENDING)
+ mask = &desc->pending_mask;
+#endif
+ len = cpumask_scnprintf(page, count, *mask);
if (count - len < 2)
return -EINVAL;
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 4d32eb077179..beedbdc64608 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -33,6 +33,8 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
+#include <linux/notifier.h>
+#include <linux/suspend.h>
#include <asm/uaccess.h>
extern int max_threads;
@@ -119,9 +121,10 @@ struct subprocess_info {
char **argv;
char **envp;
struct key *ring;
- int wait;
+ enum umh_wait wait;
int retval;
struct file *stdin;
+ void (*cleanup)(char **argv, char **envp);
};
/*
@@ -180,6 +183,14 @@ static int ____call_usermodehelper(void *data)
do_exit(0);
}
+void call_usermodehelper_freeinfo(struct subprocess_info *info)
+{
+ if (info->cleanup)
+ (*info->cleanup)(info->argv, info->envp);
+ kfree(info);
+}
+EXPORT_SYMBOL(call_usermodehelper_freeinfo);
+
/* Keventd can't block, but this (a child) can. */
static int wait_for_helper(void *data)
{
@@ -216,8 +227,8 @@ static int wait_for_helper(void *data)
sub_info->retval = ret;
}
- if (sub_info->wait < 0)
- kfree(sub_info);
+ if (sub_info->wait == UMH_NO_WAIT)
+ call_usermodehelper_freeinfo(sub_info);
else
complete(sub_info->complete);
return 0;
@@ -229,34 +240,204 @@ static void __call_usermodehelper(struct work_struct *work)
struct subprocess_info *sub_info =
container_of(work, struct subprocess_info, work);
pid_t pid;
- int wait = sub_info->wait;
+ enum umh_wait wait = sub_info->wait;
/* CLONE_VFORK: wait until the usermode helper has execve'd
* successfully We need the data structures to stay around
* until that is done. */
- if (wait)
+ if (wait == UMH_WAIT_PROC || wait == UMH_NO_WAIT)
pid = kernel_thread(wait_for_helper, sub_info,
CLONE_FS | CLONE_FILES | SIGCHLD);
else
pid = kernel_thread(____call_usermodehelper, sub_info,
CLONE_VFORK | SIGCHLD);
- if (wait < 0)
- return;
+ switch (wait) {
+ case UMH_NO_WAIT:
+ break;
- if (pid < 0) {
+ case UMH_WAIT_PROC:
+ if (pid > 0)
+ break;
sub_info->retval = pid;
+ /* FALLTHROUGH */
+
+ case UMH_WAIT_EXEC:
complete(sub_info->complete);
- } else if (!wait)
- complete(sub_info->complete);
+ }
+}
+
+#ifdef CONFIG_PM
+/*
+ * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
+ * (used for preventing user land processes from being created after the user
+ * land has been frozen during a system-wide hibernation or suspend operation).
+ */
+static int usermodehelper_disabled;
+
+/* Number of helpers running */
+static atomic_t running_helpers = ATOMIC_INIT(0);
+
+/*
+ * Wait queue head used by usermodehelper_pm_callback() to wait for all running
+ * helpers to finish.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);
+
+/*
+ * Time to wait for running_helpers to become zero before the setting of
+ * usermodehelper_disabled in usermodehelper_pm_callback() fails
+ */
+#define RUNNING_HELPERS_TIMEOUT (5 * HZ)
+
+static int usermodehelper_pm_callback(struct notifier_block *nfb,
+ unsigned long action,
+ void *ignored)
+{
+ long retval;
+
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ usermodehelper_disabled = 1;
+ smp_mb();
+ /*
+ * From now on call_usermodehelper_exec() won't start any new
+ * helpers, so it is sufficient if running_helpers turns out to
+ * be zero at one point (it may be increased later, but that
+ * doesn't matter).
+ */
+ retval = wait_event_timeout(running_helpers_waitq,
+ atomic_read(&running_helpers) == 0,
+ RUNNING_HELPERS_TIMEOUT);
+ if (retval) {
+ return NOTIFY_OK;
+ } else {
+ usermodehelper_disabled = 0;
+ return NOTIFY_BAD;
+ }
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ usermodehelper_disabled = 0;
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static void helper_lock(void)
+{
+ atomic_inc(&running_helpers);
+ smp_mb__after_atomic_inc();
+}
+
+static void helper_unlock(void)
+{
+ if (atomic_dec_and_test(&running_helpers))
+ wake_up(&running_helpers_waitq);
+}
+
+static void register_pm_notifier_callback(void)
+{
+ pm_notifier(usermodehelper_pm_callback, 0);
}
+#else /* CONFIG_PM */
+#define usermodehelper_disabled 0
+
+static inline void helper_lock(void) {}
+static inline void helper_unlock(void) {}
+static inline void register_pm_notifier_callback(void) {}
+#endif /* CONFIG_PM */
/**
- * call_usermodehelper_keys - start a usermode application
- * @path: pathname for the application
- * @argv: null-terminated argument list
- * @envp: null-terminated environment list
- * @session_keyring: session keyring for process (NULL for an empty keyring)
+ * call_usermodehelper_setup - prepare to call a usermode helper
+ * @path - path to usermode executable
+ * @argv - arg vector for process
+ * @envp - environment for process
+ *
+ * Returns either NULL on allocation failure, or a subprocess_info
+ * structure. This should be passed to call_usermodehelper_exec to
+ * exec the process and free the structure.
+ */
+struct subprocess_info *call_usermodehelper_setup(char *path,
+ char **argv, char **envp)
+{
+ struct subprocess_info *sub_info;
+ sub_info = kzalloc(sizeof(struct subprocess_info), GFP_ATOMIC);
+ if (!sub_info)
+ goto out;
+
+ INIT_WORK(&sub_info->work, __call_usermodehelper);
+ sub_info->path = path;
+ sub_info->argv = argv;
+ sub_info->envp = envp;
+
+ out:
+ return sub_info;
+}
+EXPORT_SYMBOL(call_usermodehelper_setup);
+
+/**
+ * call_usermodehelper_setkeys - set the session keys for usermode helper
+ * @info: a subprocess_info returned by call_usermodehelper_setup
+ * @session_keyring: the session keyring for the process
+ */
+void call_usermodehelper_setkeys(struct subprocess_info *info,
+ struct key *session_keyring)
+{
+ info->ring = session_keyring;
+}
+EXPORT_SYMBOL(call_usermodehelper_setkeys);
+
+/**
+ * call_usermodehelper_setcleanup - set a cleanup function
+ * @info: a subprocess_info returned by call_usermodehelper_setup
+ * @cleanup: a cleanup function
+ *
+ * The cleanup function is called just before the subprocess_info is about
+ * to be freed. This can be used for freeing the argv and envp. The
+ * function must be runnable in either a process context or the
+ * context in which call_usermodehelper_exec is called.
+ */
+void call_usermodehelper_setcleanup(struct subprocess_info *info,
+ void (*cleanup)(char **argv, char **envp))
+{
+ info->cleanup = cleanup;
+}
+EXPORT_SYMBOL(call_usermodehelper_setcleanup);
+
+/**
+ * call_usermodehelper_stdinpipe - set up a pipe to be used for stdin
+ * @sub_info: a subprocess_info returned by call_usermodehelper_setup
+ * @filp: set to the write-end of a pipe
+ *
+ * This constructs a pipe, and sets the read end to be the stdin of the
+ * subprocess, and returns the write-end in *@filp.
+ */
+int call_usermodehelper_stdinpipe(struct subprocess_info *sub_info,
+ struct file **filp)
+{
+ struct file *f;
+
+ f = create_write_pipe();
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+ *filp = f;
+
+ f = create_read_pipe(f);
+ if (IS_ERR(f)) {
+ free_write_pipe(*filp);
+ return PTR_ERR(f);
+ }
+ sub_info->stdin = f;
+
+ return 0;
+}
+EXPORT_SYMBOL(call_usermodehelper_stdinpipe);
+
+/**
+ * call_usermodehelper_exec - start a usermode application
+ * @sub_info: information about the subprocess
* @wait: wait for the application to finish and return status.
* when -1 don't wait at all, but you get no useful error back when
* the program couldn't be exec'ed. This makes it safe to call
@@ -265,81 +446,70 @@ static void __call_usermodehelper(struct work_struct *work)
* Runs a user-space application. The application is started
* asynchronously if wait is not set, and runs as a child of keventd.
* (ie. it runs with full root capabilities).
- *
- * Must be called from process context. Returns a negative error code
- * if program was not execed successfully, or 0.
*/
-int call_usermodehelper_keys(char *path, char **argv, char **envp,
- struct key *session_keyring, int wait)
+int call_usermodehelper_exec(struct subprocess_info *sub_info,
+ enum umh_wait wait)
{
DECLARE_COMPLETION_ONSTACK(done);
- struct subprocess_info *sub_info;
int retval;
- if (!khelper_wq)
- return -EBUSY;
-
- if (path[0] == '\0')
- return 0;
+ helper_lock();
+ if (sub_info->path[0] == '\0') {
+ retval = 0;
+ goto out;
+ }
- sub_info = kzalloc(sizeof(struct subprocess_info), GFP_ATOMIC);
- if (!sub_info)
- return -ENOMEM;
+ if (!khelper_wq || usermodehelper_disabled) {
+ retval = -EBUSY;
+ goto out;
+ }
- INIT_WORK(&sub_info->work, __call_usermodehelper);
sub_info->complete = &done;
- sub_info->path = path;
- sub_info->argv = argv;
- sub_info->envp = envp;
- sub_info->ring = session_keyring;
sub_info->wait = wait;
queue_work(khelper_wq, &sub_info->work);
- if (wait < 0) /* task has freed sub_info */
+ if (wait == UMH_NO_WAIT) /* task has freed sub_info */
return 0;
wait_for_completion(&done);
retval = sub_info->retval;
- kfree(sub_info);
+
+ out:
+ call_usermodehelper_freeinfo(sub_info);
+ helper_unlock();
return retval;
}
-EXPORT_SYMBOL(call_usermodehelper_keys);
+EXPORT_SYMBOL(call_usermodehelper_exec);
+/**
+ * call_usermodehelper_pipe - call a usermode helper process with a pipe stdin
+ * @path: path to usermode executable
+ * @argv: arg vector for process
+ * @envp: environment for process
+ * @filp: set to the write-end of a pipe
+ *
+ * This is a simple wrapper which executes a usermode-helper function
+ * with a pipe as stdin. It is implemented entirely in terms of
+ * lower-level call_usermodehelper_* functions.
+ */
int call_usermodehelper_pipe(char *path, char **argv, char **envp,
struct file **filp)
{
- DECLARE_COMPLETION(done);
- struct subprocess_info sub_info = {
- .work = __WORK_INITIALIZER(sub_info.work,
- __call_usermodehelper),
- .complete = &done,
- .path = path,
- .argv = argv,
- .envp = envp,
- .retval = 0,
- };
- struct file *f;
-
- if (!khelper_wq)
- return -EBUSY;
+ struct subprocess_info *sub_info;
+ int ret;
- if (path[0] == '\0')
- return 0;
+ sub_info = call_usermodehelper_setup(path, argv, envp);
+ if (sub_info == NULL)
+ return -ENOMEM;
- f = create_write_pipe();
- if (IS_ERR(f))
- return PTR_ERR(f);
- *filp = f;
+ ret = call_usermodehelper_stdinpipe(sub_info, filp);
+ if (ret < 0)
+ goto out;
- f = create_read_pipe(f);
- if (IS_ERR(f)) {
- free_write_pipe(*filp);
- return PTR_ERR(f);
- }
- sub_info.stdin = f;
+ return call_usermodehelper_exec(sub_info, 1);
- queue_work(khelper_wq, &sub_info.work);
- wait_for_completion(&done);
- return sub_info.retval;
+ out:
+ call_usermodehelper_freeinfo(sub_info);
+ return ret;
}
EXPORT_SYMBOL(call_usermodehelper_pipe);
@@ -347,4 +517,5 @@ void __init usermodehelper_init(void)
{
khelper_wq = create_singlethread_workqueue("khelper");
BUG_ON(!khelper_wq);
+ register_pm_notifier_callback();
}
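To put the refactored kmod interface above in context, a caller now builds a subprocess_info with call_usermodehelper_setup() and launches it with call_usermodehelper_exec(). A minimal sketch of a kernel-side user of the new API (the helper path and argument are hypothetical; error handling trimmed to the essentials):

#include <linux/kmod.h>
#include <linux/errno.h>

/* Hypothetical example: run /sbin/my-helper and wait for it to exit. */
static int run_my_helper(char *arg)
{
	struct subprocess_info *info;
	char *argv[] = { "/sbin/my-helper", arg, NULL };
	char *envp[] = { "HOME=/",
			 "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };

	info = call_usermodehelper_setup(argv[0], argv, envp);
	if (!info)
		return -ENOMEM;

	/*
	 * UMH_WAIT_PROC waits for the helper to exit and returns its
	 * status; call_usermodehelper_exec() frees info on return,
	 * except for UMH_NO_WAIT where the worker thread frees it.
	 */
	return call_usermodehelper_exec(info, UMH_WAIT_PROC);
}

This is the same shape the rewritten call_usermodehelper_pipe() takes in the hunk above, with call_usermodehelper_stdinpipe() slotted in between setup and exec.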
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 9e47d8c493f3..3e9f513a728d 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -675,9 +675,18 @@ static struct notifier_block kprobe_exceptions_nb = {
.priority = 0x7fffffff /* we need to be notified first */
};
+unsigned long __weak arch_deref_entry_point(void *entry)
+{
+ return (unsigned long)entry;
+}
int __kprobes register_jprobe(struct jprobe *jp)
{
+ unsigned long addr = arch_deref_entry_point(jp->entry);
+
+ if (!kernel_text_address(addr))
+ return -EINVAL;
+
/* Todo: Verify probepoint is a function entry point */
jp->kp.pre_handler = setjmp_pre_handler;
jp->kp.break_handler = longjmp_break_handler;
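The weak arch_deref_entry_point() above exists so that architectures whose function pointers are descriptors rather than text addresses can hand register_jprobe() something kernel_text_address() will accept. A hedged sketch of what such an override could look like (the descriptor layout here is hypothetical, not any real ABI's definition):

/* Hypothetical function-descriptor layout, for illustration only. */
struct fdesc {
	unsigned long addr;	/* actual code address */
	unsigned long toc;	/* per-module data pointer */
};

unsigned long arch_deref_entry_point(void *entry)
{
	/* Dereference the descriptor to reach the real entry point. */
	return ((struct fdesc *)entry)->addr;
}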
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 559deca5ed15..d0e5c48e18c7 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -62,6 +62,28 @@ static ssize_t kexec_crash_loaded_show(struct kset *kset, char *page)
KERNEL_ATTR_RO(kexec_crash_loaded);
#endif /* CONFIG_KEXEC */
+/*
+ * Make /sys/kernel/notes give the raw contents of our kernel .notes section.
+ */
+extern const void __start_notes __attribute__((weak));
+extern const void __stop_notes __attribute__((weak));
+#define notes_size (&__stop_notes - &__start_notes)
+
+static ssize_t notes_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ memcpy(buf, &__start_notes + off, count);
+ return count;
+}
+
+static struct bin_attribute notes_attr = {
+ .attr = {
+ .name = "notes",
+ .mode = S_IRUGO,
+ },
+ .read = &notes_read,
+};
+
decl_subsys(kernel, NULL, NULL);
EXPORT_SYMBOL_GPL(kernel_subsys);
@@ -88,6 +110,12 @@ static int __init ksysfs_init(void)
error = sysfs_create_group(&kernel_subsys.kobj,
&kernel_attr_group);
+ if (!error && notes_size > 0) {
+ notes_attr.size = notes_size;
+ error = sysfs_create_bin_file(&kernel_subsys.kobj,
+ &notes_attr);
+ }
+
return error;
}
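The ksysfs addition above publishes the kernel's .notes ELF section verbatim as the world-readable binary file /sys/kernel/notes. A minimal user-space sketch that reads it back (assuming sysfs is mounted at /sys; the bytes are raw ELF note records):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/notes", "rb");
	char buf[4096];
	size_t n, total = 0;

	if (!f) {
		perror("/sys/kernel/notes");
		return 1;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		total += n;		/* raw ELF notes; parse as needed */
	fclose(f);
	printf("%zu bytes of kernel .notes data\n", total);
	return 0;
}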
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index edba2ffb43de..734da579ad13 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -5,7 +5,8 @@
*
* Started by Ingo Molnar:
*
- * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
*
* this code maps all the lock dependencies as they occur in a live kernel
* and will warn about the following classes of locking bugs:
@@ -37,11 +38,26 @@
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
+#include <linux/hash.h>
#include <asm/sections.h>
#include "lockdep_internals.h"
+#ifdef CONFIG_PROVE_LOCKING
+int prove_locking = 1;
+module_param(prove_locking, int, 0644);
+#else
+#define prove_locking 0
+#endif
+
+#ifdef CONFIG_LOCK_STAT
+int lock_stat = 1;
+module_param(lock_stat, int, 0644);
+#else
+#define lock_stat 0
+#endif
+
/*
* lockdep_lock: protects the lockdep graph, the hashes and the
* class/list/hash allocators.
@@ -96,23 +112,6 @@ unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
/*
- * Allocate a lockdep entry. (assumes the graph_lock held, returns
- * with NULL on failure)
- */
-static struct lock_list *alloc_list_entry(void)
-{
- if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
- if (!debug_locks_off_graph_unlock())
- return NULL;
-
- printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
- printk("turning off the locking correctness validator.\n");
- return NULL;
- }
- return list_entries + nr_list_entries++;
-}
-
-/*
* All data structures here are protected by the global debug_lock.
*
* Mutex key structs only get allocated, once during bootup, and never
@@ -121,6 +120,117 @@ static struct lock_list *alloc_list_entry(void)
unsigned long nr_lock_classes;
static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
+#ifdef CONFIG_LOCK_STAT
+static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
+
+static int lock_contention_point(struct lock_class *class, unsigned long ip)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
+ if (class->contention_point[i] == 0) {
+ class->contention_point[i] = ip;
+ break;
+ }
+ if (class->contention_point[i] == ip)
+ break;
+ }
+
+ return i;
+}
+
+static void lock_time_inc(struct lock_time *lt, s64 time)
+{
+ if (time > lt->max)
+ lt->max = time;
+
+ if (time < lt->min || !lt->min)
+ lt->min = time;
+
+ lt->total += time;
+ lt->nr++;
+}
+
+static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
+{
+ dst->min += src->min;
+ dst->max += src->max;
+ dst->total += src->total;
+ dst->nr += src->nr;
+}
+
+struct lock_class_stats lock_stats(struct lock_class *class)
+{
+ struct lock_class_stats stats;
+ int cpu, i;
+
+ memset(&stats, 0, sizeof(struct lock_class_stats));
+ for_each_possible_cpu(cpu) {
+ struct lock_class_stats *pcs =
+ &per_cpu(lock_stats, cpu)[class - lock_classes];
+
+ for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
+ stats.contention_point[i] += pcs->contention_point[i];
+
+ lock_time_add(&pcs->read_waittime, &stats.read_waittime);
+ lock_time_add(&pcs->write_waittime, &stats.write_waittime);
+
+ lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
+ lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
+
+ for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
+ stats.bounces[i] += pcs->bounces[i];
+ }
+
+ return stats;
+}
+
+void clear_lock_stats(struct lock_class *class)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct lock_class_stats *cpu_stats =
+ &per_cpu(lock_stats, cpu)[class - lock_classes];
+
+ memset(cpu_stats, 0, sizeof(struct lock_class_stats));
+ }
+ memset(class->contention_point, 0, sizeof(class->contention_point));
+}
+
+static struct lock_class_stats *get_lock_stats(struct lock_class *class)
+{
+ return &get_cpu_var(lock_stats)[class - lock_classes];
+}
+
+static void put_lock_stats(struct lock_class_stats *stats)
+{
+ put_cpu_var(lock_stats);
+}
+
+static void lock_release_holdtime(struct held_lock *hlock)
+{
+ struct lock_class_stats *stats;
+ s64 holdtime;
+
+ if (!lock_stat)
+ return;
+
+ holdtime = sched_clock() - hlock->holdtime_stamp;
+
+ stats = get_lock_stats(hlock->class);
+ if (hlock->read)
+ lock_time_inc(&stats->read_holdtime, holdtime);
+ else
+ lock_time_inc(&stats->write_holdtime, holdtime);
+ put_lock_stats(stats);
+}
+#else
+static inline void lock_release_holdtime(struct held_lock *hlock)
+{
+}
+#endif
+
/*
* We keep a global list of all lock classes. The list only grows,
* never shrinks. The list is only accessed with the lockdep
@@ -133,24 +243,18 @@ LIST_HEAD(all_lock_classes);
*/
#define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE (1UL << CLASSHASH_BITS)
-#define CLASSHASH_MASK (CLASSHASH_SIZE - 1)
-#define __classhashfn(key) ((((unsigned long)key >> CLASSHASH_BITS) + (unsigned long)key) & CLASSHASH_MASK)
+#define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key) (classhash_table + __classhashfn((key)))
static struct list_head classhash_table[CLASSHASH_SIZE];
-unsigned long nr_lock_chains;
-static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
-
/*
* We put the lock dependency chains into a hash-table as well, to cache
* their existence:
*/
#define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE (1UL << CHAINHASH_BITS)
-#define CHAINHASH_MASK (CHAINHASH_SIZE - 1)
-#define __chainhashfn(chain) \
- (((chain >> CHAINHASH_BITS) + chain) & CHAINHASH_MASK)
+#define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
static struct list_head chainhash_table[CHAINHASH_SIZE];
@@ -223,26 +327,6 @@ static int verbose(struct lock_class *class)
return 0;
}
-#ifdef CONFIG_TRACE_IRQFLAGS
-
-static int hardirq_verbose(struct lock_class *class)
-{
-#if HARDIRQ_VERBOSE
- return class_filter(class);
-#endif
- return 0;
-}
-
-static int softirq_verbose(struct lock_class *class)
-{
-#if SOFTIRQ_VERBOSE
- return class_filter(class);
-#endif
- return 0;
-}
-
-#endif
-
/*
* Stack-trace: tightly packed array of stack backtrace
* addresses. Protected by the graph_lock.
@@ -291,6 +375,11 @@ unsigned int max_recursion_depth;
* about it later on, in lockdep_info().
*/
static int lockdep_init_error;
+static unsigned long lockdep_init_trace_data[20];
+static struct stack_trace lockdep_init_trace = {
+ .max_entries = ARRAY_SIZE(lockdep_init_trace_data),
+ .entries = lockdep_init_trace_data,
+};
/*
* Various lockdep statistics:
@@ -482,6 +571,262 @@ static void print_lock_dependencies(struct lock_class *class, int depth)
}
}
+static void print_kernel_version(void)
+{
+ printk("%s %.*s\n", init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+}
+
+static int very_verbose(struct lock_class *class)
+{
+#if VERY_VERBOSE
+ return class_filter(class);
+#endif
+ return 0;
+}
+
+/*
+ * Is this the address of a static object:
+ */
+static int static_obj(void *obj)
+{
+ unsigned long start = (unsigned long) &_stext,
+ end = (unsigned long) &_end,
+ addr = (unsigned long) obj;
+#ifdef CONFIG_SMP
+ int i;
+#endif
+
+ /*
+ * static variable?
+ */
+ if ((addr >= start) && (addr < end))
+ return 1;
+
+#ifdef CONFIG_SMP
+ /*
+ * percpu var?
+ */
+ for_each_possible_cpu(i) {
+ start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
+ end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
+ + per_cpu_offset(i);
+
+ if ((addr >= start) && (addr < end))
+ return 1;
+ }
+#endif
+
+ /*
+ * module var?
+ */
+ return is_module_address(addr);
+}
+
+/*
+ * To make lock name printouts unique, we calculate a unique
+ * class->name_version generation counter:
+ */
+static int count_matching_names(struct lock_class *new_class)
+{
+ struct lock_class *class;
+ int count = 0;
+
+ if (!new_class->name)
+ return 0;
+
+ list_for_each_entry(class, &all_lock_classes, lock_entry) {
+ if (new_class->key - new_class->subclass == class->key)
+ return class->name_version;
+ if (class->name && !strcmp(class->name, new_class->name))
+ count = max(count, class->name_version);
+ }
+
+ return count + 1;
+}
+
+/*
+ * Register a lock's class in the hash-table, if the class is not present
+ * yet. Otherwise we look it up. We cache the result in the lock object
+ * itself, so actual lookup of the hash should be once per lock object.
+ */
+static inline struct lock_class *
+look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
+{
+ struct lockdep_subclass_key *key;
+ struct list_head *hash_head;
+ struct lock_class *class;
+
+#ifdef CONFIG_DEBUG_LOCKDEP
+ /*
+ * If the architecture calls into lockdep before initializing
+ * the hashes then we'll warn about it later. (we cannot printk
+ * right now)
+ */
+ if (unlikely(!lockdep_initialized)) {
+ lockdep_init();
+ lockdep_init_error = 1;
+ save_stack_trace(&lockdep_init_trace);
+ }
+#endif
+
+ /*
+ * Static locks do not have their class-keys yet - for them the key
+ * is the lock object itself:
+ */
+ if (unlikely(!lock->key))
+ lock->key = (void *)lock;
+
+ /*
+ * NOTE: the class-key must be unique. For dynamic locks, a static
+ * lock_class_key variable is passed in through the mutex_init()
+ * (or spin_lock_init()) call - which acts as the key. For static
+ * locks we use the lock object itself as the key.
+ */
+ BUILD_BUG_ON(sizeof(struct lock_class_key) >
+ sizeof(struct lockdep_map));
+
+ key = lock->key->subkeys + subclass;
+
+ hash_head = classhashentry(key);
+
+ /*
+ * We can walk the hash lockfree, because the hash only
+ * grows, and we are careful when adding entries to the end:
+ */
+ list_for_each_entry(class, hash_head, hash_entry) {
+ if (class->key == key) {
+ WARN_ON_ONCE(class->name != lock->name);
+ return class;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * Register a lock's class in the hash-table, if the class is not present
+ * yet. Otherwise we look it up. We cache the result in the lock object
+ * itself, so actual lookup of the hash should be once per lock object.
+ */
+static inline struct lock_class *
+register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
+{
+ struct lockdep_subclass_key *key;
+ struct list_head *hash_head;
+ struct lock_class *class;
+ unsigned long flags;
+
+ class = look_up_lock_class(lock, subclass);
+ if (likely(class))
+ return class;
+
+ /*
+ * Debug-check: all keys must be persistent!
+ */
+ if (!static_obj(lock->key)) {
+ debug_locks_off();
+ printk("INFO: trying to register non-static key.\n");
+ printk("the code is fine but needs lockdep annotation.\n");
+ printk("turning off the locking correctness validator.\n");
+ dump_stack();
+
+ return NULL;
+ }
+
+ key = lock->key->subkeys + subclass;
+ hash_head = classhashentry(key);
+
+ raw_local_irq_save(flags);
+ if (!graph_lock()) {
+ raw_local_irq_restore(flags);
+ return NULL;
+ }
+ /*
+ * We have to do the hash-walk again, to avoid races
+ * with another CPU:
+ */
+ list_for_each_entry(class, hash_head, hash_entry)
+ if (class->key == key)
+ goto out_unlock_set;
+ /*
+ * Allocate a new key from the static array, and add it to
+ * the hash:
+ */
+ if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
+ if (!debug_locks_off_graph_unlock()) {
+ raw_local_irq_restore(flags);
+ return NULL;
+ }
+ raw_local_irq_restore(flags);
+
+ printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
+ printk("turning off the locking correctness validator.\n");
+ return NULL;
+ }
+ class = lock_classes + nr_lock_classes++;
+ debug_atomic_inc(&nr_unused_locks);
+ class->key = key;
+ class->name = lock->name;
+ class->subclass = subclass;
+ INIT_LIST_HEAD(&class->lock_entry);
+ INIT_LIST_HEAD(&class->locks_before);
+ INIT_LIST_HEAD(&class->locks_after);
+ class->name_version = count_matching_names(class);
+ /*
+ * We use RCU's safe list-add method to make
+ * parallel walking of the hash-list safe:
+ */
+ list_add_tail_rcu(&class->hash_entry, hash_head);
+
+ if (verbose(class)) {
+ graph_unlock();
+ raw_local_irq_restore(flags);
+
+ printk("\nnew class %p: %s", class->key, class->name);
+ if (class->name_version > 1)
+ printk("#%d", class->name_version);
+ printk("\n");
+ dump_stack();
+
+ raw_local_irq_save(flags);
+ if (!graph_lock()) {
+ raw_local_irq_restore(flags);
+ return NULL;
+ }
+ }
+out_unlock_set:
+ graph_unlock();
+ raw_local_irq_restore(flags);
+
+ if (!subclass || force)
+ lock->class_cache = class;
+
+ if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
+ return NULL;
+
+ return class;
+}
+
+#ifdef CONFIG_PROVE_LOCKING
+/*
+ * Allocate a lockdep entry. (assumes the graph_lock held, returns
+ * with NULL on failure)
+ */
+static struct lock_list *alloc_list_entry(void)
+{
+ if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
+ if (!debug_locks_off_graph_unlock())
+ return NULL;
+
+ printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
+ printk("turning off the locking correctness validator.\n");
+ return NULL;
+ }
+ return list_entries + nr_list_entries++;
+}
+
/*
* Add a new dependency to the head of the list:
*/
@@ -542,13 +887,6 @@ print_circular_bug_entry(struct lock_list *target, unsigned int depth)
return 0;
}
-static void print_kernel_version(void)
-{
- printk("%s %.*s\n", init_utsname()->release,
- (int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
-}
-
/*
* When a circular dependency is detected, print the
* header first:
@@ -640,15 +978,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
return 1;
}
-static int very_verbose(struct lock_class *class)
-{
-#if VERY_VERBOSE
- return class_filter(class);
-#endif
- return 0;
-}
#ifdef CONFIG_TRACE_IRQFLAGS
-
/*
* Forwards and backwards subgraph searching, for the purposes of
* proving that two subgraphs can be connected by a new dependency
@@ -821,6 +1151,78 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
bit_backwards, bit_forwards, irqclass);
}
+static int
+check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
+ struct held_lock *next)
+{
+ /*
+ * Prove that the new dependency does not connect a hardirq-safe
+ * lock with a hardirq-unsafe lock - to achieve this we search
+ * the backwards-subgraph starting at <prev>, and the
+ * forwards-subgraph starting at <next>:
+ */
+ if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
+ LOCK_ENABLED_HARDIRQS, "hard"))
+ return 0;
+
+ /*
+ * Prove that the new dependency does not connect a hardirq-safe-read
+ * lock with a hardirq-unsafe lock - to achieve this we search
+ * the backwards-subgraph starting at <prev>, and the
+ * forwards-subgraph starting at <next>:
+ */
+ if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
+ LOCK_ENABLED_HARDIRQS, "hard-read"))
+ return 0;
+
+ /*
+ * Prove that the new dependency does not connect a softirq-safe
+ * lock with a softirq-unsafe lock - to achieve this we search
+ * the backwards-subgraph starting at <prev>, and the
+ * forwards-subgraph starting at <next>:
+ */
+ if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
+ LOCK_ENABLED_SOFTIRQS, "soft"))
+ return 0;
+ /*
+ * Prove that the new dependency does not connect a softirq-safe-read
+ * lock with a softirq-unsafe lock - to achieve this we search
+ * the backwards-subgraph starting at <prev>, and the
+ * forwards-subgraph starting at <next>:
+ */
+ if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
+ LOCK_ENABLED_SOFTIRQS, "soft"))
+ return 0;
+
+ return 1;
+}
+
+static void inc_chains(void)
+{
+ if (current->hardirq_context)
+ nr_hardirq_chains++;
+ else {
+ if (current->softirq_context)
+ nr_softirq_chains++;
+ else
+ nr_process_chains++;
+ }
+}
+
+#else
+
+static inline int
+check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
+ struct held_lock *next)
+{
+ return 1;
+}
+
+static inline void inc_chains(void)
+{
+ nr_process_chains++;
+}
+
#endif
static int
@@ -922,47 +1324,10 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
if (!(check_noncircular(next->class, 0)))
return print_circular_bug_tail();
-#ifdef CONFIG_TRACE_IRQFLAGS
- /*
- * Prove that the new dependency does not connect a hardirq-safe
- * lock with a hardirq-unsafe lock - to achieve this we search
- * the backwards-subgraph starting at <prev>, and the
- * forwards-subgraph starting at <next>:
- */
- if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
- LOCK_ENABLED_HARDIRQS, "hard"))
+ if (!check_prev_add_irq(curr, prev, next))
return 0;
/*
- * Prove that the new dependency does not connect a hardirq-safe-read
- * lock with a hardirq-unsafe lock - to achieve this we search
- * the backwards-subgraph starting at <prev>, and the
- * forwards-subgraph starting at <next>:
- */
- if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
- LOCK_ENABLED_HARDIRQS, "hard-read"))
- return 0;
-
- /*
- * Prove that the new dependency does not connect a softirq-safe
- * lock with a softirq-unsafe lock - to achieve this we search
- * the backwards-subgraph starting at <prev>, and the
- * forwards-subgraph starting at <next>:
- */
- if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
- LOCK_ENABLED_SOFTIRQS, "soft"))
- return 0;
- /*
- * Prove that the new dependency does not connect a softirq-safe-read
- * lock with a softirq-unsafe lock - to achieve this we search
- * the backwards-subgraph starting at <prev>, and the
- * forwards-subgraph starting at <next>:
- */
- if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
- LOCK_ENABLED_SOFTIRQS, "soft"))
- return 0;
-#endif
- /*
* For recursive read-locks we do all the dependency checks,
* but we dont store read-triggered dependencies (only
* write-triggered dependencies). This ensures that only the
@@ -1088,224 +1453,8 @@ out_bug:
return 0;
}
-
-/*
- * Is this the address of a static object:
- */
-static int static_obj(void *obj)
-{
- unsigned long start = (unsigned long) &_stext,
- end = (unsigned long) &_end,
- addr = (unsigned long) obj;
-#ifdef CONFIG_SMP
- int i;
-#endif
-
- /*
- * static variable?
- */
- if ((addr >= start) && (addr < end))
- return 1;
-
-#ifdef CONFIG_SMP
- /*
- * percpu var?
- */
- for_each_possible_cpu(i) {
- start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
- + per_cpu_offset(i);
-
- if ((addr >= start) && (addr < end))
- return 1;
- }
-#endif
-
- /*
- * module var?
- */
- return is_module_address(addr);
-}
-
-/*
- * To make lock name printouts unique, we calculate a unique
- * class->name_version generation counter:
- */
-static int count_matching_names(struct lock_class *new_class)
-{
- struct lock_class *class;
- int count = 0;
-
- if (!new_class->name)
- return 0;
-
- list_for_each_entry(class, &all_lock_classes, lock_entry) {
- if (new_class->key - new_class->subclass == class->key)
- return class->name_version;
- if (class->name && !strcmp(class->name, new_class->name))
- count = max(count, class->name_version);
- }
-
- return count + 1;
-}
-
-/*
- * Register a lock's class in the hash-table, if the class is not present
- * yet. Otherwise we look it up. We cache the result in the lock object
- * itself, so actual lookup of the hash should be once per lock object.
- */
-static inline struct lock_class *
-look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
-{
- struct lockdep_subclass_key *key;
- struct list_head *hash_head;
- struct lock_class *class;
-
-#ifdef CONFIG_DEBUG_LOCKDEP
- /*
- * If the architecture calls into lockdep before initializing
- * the hashes then we'll warn about it later. (we cannot printk
- * right now)
- */
- if (unlikely(!lockdep_initialized)) {
- lockdep_init();
- lockdep_init_error = 1;
- }
-#endif
-
- /*
- * Static locks do not have their class-keys yet - for them the key
- * is the lock object itself:
- */
- if (unlikely(!lock->key))
- lock->key = (void *)lock;
-
- /*
- * NOTE: the class-key must be unique. For dynamic locks, a static
- * lock_class_key variable is passed in through the mutex_init()
- * (or spin_lock_init()) call - which acts as the key. For static
- * locks we use the lock object itself as the key.
- */
- BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(struct lock_class));
-
- key = lock->key->subkeys + subclass;
-
- hash_head = classhashentry(key);
-
- /*
- * We can walk the hash lockfree, because the hash only
- * grows, and we are careful when adding entries to the end:
- */
- list_for_each_entry(class, hash_head, hash_entry)
- if (class->key == key)
- return class;
-
- return NULL;
-}
-
-/*
- * Register a lock's class in the hash-table, if the class is not present
- * yet. Otherwise we look it up. We cache the result in the lock object
- * itself, so actual lookup of the hash should be once per lock object.
- */
-static inline struct lock_class *
-register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
-{
- struct lockdep_subclass_key *key;
- struct list_head *hash_head;
- struct lock_class *class;
- unsigned long flags;
-
- class = look_up_lock_class(lock, subclass);
- if (likely(class))
- return class;
-
- /*
- * Debug-check: all keys must be persistent!
- */
- if (!static_obj(lock->key)) {
- debug_locks_off();
- printk("INFO: trying to register non-static key.\n");
- printk("the code is fine but needs lockdep annotation.\n");
- printk("turning off the locking correctness validator.\n");
- dump_stack();
-
- return NULL;
- }
-
- key = lock->key->subkeys + subclass;
- hash_head = classhashentry(key);
-
- raw_local_irq_save(flags);
- if (!graph_lock()) {
- raw_local_irq_restore(flags);
- return NULL;
- }
- /*
- * We have to do the hash-walk again, to avoid races
- * with another CPU:
- */
- list_for_each_entry(class, hash_head, hash_entry)
- if (class->key == key)
- goto out_unlock_set;
- /*
- * Allocate a new key from the static array, and add it to
- * the hash:
- */
- if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
- if (!debug_locks_off_graph_unlock()) {
- raw_local_irq_restore(flags);
- return NULL;
- }
- raw_local_irq_restore(flags);
-
- printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
- printk("turning off the locking correctness validator.\n");
- return NULL;
- }
- class = lock_classes + nr_lock_classes++;
- debug_atomic_inc(&nr_unused_locks);
- class->key = key;
- class->name = lock->name;
- class->subclass = subclass;
- INIT_LIST_HEAD(&class->lock_entry);
- INIT_LIST_HEAD(&class->locks_before);
- INIT_LIST_HEAD(&class->locks_after);
- class->name_version = count_matching_names(class);
- /*
- * We use RCU's safe list-add method to make
- * parallel walking of the hash-list safe:
- */
- list_add_tail_rcu(&class->hash_entry, hash_head);
-
- if (verbose(class)) {
- graph_unlock();
- raw_local_irq_restore(flags);
-
- printk("\nnew class %p: %s", class->key, class->name);
- if (class->name_version > 1)
- printk("#%d", class->name_version);
- printk("\n");
- dump_stack();
-
- raw_local_irq_save(flags);
- if (!graph_lock()) {
- raw_local_irq_restore(flags);
- return NULL;
- }
- }
-out_unlock_set:
- graph_unlock();
- raw_local_irq_restore(flags);
-
- if (!subclass || force)
- lock->class_cache = class;
-
- if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
- return NULL;
-
- return class;
-}
+unsigned long nr_lock_chains;
+static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
/*
* Look up a dependency chain. If the key is not present yet then
@@ -1366,21 +1515,72 @@ cache_hit:
chain->chain_key = chain_key;
list_add_tail_rcu(&chain->entry, hash_head);
debug_atomic_inc(&chain_lookup_misses);
-#ifdef CONFIG_TRACE_IRQFLAGS
- if (current->hardirq_context)
- nr_hardirq_chains++;
- else {
- if (current->softirq_context)
- nr_softirq_chains++;
- else
- nr_process_chains++;
- }
-#else
- nr_process_chains++;
-#endif
+ inc_chains();
+
+ return 1;
+}
+
+static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
+ struct held_lock *hlock, int chain_head)
+{
+ /*
+ * Trylock needs to maintain the stack of held locks, but it
+ * does not add new dependencies, because trylock can be done
+ * in any order.
+ *
+ * We look up the chain_key and do the O(N^2) check and update of
+ * the dependencies only if this is a new dependency chain.
+ * (If lookup_chain_cache() returns with 1 it acquires
+ * graph_lock for us)
+ */
+ if (!hlock->trylock && (hlock->check == 2) &&
+ lookup_chain_cache(curr->curr_chain_key, hlock->class)) {
+ /*
+ * Check whether last held lock:
+ *
+ * - is irq-safe, if this lock is irq-unsafe
+ * - is softirq-safe, if this lock is hardirq-unsafe
+ *
+ * And check whether the new lock's dependency graph
+ * could lead back to the previous lock.
+ *
+ * any of these scenarios could lead to a deadlock. If
+ * All validations
+ */
+ int ret = check_deadlock(curr, hlock, lock, hlock->read);
+
+ if (!ret)
+ return 0;
+ /*
+ * Mark recursive read, as we jump over it when
+ * building dependencies (just like we jump over
+ * trylock entries):
+ */
+ if (ret == 2)
+ hlock->read = 2;
+ /*
+ * Add dependency only if this lock is not the head
+ * of the chain, and if it's not a secondary read-lock:
+ */
+ if (!chain_head && ret != 2)
+ if (!check_prevs_add(curr, hlock))
+ return 0;
+ graph_unlock();
+ } else
+ /* after lookup_chain_cache(): */
+ if (unlikely(!debug_locks))
+ return 0;
return 1;
}
+#else
+static inline int validate_chain(struct task_struct *curr,
+ struct lockdep_map *lock, struct held_lock *hlock,
+ int chain_head)
+{
+ return 1;
+}
+#endif
/*
* We are building curr_chain_key incrementally, so double-check
@@ -1425,6 +1625,57 @@ static void check_chain_key(struct task_struct *curr)
#endif
}
+static int
+print_usage_bug(struct task_struct *curr, struct held_lock *this,
+ enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
+{
+ if (!debug_locks_off_graph_unlock() || debug_locks_silent)
+ return 0;
+
+ printk("\n=================================\n");
+ printk( "[ INFO: inconsistent lock state ]\n");
+ print_kernel_version();
+ printk( "---------------------------------\n");
+
+ printk("inconsistent {%s} -> {%s} usage.\n",
+ usage_str[prev_bit], usage_str[new_bit]);
+
+ printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
+ curr->comm, curr->pid,
+ trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
+ trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
+ trace_hardirqs_enabled(curr),
+ trace_softirqs_enabled(curr));
+ print_lock(this);
+
+ printk("{%s} state was registered at:\n", usage_str[prev_bit]);
+ print_stack_trace(this->class->usage_traces + prev_bit, 1);
+
+ print_irqtrace_events(curr);
+ printk("\nother info that might help us debug this:\n");
+ lockdep_print_held_locks(curr);
+
+ printk("\nstack backtrace:\n");
+ dump_stack();
+
+ return 0;
+}
+
+/*
+ * Print out an error if an invalid bit is set:
+ */
+static inline int
+valid_state(struct task_struct *curr, struct held_lock *this,
+ enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
+{
+ if (unlikely(this->class->usage_mask & (1 << bad_bit)))
+ return print_usage_bug(curr, this, bad_bit, new_bit);
+ return 1;
+}
+
+static int mark_lock(struct task_struct *curr, struct held_lock *this,
+ enum lock_usage_bit new_bit);
+
#ifdef CONFIG_TRACE_IRQFLAGS
/*
@@ -1518,90 +1769,30 @@ void print_irqtrace_events(struct task_struct *curr)
print_ip_sym(curr->softirq_disable_ip);
}
-#endif
-
-static int
-print_usage_bug(struct task_struct *curr, struct held_lock *this,
- enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
+static int hardirq_verbose(struct lock_class *class)
{
- if (!debug_locks_off_graph_unlock() || debug_locks_silent)
- return 0;
-
- printk("\n=================================\n");
- printk( "[ INFO: inconsistent lock state ]\n");
- print_kernel_version();
- printk( "---------------------------------\n");
-
- printk("inconsistent {%s} -> {%s} usage.\n",
- usage_str[prev_bit], usage_str[new_bit]);
-
- printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
- curr->comm, curr->pid,
- trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
- trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
- trace_hardirqs_enabled(curr),
- trace_softirqs_enabled(curr));
- print_lock(this);
-
- printk("{%s} state was registered at:\n", usage_str[prev_bit]);
- print_stack_trace(this->class->usage_traces + prev_bit, 1);
-
- print_irqtrace_events(curr);
- printk("\nother info that might help us debug this:\n");
- lockdep_print_held_locks(curr);
-
- printk("\nstack backtrace:\n");
- dump_stack();
-
+#if HARDIRQ_VERBOSE
+ return class_filter(class);
+#endif
return 0;
}
-/*
- * Print out an error if an invalid bit is set:
- */
-static inline int
-valid_state(struct task_struct *curr, struct held_lock *this,
- enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
+static int softirq_verbose(struct lock_class *class)
{
- if (unlikely(this->class->usage_mask & (1 << bad_bit)))
- return print_usage_bug(curr, this, bad_bit, new_bit);
- return 1;
+#if SOFTIRQ_VERBOSE
+ return class_filter(class);
+#endif
+ return 0;
}
#define STRICT_READ_CHECKS 1
-/*
- * Mark a lock with a usage bit, and validate the state transition:
- */
-static int mark_lock(struct task_struct *curr, struct held_lock *this,
- enum lock_usage_bit new_bit)
+static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
+ enum lock_usage_bit new_bit)
{
- unsigned int new_mask = 1 << new_bit, ret = 1;
-
- /*
- * If already set then do not dirty the cacheline,
- * nor do any checks:
- */
- if (likely(this->class->usage_mask & new_mask))
- return 1;
-
- if (!graph_lock())
- return 0;
- /*
- * Make sure we didnt race:
- */
- if (unlikely(this->class->usage_mask & new_mask)) {
- graph_unlock();
- return 1;
- }
-
- this->class->usage_mask |= new_mask;
+ int ret = 1;
- if (!save_trace(this->class->usage_traces + new_bit))
- return 0;
-
- switch (new_bit) {
-#ifdef CONFIG_TRACE_IRQFLAGS
+ switch(new_bit) {
case LOCK_USED_IN_HARDIRQ:
if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
return 0;
@@ -1760,37 +1951,14 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
if (softirq_verbose(this->class))
ret = 2;
break;
-#endif
- case LOCK_USED:
- /*
- * Add it to the global list of classes:
- */
- list_add_tail_rcu(&this->class->lock_entry, &all_lock_classes);
- debug_atomic_dec(&nr_unused_locks);
- break;
default:
- if (!debug_locks_off_graph_unlock())
- return 0;
WARN_ON(1);
- return 0;
- }
-
- graph_unlock();
-
- /*
- * We must printk outside of the graph_lock:
- */
- if (ret == 2) {
- printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
- print_lock(this);
- print_irqtrace_events(curr);
- dump_stack();
+ break;
}
return ret;
}
-#ifdef CONFIG_TRACE_IRQFLAGS
/*
* Mark all held locks with a usage bit:
*/
@@ -1973,9 +2141,176 @@ void trace_softirqs_off(unsigned long ip)
debug_atomic_inc(&redundant_softirqs_off);
}
+static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
+{
+ /*
+ * If non-trylock use in a hardirq or softirq context, then
+ * mark the lock as used in these contexts:
+ */
+ if (!hlock->trylock) {
+ if (hlock->read) {
+ if (curr->hardirq_context)
+ if (!mark_lock(curr, hlock,
+ LOCK_USED_IN_HARDIRQ_READ))
+ return 0;
+ if (curr->softirq_context)
+ if (!mark_lock(curr, hlock,
+ LOCK_USED_IN_SOFTIRQ_READ))
+ return 0;
+ } else {
+ if (curr->hardirq_context)
+ if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
+ return 0;
+ if (curr->softirq_context)
+ if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
+ return 0;
+ }
+ }
+ if (!hlock->hardirqs_off) {
+ if (hlock->read) {
+ if (!mark_lock(curr, hlock,
+ LOCK_ENABLED_HARDIRQS_READ))
+ return 0;
+ if (curr->softirqs_enabled)
+ if (!mark_lock(curr, hlock,
+ LOCK_ENABLED_SOFTIRQS_READ))
+ return 0;
+ } else {
+ if (!mark_lock(curr, hlock,
+ LOCK_ENABLED_HARDIRQS))
+ return 0;
+ if (curr->softirqs_enabled)
+ if (!mark_lock(curr, hlock,
+ LOCK_ENABLED_SOFTIRQS))
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static int separate_irq_context(struct task_struct *curr,
+ struct held_lock *hlock)
+{
+ unsigned int depth = curr->lockdep_depth;
+
+ /*
+ * Keep track of points where we cross into an interrupt context:
+ */
+ hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
+ curr->softirq_context;
+ if (depth) {
+ struct held_lock *prev_hlock;
+
+ prev_hlock = curr->held_locks + depth-1;
+ /*
+ * If we cross into another context, reset the
+ * hash key (this also prevents the checking and the
+ * adding of the dependency to 'prev'):
+ */
+ if (prev_hlock->irq_context != hlock->irq_context)
+ return 1;
+ }
+ return 0;
+}
+
+#else
+
+static inline
+int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
+ enum lock_usage_bit new_bit)
+{
+ WARN_ON(1);
+ return 1;
+}
+
+static inline int mark_irqflags(struct task_struct *curr,
+ struct held_lock *hlock)
+{
+ return 1;
+}
+
+static inline int separate_irq_context(struct task_struct *curr,
+ struct held_lock *hlock)
+{
+ return 0;
+}
+
#endif
/*
+ * Mark a lock with a usage bit, and validate the state transition:
+ */
+static int mark_lock(struct task_struct *curr, struct held_lock *this,
+ enum lock_usage_bit new_bit)
+{
+ unsigned int new_mask = 1 << new_bit, ret = 1;
+
+ /*
+ * If already set then do not dirty the cacheline,
+ * nor do any checks:
+ */
+ if (likely(this->class->usage_mask & new_mask))
+ return 1;
+
+ if (!graph_lock())
+ return 0;
+ /*
+	 * Make sure we didn't race:
+ */
+ if (unlikely(this->class->usage_mask & new_mask)) {
+ graph_unlock();
+ return 1;
+ }
+
+ this->class->usage_mask |= new_mask;
+
+ if (!save_trace(this->class->usage_traces + new_bit))
+ return 0;
+
+ switch (new_bit) {
+ case LOCK_USED_IN_HARDIRQ:
+ case LOCK_USED_IN_SOFTIRQ:
+ case LOCK_USED_IN_HARDIRQ_READ:
+ case LOCK_USED_IN_SOFTIRQ_READ:
+ case LOCK_ENABLED_HARDIRQS:
+ case LOCK_ENABLED_SOFTIRQS:
+ case LOCK_ENABLED_HARDIRQS_READ:
+ case LOCK_ENABLED_SOFTIRQS_READ:
+ ret = mark_lock_irq(curr, this, new_bit);
+ if (!ret)
+ return 0;
+ break;
+ case LOCK_USED:
+ /*
+ * Add it to the global list of classes:
+ */
+ list_add_tail_rcu(&this->class->lock_entry, &all_lock_classes);
+ debug_atomic_dec(&nr_unused_locks);
+ break;
+ default:
+ if (!debug_locks_off_graph_unlock())
+ return 0;
+ WARN_ON(1);
+ return 0;
+ }
+
+ graph_unlock();
+
+ /*
+ * We must printk outside of the graph_lock:
+ */
+ if (ret == 2) {
+ printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
+ print_lock(this);
+ print_irqtrace_events(curr);
+ dump_stack();
+ }
+
+ return ret;
+}
+
+/*
* Initialize a lock instance's lock-class mapping info:
*/
void lockdep_init_map(struct lockdep_map *lock, const char *name,
@@ -1999,6 +2334,9 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
lock->name = name;
lock->key = key;
lock->class_cache = NULL;
+#ifdef CONFIG_LOCK_STAT
+ lock->cpu = raw_smp_processor_id();
+#endif
if (subclass)
register_lock_class(lock, subclass, 1);
}
@@ -2020,6 +2358,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int chain_head = 0;
u64 chain_key;
+ if (!prove_locking)
+ check = 1;
+
if (unlikely(!debug_locks))
return 0;
@@ -2070,57 +2411,18 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
hlock->read = read;
hlock->check = check;
hlock->hardirqs_off = hardirqs_off;
-
- if (check != 2)
- goto out_calc_hash;
-#ifdef CONFIG_TRACE_IRQFLAGS
- /*
- * If non-trylock use in a hardirq or softirq context, then
- * mark the lock as used in these contexts:
- */
- if (!trylock) {
- if (read) {
- if (curr->hardirq_context)
- if (!mark_lock(curr, hlock,
- LOCK_USED_IN_HARDIRQ_READ))
- return 0;
- if (curr->softirq_context)
- if (!mark_lock(curr, hlock,
- LOCK_USED_IN_SOFTIRQ_READ))
- return 0;
- } else {
- if (curr->hardirq_context)
- if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
- return 0;
- if (curr->softirq_context)
- if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
- return 0;
- }
- }
- if (!hardirqs_off) {
- if (read) {
- if (!mark_lock(curr, hlock,
- LOCK_ENABLED_HARDIRQS_READ))
- return 0;
- if (curr->softirqs_enabled)
- if (!mark_lock(curr, hlock,
- LOCK_ENABLED_SOFTIRQS_READ))
- return 0;
- } else {
- if (!mark_lock(curr, hlock,
- LOCK_ENABLED_HARDIRQS))
- return 0;
- if (curr->softirqs_enabled)
- if (!mark_lock(curr, hlock,
- LOCK_ENABLED_SOFTIRQS))
- return 0;
- }
- }
+#ifdef CONFIG_LOCK_STAT
+ hlock->waittime_stamp = 0;
+ hlock->holdtime_stamp = sched_clock();
#endif
+
+ if (check == 2 && !mark_irqflags(curr, hlock))
+ return 0;
+
/* mark it as used: */
if (!mark_lock(curr, hlock, LOCK_USED))
return 0;
-out_calc_hash:
+
/*
	 * Calculate the chain hash: it's the combined hash of all the
* lock keys along the dependency chain. We save the hash value
@@ -2143,77 +2445,15 @@ out_calc_hash:
}
hlock->prev_chain_key = chain_key;
-
-#ifdef CONFIG_TRACE_IRQFLAGS
- /*
- * Keep track of points where we cross into an interrupt context:
- */
- hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
- curr->softirq_context;
- if (depth) {
- struct held_lock *prev_hlock;
-
- prev_hlock = curr->held_locks + depth-1;
- /*
- * If we cross into another context, reset the
- * hash key (this also prevents the checking and the
- * adding of the dependency to 'prev'):
- */
- if (prev_hlock->irq_context != hlock->irq_context) {
- chain_key = 0;
- chain_head = 1;
- }
+ if (separate_irq_context(curr, hlock)) {
+ chain_key = 0;
+ chain_head = 1;
}
-#endif
chain_key = iterate_chain_key(chain_key, id);
curr->curr_chain_key = chain_key;
- /*
- * Trylock needs to maintain the stack of held locks, but it
- * does not add new dependencies, because trylock can be done
- * in any order.
- *
- * We look up the chain_key and do the O(N^2) check and update of
- * the dependencies only if this is a new dependency chain.
- * (If lookup_chain_cache() returns with 1 it acquires
- * graph_lock for us)
- */
- if (!trylock && (check == 2) && lookup_chain_cache(chain_key, class)) {
- /*
- * Check whether last held lock:
- *
- * - is irq-safe, if this lock is irq-unsafe
- * - is softirq-safe, if this lock is hardirq-unsafe
- *
- * And check whether the new lock's dependency graph
- * could lead back to the previous lock.
- *
- * any of these scenarios could lead to a deadlock. If
- * All validations
- */
- int ret = check_deadlock(curr, hlock, lock, read);
-
- if (!ret)
- return 0;
- /*
- * Mark recursive read, as we jump over it when
- * building dependencies (just like we jump over
- * trylock entries):
- */
- if (ret == 2)
- hlock->read = 2;
- /*
- * Add dependency only if this lock is not the head
- * of the chain, and if it's not a secondary read-lock:
- */
- if (!chain_head && ret != 2)
- if (!check_prevs_add(curr, hlock))
- return 0;
- graph_unlock();
- } else
- /* after lookup_chain_cache(): */
- if (unlikely(!debug_locks))
- return 0;
+ if (!validate_chain(curr, lock, hlock, chain_head))
+ return 0;
curr->lockdep_depth++;
check_chain_key(curr);
@@ -2315,6 +2555,8 @@ lock_release_non_nested(struct task_struct *curr,
return print_unlock_inbalance_bug(curr, lock, ip);
found_it:
+ lock_release_holdtime(hlock);
+
/*
* We have the right lock to unlock, 'hlock' points to it.
* Now we remove it from the stack, and add back the other
@@ -2367,6 +2609,8 @@ static int lock_release_nested(struct task_struct *curr,
curr->curr_chain_key = hlock->prev_chain_key;
+ lock_release_holdtime(hlock);
+
#ifdef CONFIG_DEBUG_LOCKDEP
hlock->prev_chain_key = 0;
hlock->class = NULL;
@@ -2441,6 +2685,9 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
{
unsigned long flags;
+ if (unlikely(!lock_stat && !prove_locking))
+ return;
+
if (unlikely(current->lockdep_recursion))
return;
@@ -2460,6 +2707,9 @@ void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
{
unsigned long flags;
+ if (unlikely(!lock_stat && !prove_locking))
+ return;
+
if (unlikely(current->lockdep_recursion))
return;
@@ -2473,6 +2723,166 @@ void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
EXPORT_SYMBOL_GPL(lock_release);
+#ifdef CONFIG_LOCK_STAT
+static int
+print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
+ unsigned long ip)
+{
+ if (!debug_locks_off())
+ return 0;
+ if (debug_locks_silent)
+ return 0;
+
+ printk("\n=================================\n");
+ printk( "[ BUG: bad contention detected! ]\n");
+ printk( "---------------------------------\n");
+ printk("%s/%d is trying to contend lock (",
+ curr->comm, curr->pid);
+ print_lockdep_cache(lock);
+ printk(") at:\n");
+ print_ip_sym(ip);
+ printk("but there are no locks held!\n");
+ printk("\nother info that might help us debug this:\n");
+ lockdep_print_held_locks(curr);
+
+ printk("\nstack backtrace:\n");
+ dump_stack();
+
+ return 0;
+}
+
+static void
+__lock_contended(struct lockdep_map *lock, unsigned long ip)
+{
+ struct task_struct *curr = current;
+ struct held_lock *hlock, *prev_hlock;
+ struct lock_class_stats *stats;
+ unsigned int depth;
+ int i, point;
+
+ depth = curr->lockdep_depth;
+ if (DEBUG_LOCKS_WARN_ON(!depth))
+ return;
+
+ prev_hlock = NULL;
+ for (i = depth-1; i >= 0; i--) {
+ hlock = curr->held_locks + i;
+ /*
+ * We must not cross into another context:
+ */
+ if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
+ break;
+ if (hlock->instance == lock)
+ goto found_it;
+ prev_hlock = hlock;
+ }
+ print_lock_contention_bug(curr, lock, ip);
+ return;
+
+found_it:
+ hlock->waittime_stamp = sched_clock();
+
+ point = lock_contention_point(hlock->class, ip);
+
+ stats = get_lock_stats(hlock->class);
+ if (point < ARRAY_SIZE(stats->contention_point))
+		stats->contention_point[point]++;
+ if (lock->cpu != smp_processor_id())
+ stats->bounces[bounce_contended + !!hlock->read]++;
+ put_lock_stats(stats);
+}
+
+static void
+__lock_acquired(struct lockdep_map *lock)
+{
+ struct task_struct *curr = current;
+ struct held_lock *hlock, *prev_hlock;
+ struct lock_class_stats *stats;
+ unsigned int depth;
+ u64 now;
+ s64 waittime = 0;
+ int i, cpu;
+
+ depth = curr->lockdep_depth;
+ if (DEBUG_LOCKS_WARN_ON(!depth))
+ return;
+
+ prev_hlock = NULL;
+ for (i = depth-1; i >= 0; i--) {
+ hlock = curr->held_locks + i;
+ /*
+ * We must not cross into another context:
+ */
+ if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
+ break;
+ if (hlock->instance == lock)
+ goto found_it;
+ prev_hlock = hlock;
+ }
+ print_lock_contention_bug(curr, lock, _RET_IP_);
+ return;
+
+found_it:
+ cpu = smp_processor_id();
+ if (hlock->waittime_stamp) {
+ now = sched_clock();
+ waittime = now - hlock->waittime_stamp;
+ hlock->holdtime_stamp = now;
+ }
+
+ stats = get_lock_stats(hlock->class);
+ if (waittime) {
+ if (hlock->read)
+ lock_time_inc(&stats->read_waittime, waittime);
+ else
+ lock_time_inc(&stats->write_waittime, waittime);
+ }
+ if (lock->cpu != cpu)
+ stats->bounces[bounce_acquired + !!hlock->read]++;
+ put_lock_stats(stats);
+
+ lock->cpu = cpu;
+}
+
+void lock_contended(struct lockdep_map *lock, unsigned long ip)
+{
+ unsigned long flags;
+
+ if (unlikely(!lock_stat))
+ return;
+
+ if (unlikely(current->lockdep_recursion))
+ return;
+
+ raw_local_irq_save(flags);
+ check_flags(flags);
+ current->lockdep_recursion = 1;
+ __lock_contended(lock, ip);
+ current->lockdep_recursion = 0;
+ raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_contended);
+
+void lock_acquired(struct lockdep_map *lock)
+{
+ unsigned long flags;
+
+ if (unlikely(!lock_stat))
+ return;
+
+ if (unlikely(current->lockdep_recursion))
+ return;
+
+ raw_local_irq_save(flags);
+ check_flags(flags);
+ current->lockdep_recursion = 1;
+ __lock_acquired(lock);
+ current->lockdep_recursion = 0;
+ raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_acquired);
+#endif
+
/*
* Used by the testsuite, sanitize the validator state
* after a simulated failure:
@@ -2636,8 +3046,11 @@ void __init lockdep_info(void)
sizeof(struct held_lock) * MAX_LOCK_DEPTH);
#ifdef CONFIG_DEBUG_LOCKDEP
- if (lockdep_init_error)
- printk("WARNING: lockdep init error! Arch code didnt call lockdep_init() early enough?\n");
+ if (lockdep_init_error) {
+ printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
+ printk("Call stack leading to lockdep invocation was:\n");
+ print_stack_trace(&lockdep_init_trace, 0);
+ }
#endif
}
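
The __lock_contended()/__lock_acquired() pair above implements a simple two-stamp scheme: the first failed acquisition stamps waittime_stamp, and the eventual acquisition turns that stamp into a wait-time sample and restarts the hold-time clock. Below is a minimal userspace sketch of that pattern using pthreads and clock_gettime(); the struct and helper names are illustrative only and are not kernel APIs.

/*
 * Userspace sketch of the lock_contended()/lock_acquired() timing
 * pattern.  Illustrative only -- none of these names are kernel APIs.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct stat_lock {
	pthread_mutex_t	mutex;
	uint64_t	contentions;	/* times the trylock failed */
	uint64_t	wait_ns;	/* accumulated wait time */
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void stat_lock_acquire(struct stat_lock *l)
{
	uint64_t waittime_stamp;

	/* fast path: an uncontended acquisition is not timed */
	if (pthread_mutex_trylock(&l->mutex) == 0)
		return;

	waittime_stamp = now_ns();	/* like __lock_contended() */
	pthread_mutex_lock(&l->mutex);
	/* like __lock_acquired(): convert the stamp into a wait sample */
	l->contentions++;
	l->wait_ns += now_ns() - waittime_stamp;
}

int main(void)
{
	struct stat_lock l = { .mutex = PTHREAD_MUTEX_INITIALIZER };

	stat_lock_acquire(&l);
	pthread_mutex_unlock(&l.mutex);
	printf("contentions=%llu wait_ns=%llu\n",
	       (unsigned long long)l.contentions,
	       (unsigned long long)l.wait_ns);
	return 0;
}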
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index 58f35e586ee3..9f17af4a2490 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -5,7 +5,8 @@
*
* Started by Ingo Molnar:
*
- * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
*
* Code for /proc/lockdep and /proc/lockdep_stats:
*
@@ -15,6 +16,10 @@
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
+#include <linux/vmalloc.h>
+#include <linux/sort.h>
+#include <asm/uaccess.h>
+#include <asm/div64.h>
#include "lockdep_internals.h"
@@ -271,8 +276,10 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
if (nr_list_entries)
factor = sum_forward_deps / nr_list_entries;
+#ifdef CONFIG_PROVE_LOCKING
seq_printf(m, " dependency chains: %11lu [max: %lu]\n",
nr_lock_chains, MAX_LOCKDEP_CHAINS);
+#endif
#ifdef CONFIG_TRACE_IRQFLAGS
seq_printf(m, " in-hardirq chains: %11u\n",
@@ -342,6 +349,292 @@ static const struct file_operations proc_lockdep_stats_operations = {
.release = seq_release,
};
+#ifdef CONFIG_LOCK_STAT
+
+struct lock_stat_data {
+ struct lock_class *class;
+ struct lock_class_stats stats;
+};
+
+struct lock_stat_seq {
+ struct lock_stat_data *iter;
+ struct lock_stat_data *iter_end;
+ struct lock_stat_data stats[MAX_LOCKDEP_KEYS];
+};
+
+/*
+ * sort on absolute number of contentions
+ */
+static int lock_stat_cmp(const void *l, const void *r)
+{
+ const struct lock_stat_data *dl = l, *dr = r;
+ unsigned long nl, nr;
+
+ nl = dl->stats.read_waittime.nr + dl->stats.write_waittime.nr;
+ nr = dr->stats.read_waittime.nr + dr->stats.write_waittime.nr;
+
+ return nr - nl;
+}
+
+static void seq_line(struct seq_file *m, char c, int offset, int length)
+{
+ int i;
+
+ for (i = 0; i < offset; i++)
+ seq_puts(m, " ");
+ for (i = 0; i < length; i++)
+ seq_printf(m, "%c", c);
+ seq_puts(m, "\n");
+}
+
+static void snprint_time(char *buf, size_t bufsiz, s64 nr)
+{
+ unsigned long rem;
+
+ rem = do_div(nr, 1000); /* XXX: do_div_signed */
+ snprintf(buf, bufsiz, "%lld.%02d", (long long)nr, ((int)rem+5)/10);
+}
+
+static void seq_time(struct seq_file *m, s64 time)
+{
+ char num[15];
+
+ snprint_time(num, sizeof(num), time);
+ seq_printf(m, " %14s", num);
+}
+
+static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
+{
+ seq_printf(m, "%14lu", lt->nr);
+ seq_time(m, lt->min);
+ seq_time(m, lt->max);
+ seq_time(m, lt->total);
+}
+
+static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
+{
+ char name[39];
+ struct lock_class *class;
+ struct lock_class_stats *stats;
+ int i, namelen;
+
+ class = data->class;
+ stats = &data->stats;
+
+ namelen = 38;
+ if (class->name_version > 1)
+ namelen -= 2; /* XXX truncates versions > 9 */
+ if (class->subclass)
+ namelen -= 2;
+
+ if (!class->name) {
+ char str[KSYM_NAME_LEN];
+ const char *key_name;
+
+ key_name = __get_key_name(class->key, str);
+ snprintf(name, namelen, "%s", key_name);
+ } else {
+ snprintf(name, namelen, "%s", class->name);
+ }
+ namelen = strlen(name);
+ if (class->name_version > 1) {
+ snprintf(name+namelen, 3, "#%d", class->name_version);
+ namelen += 2;
+ }
+ if (class->subclass) {
+ snprintf(name+namelen, 3, "/%d", class->subclass);
+ namelen += 2;
+ }
+
+ if (stats->write_holdtime.nr) {
+ if (stats->read_holdtime.nr)
+ seq_printf(m, "%38s-W:", name);
+ else
+ seq_printf(m, "%40s:", name);
+
+ seq_printf(m, "%14lu ", stats->bounces[bounce_contended_write]);
+ seq_lock_time(m, &stats->write_waittime);
+ seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_write]);
+ seq_lock_time(m, &stats->write_holdtime);
+ seq_puts(m, "\n");
+ }
+
+ if (stats->read_holdtime.nr) {
+ seq_printf(m, "%38s-R:", name);
+ seq_printf(m, "%14lu ", stats->bounces[bounce_contended_read]);
+ seq_lock_time(m, &stats->read_waittime);
+ seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_read]);
+ seq_lock_time(m, &stats->read_holdtime);
+ seq_puts(m, "\n");
+ }
+
+ if (stats->read_waittime.nr + stats->write_waittime.nr == 0)
+ return;
+
+ if (stats->read_holdtime.nr)
+ namelen += 2;
+
+ for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
+ char sym[KSYM_SYMBOL_LEN];
+ char ip[32];
+
+ if (class->contention_point[i] == 0)
+ break;
+
+ if (!i)
+ seq_line(m, '-', 40-namelen, namelen);
+
+ sprint_symbol(sym, class->contention_point[i]);
+ snprintf(ip, sizeof(ip), "[<%p>]",
+ (void *)class->contention_point[i]);
+ seq_printf(m, "%40s %14lu %29s %s\n", name,
+ stats->contention_point[i],
+ ip, sym);
+ }
+ if (i) {
+ seq_puts(m, "\n");
+ seq_line(m, '.', 0, 40 + 1 + 10 * (14 + 1));
+ seq_puts(m, "\n");
+ }
+}
+
+static void seq_header(struct seq_file *m)
+{
+ seq_printf(m, "lock_stat version 0.2\n");
+ seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
+ seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s "
+ "%14s %14s\n",
+ "class name",
+ "con-bounces",
+ "contentions",
+ "waittime-min",
+ "waittime-max",
+ "waittime-total",
+ "acq-bounces",
+ "acquisitions",
+ "holdtime-min",
+ "holdtime-max",
+ "holdtime-total");
+ seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
+ seq_printf(m, "\n");
+}
+
+static void *ls_start(struct seq_file *m, loff_t *pos)
+{
+ struct lock_stat_seq *data = m->private;
+
+ if (data->iter == data->stats)
+ seq_header(m);
+
+ if (data->iter == data->iter_end)
+ data->iter = NULL;
+
+ return data->iter;
+}
+
+static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct lock_stat_seq *data = m->private;
+
+ (*pos)++;
+
+ data->iter = v;
+ data->iter++;
+ if (data->iter == data->iter_end)
+ data->iter = NULL;
+
+ return data->iter;
+}
+
+static void ls_stop(struct seq_file *m, void *v)
+{
+}
+
+static int ls_show(struct seq_file *m, void *v)
+{
+ struct lock_stat_seq *data = m->private;
+
+ seq_stats(m, data->iter);
+ return 0;
+}
+
+static struct seq_operations lockstat_ops = {
+ .start = ls_start,
+ .next = ls_next,
+ .stop = ls_stop,
+ .show = ls_show,
+};
+
+static int lock_stat_open(struct inode *inode, struct file *file)
+{
+ int res;
+ struct lock_class *class;
+ struct lock_stat_seq *data = vmalloc(sizeof(struct lock_stat_seq));
+
+ if (!data)
+ return -ENOMEM;
+
+ res = seq_open(file, &lockstat_ops);
+ if (!res) {
+ struct lock_stat_data *iter = data->stats;
+ struct seq_file *m = file->private_data;
+
+ data->iter = iter;
+ list_for_each_entry(class, &all_lock_classes, lock_entry) {
+ iter->class = class;
+ iter->stats = lock_stats(class);
+ iter++;
+ }
+ data->iter_end = iter;
+
+ sort(data->stats, data->iter_end - data->iter,
+ sizeof(struct lock_stat_data),
+ lock_stat_cmp, NULL);
+
+ m->private = data;
+ } else
+ vfree(data);
+
+ return res;
+}
+
+static ssize_t lock_stat_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct lock_class *class;
+ char c;
+
+ if (count) {
+ if (get_user(c, buf))
+ return -EFAULT;
+
+ if (c != '0')
+ return count;
+
+ list_for_each_entry(class, &all_lock_classes, lock_entry)
+ clear_lock_stats(class);
+ }
+ return count;
+}
+
+static int lock_stat_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+
+ vfree(seq->private);
+ seq->private = NULL;
+ return seq_release(inode, file);
+}
+
+static const struct file_operations proc_lock_stat_operations = {
+ .open = lock_stat_open,
+ .write = lock_stat_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = lock_stat_release,
+};
+#endif /* CONFIG_LOCK_STAT */
+
static int __init lockdep_proc_init(void)
{
struct proc_dir_entry *entry;
@@ -354,6 +647,12 @@ static int __init lockdep_proc_init(void)
if (entry)
entry->proc_fops = &proc_lockdep_stats_operations;
+#ifdef CONFIG_LOCK_STAT
+ entry = create_proc_entry("lock_stat", S_IRUSR, NULL);
+ if (entry)
+ entry->proc_fops = &proc_lock_stat_operations;
+#endif
+
return 0;
}
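
For reference, the /proc/lock_stat entry registered above is created read-only (S_IRUSR), but its write handler clears every class's statistics when the first written character is '0'. A small userspace sketch of that interaction, assuming a CONFIG_LOCK_STAT kernel and root privileges:

/*
 * Dump /proc/lock_stat, then reset the counters by writing "0".
 * Assumes a CONFIG_LOCK_STAT kernel and root privileges.
 */
#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/lock_stat", "r");

	if (!f) {
		perror("/proc/lock_stat");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);

	/* lock_stat_write() only acts when the first character is '0' */
	f = fopen("/proc/lock_stat", "w");
	if (!f) {
		perror("/proc/lock_stat (write)");
		return 1;
	}
	fputs("0", f);
	fclose(f);
	return 0;
}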
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 303eab18484b..691b86564dd9 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -139,6 +139,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
list_add_tail(&waiter.list, &lock->wait_list);
waiter.task = task;
+ old_val = atomic_xchg(&lock->count, -1);
+ if (old_val == 1)
+ goto done;
+
+ lock_contended(&lock->dep_map, _RET_IP_);
+
for (;;) {
/*
* Lets try to take the lock again - this is needed even if
@@ -174,6 +180,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
spin_lock_mutex(&lock->wait_lock, flags);
}
+done:
+ lock_acquired(&lock->dep_map);
/* got the lock - rejoice! */
mutex_remove_waiter(lock, &waiter, task_thread_info(task));
debug_mutex_set_owner(lock, task_thread_info(task));
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index 10f0bbba382b..a4fb7d46971f 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -193,7 +193,7 @@ int unshare_nsproxy_namespaces(unsigned long unshare_flags,
static int __init nsproxy_cache_init(void)
{
nsproxy_cachep = kmem_cache_create("nsproxy", sizeof(struct nsproxy),
- 0, SLAB_PANIC, NULL, NULL);
+ 0, SLAB_PANIC, NULL);
return 0;
}
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 329ce0172074..55b3761edaa9 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -241,7 +241,7 @@ static __init int init_posix_timers(void)
register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
posix_timers_cache = kmem_cache_create("posix_timers_cache",
- sizeof (struct k_itimer), 0, 0, NULL, NULL);
+ sizeof (struct k_itimer), 0, 0, NULL);
idr_init(&posix_timers_id);
return 0;
}
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 495b7d4dd330..c1a106d87d90 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -33,13 +33,20 @@ config PM_DEBUG
bool "Power Management Debug Support"
depends on PM
---help---
- This option enables verbose debugging support in the Power Management
- code. This is helpful when debugging and reporting various PM bugs,
- like suspend support.
+ This option enables various debugging support in the Power Management
+ code. This is helpful when debugging and reporting PM bugs, like
+ suspend support.
+
+config PM_VERBOSE
+ bool "Verbose Power Management debugging"
+ depends on PM_DEBUG
+ default n
+ ---help---
+ This option enables verbose messages from the Power Management code.
config DISABLE_CONSOLE_SUSPEND
bool "Keep console(s) enabled during suspend/resume (DANGEROUS)"
- depends on PM && PM_DEBUG
+ depends on PM_DEBUG
default n
---help---
This option turns off the console suspend mechanism that prevents
@@ -50,7 +57,7 @@ config DISABLE_CONSOLE_SUSPEND
config PM_TRACE
bool "Suspend/resume event tracing"
- depends on PM && PM_DEBUG && X86_32 && EXPERIMENTAL
+ depends on PM_DEBUG && X86 && EXPERIMENTAL
default n
---help---
This enables some cheesy code to save the last PM event point in the
@@ -65,18 +72,6 @@ config PM_TRACE
CAUTION: this option will cause your machine's real-time clock to be
set to an invalid time after a resume.
-config PM_SYSFS_DEPRECATED
- bool "Driver model /sys/devices/.../power/state files (DEPRECATED)"
- depends on PM && SYSFS
- default n
- help
- The driver model started out with a sysfs file intended to provide
- a userspace hook for device power management. This feature has never
- worked very well, except for limited testing purposes, and so it will
- be removed. It's not clear that a generic mechanism could really
- handle the wide variability of device power states; any replacements
- are likely to be bus or driver specific.
-
config SOFTWARE_SUSPEND
bool "Software Suspend (Hibernation)"
depends on PM && SWAP && (((X86 || PPC64_SWSUSP) && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP))
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index f445b9cd60fb..324ac0188ce1 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -45,7 +45,7 @@ enum {
static int hibernation_mode = HIBERNATION_SHUTDOWN;
-struct hibernation_ops *hibernation_ops;
+static struct hibernation_ops *hibernation_ops;
/**
* hibernation_set_ops - set the global hibernate operations
@@ -54,7 +54,8 @@ struct hibernation_ops *hibernation_ops;
void hibernation_set_ops(struct hibernation_ops *ops)
{
- if (ops && !(ops->prepare && ops->enter && ops->finish)) {
+ if (ops && !(ops->prepare && ops->enter && ops->finish
+ && ops->pre_restore && ops->restore_cleanup)) {
WARN_ON(1);
return;
}
@@ -74,9 +75,9 @@ void hibernation_set_ops(struct hibernation_ops *ops)
* platform driver if so configured and return an error code if it fails
*/
-static int platform_prepare(void)
+static int platform_prepare(int platform_mode)
{
- return (hibernation_mode == HIBERNATION_PLATFORM && hibernation_ops) ?
+ return (platform_mode && hibernation_ops) ?
hibernation_ops->prepare() : 0;
}
@@ -85,13 +86,145 @@ static int platform_prepare(void)
* using the platform driver (must be called after platform_prepare())
*/
-static void platform_finish(void)
+static void platform_finish(int platform_mode)
{
- if (hibernation_mode == HIBERNATION_PLATFORM && hibernation_ops)
+ if (platform_mode && hibernation_ops)
hibernation_ops->finish();
}
/**
+ * platform_pre_restore - prepare the platform for the restoration from a
+ * hibernation image. If the restore fails after this function has been
+ * called, platform_restore_cleanup() must be called.
+ */
+
+static int platform_pre_restore(int platform_mode)
+{
+ return (platform_mode && hibernation_ops) ?
+ hibernation_ops->pre_restore() : 0;
+}
+
+/**
+ * platform_restore_cleanup - switch the platform to the normal mode of
+ * operation after a failing restore. If platform_pre_restore() has been
+ * called before the failing restore, this function must be called too,
+ * regardless of the result of platform_pre_restore().
+ */
+
+static void platform_restore_cleanup(int platform_mode)
+{
+ if (platform_mode && hibernation_ops)
+ hibernation_ops->restore_cleanup();
+}
+
+/**
+ * hibernation_snapshot - quiesce devices and create the hibernation
+ * snapshot image.
+ * @platform_mode - if set, use the platform driver, if available, to
+ *	prepare the platform firmware for the power transition.
+ *
+ * Must be called with pm_mutex held
+ */
+
+int hibernation_snapshot(int platform_mode)
+{
+ int error;
+
+ /* Free memory before shutting down devices. */
+ error = swsusp_shrink_memory();
+ if (error)
+ return error;
+
+ suspend_console();
+ error = device_suspend(PMSG_FREEZE);
+ if (error)
+ goto Resume_console;
+
+ error = platform_prepare(platform_mode);
+ if (error)
+ goto Resume_devices;
+
+ error = disable_nonboot_cpus();
+ if (!error) {
+ if (hibernation_mode != HIBERNATION_TEST) {
+ in_suspend = 1;
+ error = swsusp_suspend();
+ /* Control returns here after successful restore */
+ } else {
+ printk("swsusp debug: Waiting for 5 seconds.\n");
+ mdelay(5000);
+ }
+ }
+ enable_nonboot_cpus();
+ Resume_devices:
+ platform_finish(platform_mode);
+ device_resume();
+ Resume_console:
+ resume_console();
+ return error;
+}
+
+/**
+ * hibernation_restore - quiesce devices and restore the hibernation
+ * snapshot image. If successful, control returns in hibernation_snapshot()
+ * @platform_mode - if set, use the platform driver, if available, to
+ *	prepare the platform firmware for the transition.
+ *
+ * Must be called with pm_mutex held
+ */
+
+int hibernation_restore(int platform_mode)
+{
+ int error;
+
+ pm_prepare_console();
+ suspend_console();
+ error = device_suspend(PMSG_PRETHAW);
+ if (error)
+ goto Finish;
+
+ error = platform_pre_restore(platform_mode);
+ if (!error) {
+ error = disable_nonboot_cpus();
+ if (!error)
+ error = swsusp_resume();
+ enable_nonboot_cpus();
+ }
+ platform_restore_cleanup(platform_mode);
+ device_resume();
+ Finish:
+ resume_console();
+ pm_restore_console();
+ return error;
+}
+
+/**
+ * hibernation_platform_enter - enter the hibernation state using the
+ * platform driver (if available)
+ */
+
+int hibernation_platform_enter(void)
+{
+ int error;
+
+ if (hibernation_ops) {
+ kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK);
+ /*
+ * We have cancelled the power transition by running
+ * hibernation_ops->finish() before saving the image, so we
+ * should let the firmware know that we're going to enter the
+ * sleep state after all
+ */
+ error = hibernation_ops->prepare();
+ if (!error)
+ error = hibernation_ops->enter();
+ } else {
+ error = -ENOSYS;
+ }
+ return error;
+}
+
+/**
* power_down - Shut the machine down for hibernation.
*
* Use the platform driver, if configured so; otherwise try
@@ -111,11 +244,7 @@ static void power_down(void)
kernel_restart(NULL);
break;
case HIBERNATION_PLATFORM:
- if (hibernation_ops) {
- kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK);
- hibernation_ops->enter();
- break;
- }
+ hibernation_platform_enter();
}
kernel_halt();
/*
@@ -152,9 +281,16 @@ int hibernate(void)
{
int error;
+ mutex_lock(&pm_mutex);
/* The snapshot device should not be opened while we're running */
- if (!atomic_add_unless(&snapshot_device_available, -1, 0))
- return -EBUSY;
+ if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
+ error = -EBUSY;
+ goto Unlock;
+ }
+
+ error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
+ if (error)
+ goto Exit;
/* Allocate memory management structures */
error = create_basic_memory_bitmaps();
@@ -165,75 +301,35 @@ int hibernate(void)
if (error)
goto Finish;
- mutex_lock(&pm_mutex);
if (hibernation_mode == HIBERNATION_TESTPROC) {
printk("swsusp debug: Waiting for 5 seconds.\n");
mdelay(5000);
goto Thaw;
}
+ error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
+ if (in_suspend && !error) {
+ unsigned int flags = 0;
- /* Free memory before shutting down devices. */
- error = swsusp_shrink_memory();
- if (error)
- goto Thaw;
-
- error = platform_prepare();
- if (error)
- goto Thaw;
-
- suspend_console();
- error = device_suspend(PMSG_FREEZE);
- if (error) {
- printk(KERN_ERR "PM: Some devices failed to suspend\n");
- goto Resume_devices;
- }
- error = disable_nonboot_cpus();
- if (error)
- goto Enable_cpus;
-
- if (hibernation_mode == HIBERNATION_TEST) {
- printk("swsusp debug: Waiting for 5 seconds.\n");
- mdelay(5000);
- goto Enable_cpus;
- }
-
- pr_debug("PM: snapshotting memory.\n");
- in_suspend = 1;
- error = swsusp_suspend();
- if (error)
- goto Enable_cpus;
-
- if (in_suspend) {
- enable_nonboot_cpus();
- platform_finish();
- device_resume();
- resume_console();
+ if (hibernation_mode == HIBERNATION_PLATFORM)
+ flags |= SF_PLATFORM_MODE;
pr_debug("PM: writing image.\n");
- error = swsusp_write();
+ error = swsusp_write(flags);
+ swsusp_free();
if (!error)
power_down();
- else {
- swsusp_free();
- goto Thaw;
- }
} else {
pr_debug("PM: Image restored successfully.\n");
+ swsusp_free();
}
-
- swsusp_free();
- Enable_cpus:
- enable_nonboot_cpus();
- Resume_devices:
- platform_finish();
- device_resume();
- resume_console();
Thaw:
- mutex_unlock(&pm_mutex);
unprepare_processes();
Finish:
free_basic_memory_bitmaps();
Exit:
+ pm_notifier_call_chain(PM_POST_HIBERNATION);
atomic_inc(&snapshot_device_available);
+ Unlock:
+ mutex_unlock(&pm_mutex);
return error;
}
@@ -253,6 +349,7 @@ int hibernate(void)
static int software_resume(void)
{
int error;
+ unsigned int flags;
mutex_lock(&pm_mutex);
if (!swsusp_resume_device) {
@@ -300,30 +397,12 @@ static int software_resume(void)
pr_debug("PM: Reading swsusp image.\n");
- error = swsusp_read();
- if (error) {
- swsusp_free();
- goto Thaw;
- }
-
- pr_debug("PM: Preparing devices for restore.\n");
-
- suspend_console();
- error = device_suspend(PMSG_PRETHAW);
- if (error)
- goto Free;
-
- error = disable_nonboot_cpus();
+ error = swsusp_read(&flags);
if (!error)
- swsusp_resume();
+ hibernation_restore(flags & SF_PLATFORM_MODE);
- enable_nonboot_cpus();
- Free:
- swsusp_free();
- device_resume();
- resume_console();
- Thaw:
printk(KERN_ERR "PM: Restore failed, recovering.\n");
+ swsusp_free();
unprepare_processes();
Done:
free_basic_memory_bitmaps();
@@ -333,7 +412,7 @@ static int software_resume(void)
Unlock:
mutex_unlock(&pm_mutex);
pr_debug("PM: Resume from disk failed.\n");
- return 0;
+ return error;
}
late_initcall(software_resume);
diff --git a/kernel/power/main.c b/kernel/power/main.c
index fc45ed22620f..32147b57c3bf 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -23,6 +23,8 @@
#include "power.h"
+BLOCKING_NOTIFIER_HEAD(pm_chain_head);
+
/*This is just an arbitrary number */
#define FREE_PAGE_NUMBER (100)
@@ -63,14 +65,11 @@ static inline void pm_finish(suspend_state_t state)
/**
* suspend_prepare - Do prep work before entering low-power state.
- * @state: State we're entering.
*
- * This is common code that is called for each state that we're
- * entering. Allocate a console, stop all processes, then make sure
- * the platform can enter the requested state.
+ * This is common code that is called for each state that we're entering.
+ * Run suspend notifiers, allocate a console and stop all processes.
*/
-
-static int suspend_prepare(suspend_state_t state)
+static int suspend_prepare(void)
{
int error;
unsigned int free_pages;
@@ -78,6 +77,10 @@ static int suspend_prepare(suspend_state_t state)
if (!pm_ops || !pm_ops->enter)
return -EPERM;
+ error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
+ if (error)
+ goto Finish;
+
pm_prepare_console();
if (freeze_processes()) {
@@ -85,46 +88,23 @@ static int suspend_prepare(suspend_state_t state)
goto Thaw;
}
- if ((free_pages = global_page_state(NR_FREE_PAGES))
- < FREE_PAGE_NUMBER) {
+ free_pages = global_page_state(NR_FREE_PAGES);
+ if (free_pages < FREE_PAGE_NUMBER) {
pr_debug("PM: free some memory\n");
shrink_all_memory(FREE_PAGE_NUMBER - free_pages);
if (nr_free_pages() < FREE_PAGE_NUMBER) {
error = -ENOMEM;
printk(KERN_ERR "PM: No enough memory\n");
- goto Thaw;
}
}
-
- if (pm_ops->set_target) {
- error = pm_ops->set_target(state);
- if (error)
- goto Thaw;
- }
- suspend_console();
- error = device_suspend(PMSG_SUSPEND);
- if (error) {
- printk(KERN_ERR "Some devices failed to suspend\n");
- goto Resume_console;
- }
- if (pm_ops->prepare) {
- if ((error = pm_ops->prepare(state)))
- goto Resume_devices;
- }
-
- error = disable_nonboot_cpus();
if (!error)
return 0;
- enable_nonboot_cpus();
- pm_finish(state);
- Resume_devices:
- device_resume();
- Resume_console:
- resume_console();
Thaw:
thaw_processes();
pm_restore_console();
+ Finish:
+ pm_notifier_call_chain(PM_POST_SUSPEND);
return error;
}
@@ -140,6 +120,12 @@ void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
local_irq_enable();
}
+/**
+ * suspend_enter - enter the desired system sleep state.
+ * @state: state to enter
+ *
+ * This function should be called after devices have been suspended.
+ */
int suspend_enter(suspend_state_t state)
{
int error = 0;
@@ -159,23 +145,58 @@ int suspend_enter(suspend_state_t state)
return error;
}
+/**
+ * suspend_devices_and_enter - suspend devices and enter the desired system sleep
+ * state.
+ * @state: state to enter
+ */
+int suspend_devices_and_enter(suspend_state_t state)
+{
+ int error;
+
+ if (!pm_ops)
+ return -ENOSYS;
+
+ if (pm_ops->set_target) {
+ error = pm_ops->set_target(state);
+ if (error)
+ return error;
+ }
+ suspend_console();
+ error = device_suspend(PMSG_SUSPEND);
+ if (error) {
+ printk(KERN_ERR "Some devices failed to suspend\n");
+ goto Resume_console;
+ }
+ if (pm_ops->prepare) {
+ error = pm_ops->prepare(state);
+ if (error)
+ goto Resume_devices;
+ }
+ error = disable_nonboot_cpus();
+ if (!error)
+ suspend_enter(state);
+
+ enable_nonboot_cpus();
+ pm_finish(state);
+ Resume_devices:
+ device_resume();
+ Resume_console:
+ resume_console();
+ return error;
+}
/**
* suspend_finish - Do final work before exiting suspend sequence.
- * @state: State we're coming out of.
*
* Call platform code to clean up, restart processes, and free the
* console that we've allocated. This is not called for suspend-to-disk.
*/
-
-static void suspend_finish(suspend_state_t state)
+static void suspend_finish(void)
{
- enable_nonboot_cpus();
- pm_finish(state);
- device_resume();
- resume_console();
thaw_processes();
pm_restore_console();
+ pm_notifier_call_chain(PM_POST_SUSPEND);
}
@@ -207,7 +228,6 @@ static inline int valid_state(suspend_state_t state)
 * Then, do the setup for suspend, enter the state, and cleanup (after
* we've woken up).
*/
-
static int enter_state(suspend_state_t state)
{
int error;
@@ -218,14 +238,14 @@ static int enter_state(suspend_state_t state)
return -EBUSY;
pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
- if ((error = suspend_prepare(state)))
+ if ((error = suspend_prepare()))
goto Unlock;
pr_debug("PM: Entering %s sleep\n", pm_states[state]);
- error = suspend_enter(state);
+ error = suspend_devices_and_enter(state);
pr_debug("PM: Finishing wakeup.\n");
- suspend_finish(state);
+ suspend_finish();
Unlock:
mutex_unlock(&pm_mutex);
return error;
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 51381487103f..5f24c786f8ec 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -25,7 +25,10 @@ struct swsusp_info {
*/
#define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT)
-extern struct hibernation_ops *hibernation_ops;
+/* kernel/power/disk.c */
+extern int hibernation_snapshot(int platform_mode);
+extern int hibernation_restore(int platform_mode);
+extern int hibernation_platform_enter(void);
#endif
extern int pfn_is_nosave(unsigned long);
@@ -152,16 +155,34 @@ extern sector_t alloc_swapdev_block(int swap);
extern void free_all_swap_pages(int swap);
extern int swsusp_swap_in_use(void);
+/*
+ * Flags that can be passed from the hibernating kernel to the "boot" kernel in
+ * the image header.
+ */
+#define SF_PLATFORM_MODE 1
+
+/* kernel/power/disk.c */
extern int swsusp_check(void);
extern int swsusp_shrink_memory(void);
extern void swsusp_free(void);
extern int swsusp_suspend(void);
extern int swsusp_resume(void);
-extern int swsusp_read(void);
-extern int swsusp_write(void);
+extern int swsusp_read(unsigned int *flags_p);
+extern int swsusp_write(unsigned int flags);
extern void swsusp_close(void);
-extern int suspend_enter(suspend_state_t state);
struct timeval;
+/* kernel/power/swsusp.c */
extern void swsusp_show_speed(struct timeval *, struct timeval *,
unsigned int, char *);
+
+/* kernel/power/main.c */
+extern int suspend_enter(suspend_state_t state);
+extern int suspend_devices_and_enter(suspend_state_t state);
+extern struct blocking_notifier_head pm_chain_head;
+
+static inline int pm_notifier_call_chain(unsigned long val)
+{
+ return (blocking_notifier_call_chain(&pm_chain_head, val, NULL)
+ == NOTIFY_BAD) ? -EINVAL : 0;
+}
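
pm_notifier_call_chain() above simply runs the blocking chain and maps NOTIFY_BAD to -EINVAL. A hedged sketch of a consumer of the new PM_SUSPEND_PREPARE/PM_HIBERNATION_PREPARE events follows; it registers directly against pm_chain_head because this series adds no public registration wrapper, and placing the code inside kernel/power (so it can include "power.h") is an assumption made for the example.

/*
 * Sketch of a PM notifier consumer for the chain introduced above.
 * Registering directly on pm_chain_head (declared in power.h) is an
 * assumption -- no public wrapper is added by this series.
 */
#include <linux/init.h>
#include <linux/notifier.h>
#include "power.h"

static int foo_pm_callback(struct notifier_block *nb,
			   unsigned long event, void *unused)
{
	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		/* quiesce state before tasks are frozen */
		return NOTIFY_OK;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		/* undo whatever the *_PREPARE step set up */
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block foo_pm_nb = {
	.notifier_call = foo_pm_callback,
};

static int __init foo_pm_init(void)
{
	return blocking_notifier_chain_register(&pm_chain_head, &foo_pm_nb);
}
late_initcall(foo_pm_init);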
diff --git a/kernel/power/process.c b/kernel/power/process.c
index e0233d8422b9..3434940a3df1 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -40,7 +40,7 @@ static inline void frozen_process(void)
current->flags |= PF_FROZEN;
wmb();
}
- clear_tsk_thread_flag(current, TIF_FREEZE);
+ clear_freeze_flag(current);
}
/* Refrigerator is place where frozen processes are stored :-). */
@@ -72,20 +72,19 @@ void refrigerator(void)
schedule();
}
pr_debug("%s left refrigerator\n", current->comm);
- current->state = save;
+ __set_current_state(save);
}
-static inline void freeze_process(struct task_struct *p)
+static void freeze_task(struct task_struct *p)
{
unsigned long flags;
if (!freezing(p)) {
rmb();
if (!frozen(p)) {
+ set_freeze_flag(p);
if (p->state == TASK_STOPPED)
force_sig_specific(SIGSTOP, p);
-
- freeze(p);
spin_lock_irqsave(&p->sighand->siglock, flags);
signal_wake_up(p, p->state == TASK_STOPPED);
spin_unlock_irqrestore(&p->sighand->siglock, flags);
@@ -99,19 +98,14 @@ static void cancel_freezing(struct task_struct *p)
if (freezing(p)) {
pr_debug(" clean up: %s\n", p->comm);
- do_not_freeze(p);
+ clear_freeze_flag(p);
spin_lock_irqsave(&p->sighand->siglock, flags);
recalc_sigpending_and_wake(p);
spin_unlock_irqrestore(&p->sighand->siglock, flags);
}
}
-static inline int is_user_space(struct task_struct *p)
-{
- return p->mm && !(p->flags & PF_BORROWED_MM);
-}
-
-static unsigned int try_to_freeze_tasks(int freeze_user_space)
+static int try_to_freeze_tasks(int freeze_user_space)
{
struct task_struct *g, *p;
unsigned long end_time;
@@ -122,26 +116,40 @@ static unsigned int try_to_freeze_tasks(int freeze_user_space)
todo = 0;
read_lock(&tasklist_lock);
do_each_thread(g, p) {
- if (!freezeable(p))
+ if (frozen(p) || !freezeable(p))
continue;
- if (frozen(p))
- continue;
-
- if (p->state == TASK_TRACED && frozen(p->parent)) {
- cancel_freezing(p);
- continue;
+ if (freeze_user_space) {
+ if (p->state == TASK_TRACED &&
+ frozen(p->parent)) {
+ cancel_freezing(p);
+ continue;
+ }
+ /*
+ * Kernel threads should not have TIF_FREEZE set
+ * at this point, so we must ensure that either
+ * p->mm is not NULL *and* PF_BORROWED_MM is
+			 * unset, or TIF_FREEZE is left unset.
+ * The task_lock() is necessary to prevent races
+ * with exit_mm() or use_mm()/unuse_mm() from
+			 * occurring.
+ */
+ task_lock(p);
+ if (!p->mm || (p->flags & PF_BORROWED_MM)) {
+ task_unlock(p);
+ continue;
+ }
+ freeze_task(p);
+ task_unlock(p);
+ } else {
+ freeze_task(p);
}
- if (freeze_user_space && !is_user_space(p))
- continue;
-
- freeze_process(p);
if (!freezer_should_skip(p))
todo++;
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
yield(); /* Yield is okay here */
- if (todo && time_after(jiffies, end_time))
+ if (time_after(jiffies, end_time))
break;
} while (todo);
@@ -152,49 +160,41 @@ static unsigned int try_to_freeze_tasks(int freeze_user_space)
* but it cleans up leftover PF_FREEZE requests.
*/
printk("\n");
- printk(KERN_ERR "Stopping %s timed out after %d seconds "
+ printk(KERN_ERR "Freezing of %s timed out after %d seconds "
"(%d tasks refusing to freeze):\n",
- freeze_user_space ? "user space processes" :
- "kernel threads",
+ freeze_user_space ? "user space " : "tasks ",
TIMEOUT / HZ, todo);
+ show_state();
read_lock(&tasklist_lock);
do_each_thread(g, p) {
- if (freeze_user_space && !is_user_space(p))
- continue;
-
task_lock(p);
- if (freezeable(p) && !frozen(p) &&
- !freezer_should_skip(p))
+ if (freezing(p) && !freezer_should_skip(p))
printk(KERN_ERR " %s\n", p->comm);
-
cancel_freezing(p);
task_unlock(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
}
- return todo;
+ return todo ? -EBUSY : 0;
}
/**
* freeze_processes - tell processes to enter the refrigerator
- *
- * Returns 0 on success, or the number of processes that didn't freeze,
- * although they were told to.
*/
int freeze_processes(void)
{
- unsigned int nr_unfrozen;
+ int error;
printk("Stopping tasks ... ");
- nr_unfrozen = try_to_freeze_tasks(FREEZER_USER_SPACE);
- if (nr_unfrozen)
- return nr_unfrozen;
+ error = try_to_freeze_tasks(FREEZER_USER_SPACE);
+ if (error)
+ return error;
sys_sync();
- nr_unfrozen = try_to_freeze_tasks(FREEZER_KERNEL_THREADS);
- if (nr_unfrozen)
- return nr_unfrozen;
+ error = try_to_freeze_tasks(FREEZER_KERNEL_THREADS);
+ if (error)
+ return error;
printk("done.\n");
BUG_ON(in_atomic());
@@ -210,7 +210,7 @@ static void thaw_tasks(int thaw_user_space)
if (!freezeable(p))
continue;
- if (is_user_space(p) == !thaw_user_space)
+ if (!p->mm == thaw_user_space)
continue;
thaw_process(p);
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 8b1a1b837145..917aba100575 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -33,8 +33,9 @@ extern char resume_file[];
#define SWSUSP_SIG "S1SUSPEND"
struct swsusp_header {
- char reserved[PAGE_SIZE - 20 - sizeof(sector_t)];
+ char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int)];
sector_t image;
+ unsigned int flags; /* Flags to pass to the "boot" kernel */
char orig_sig[10];
char sig[10];
} __attribute__((packed));
@@ -138,7 +139,7 @@ static int wait_on_bio_chain(struct bio **bio_chain)
* Saving part
*/
-static int mark_swapfiles(sector_t start)
+static int mark_swapfiles(sector_t start, unsigned int flags)
{
int error;
@@ -148,6 +149,7 @@ static int mark_swapfiles(sector_t start)
memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10);
memcpy(swsusp_header->sig,SWSUSP_SIG, 10);
swsusp_header->image = start;
+ swsusp_header->flags = flags;
error = bio_write_page(swsusp_resume_block,
swsusp_header, NULL);
} else {
@@ -369,6 +371,7 @@ static int enough_swap(unsigned int nr_pages)
/**
* swsusp_write - Write entire image and metadata.
+ * @flags: flags to pass to the "boot" kernel in the image header
*
* It is important _NOT_ to umount filesystems at this point. We want
* them synced (in case something goes wrong) but we DO not want to mark
@@ -376,7 +379,7 @@ static int enough_swap(unsigned int nr_pages)
* correctly, we'll mark system clean, anyway.)
*/
-int swsusp_write(void)
+int swsusp_write(unsigned int flags)
{
struct swap_map_handle handle;
struct snapshot_handle snapshot;
@@ -415,7 +418,7 @@ int swsusp_write(void)
if (!error) {
flush_swap_writer(&handle);
printk("S");
- error = mark_swapfiles(start);
+ error = mark_swapfiles(start, flags);
printk("|\n");
}
}
@@ -540,13 +543,20 @@ static int load_image(struct swap_map_handle *handle,
return error;
}
-int swsusp_read(void)
+/**
+ * swsusp_read - read the hibernation image.
+ * @flags_p: flags passed by the "frozen" kernel in the image header should
+ *	be written into this memory location
+ */
+
+int swsusp_read(unsigned int *flags_p)
{
int error;
struct swap_map_handle handle;
struct snapshot_handle snapshot;
struct swsusp_info *header;
+ *flags_p = swsusp_header->flags;
if (IS_ERR(resume_bdev)) {
pr_debug("swsusp: block device not initialised\n");
return PTR_ERR(resume_bdev);
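
The new flags field is carved out of reserved[] above so that struct swsusp_header keeps filling exactly one page. A standalone check of that size invariant is below; PAGE_SIZE (4096) and the width of sector_t (64-bit) are assumptions for the sake of the example and need not match every configuration.

/*
 * Size check for the swsusp_header layout above.  PAGE_SIZE and the
 * width of sector_t are assumptions for the sake of the example.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096
typedef uint64_t sector_t;

struct swsusp_header {
	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int)];
	sector_t image;
	unsigned int flags;	/* flags to pass to the "boot" kernel */
	char orig_sig[10];
	char sig[10];
} __attribute__((packed));

int main(void)
{
	/* 20 bytes of signatures + image + flags + reserved == one page */
	assert(sizeof(struct swsusp_header) == PAGE_SIZE);
	printf("swsusp_header: %zu bytes\n", sizeof(struct swsusp_header));
	return 0;
}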
diff --git a/kernel/power/user.c b/kernel/power/user.c
index d65305b515b1..bd0723a7df3f 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -128,92 +128,6 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
return res;
}
-static inline int platform_prepare(void)
-{
- int error = 0;
-
- if (hibernation_ops)
- error = hibernation_ops->prepare();
-
- return error;
-}
-
-static inline void platform_finish(void)
-{
- if (hibernation_ops)
- hibernation_ops->finish();
-}
-
-static inline int snapshot_suspend(int platform_suspend)
-{
- int error;
-
- mutex_lock(&pm_mutex);
- /* Free memory before shutting down devices. */
- error = swsusp_shrink_memory();
- if (error)
- goto Finish;
-
- if (platform_suspend) {
- error = platform_prepare();
- if (error)
- goto Finish;
- }
- suspend_console();
- error = device_suspend(PMSG_FREEZE);
- if (error)
- goto Resume_devices;
-
- error = disable_nonboot_cpus();
- if (!error) {
- in_suspend = 1;
- error = swsusp_suspend();
- }
- enable_nonboot_cpus();
- Resume_devices:
- if (platform_suspend)
- platform_finish();
-
- device_resume();
- resume_console();
- Finish:
- mutex_unlock(&pm_mutex);
- return error;
-}
-
-static inline int snapshot_restore(int platform_suspend)
-{
- int error;
-
- mutex_lock(&pm_mutex);
- pm_prepare_console();
- if (platform_suspend) {
- error = platform_prepare();
- if (error)
- goto Finish;
- }
- suspend_console();
- error = device_suspend(PMSG_PRETHAW);
- if (error)
- goto Resume_devices;
-
- error = disable_nonboot_cpus();
- if (!error)
- error = swsusp_resume();
-
- enable_nonboot_cpus();
- Resume_devices:
- if (platform_suspend)
- platform_finish();
-
- device_resume();
- resume_console();
- Finish:
- pm_restore_console();
- mutex_unlock(&pm_mutex);
- return error;
-}
-
static int snapshot_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
@@ -237,10 +151,14 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
if (data->frozen)
break;
mutex_lock(&pm_mutex);
- if (freeze_processes()) {
- thaw_processes();
- error = -EBUSY;
+ error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
+ if (!error) {
+ error = freeze_processes();
+ if (error)
+ thaw_processes();
}
+ if (error)
+ pm_notifier_call_chain(PM_POST_HIBERNATION);
mutex_unlock(&pm_mutex);
if (!error)
data->frozen = 1;
@@ -251,6 +169,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
break;
mutex_lock(&pm_mutex);
thaw_processes();
+ pm_notifier_call_chain(PM_POST_HIBERNATION);
mutex_unlock(&pm_mutex);
data->frozen = 0;
break;
@@ -260,7 +179,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
error = -EPERM;
break;
}
- error = snapshot_suspend(data->platform_suspend);
+ error = hibernation_snapshot(data->platform_suspend);
if (!error)
error = put_user(in_suspend, (unsigned int __user *)arg);
if (!error)
@@ -274,7 +193,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
error = -EPERM;
break;
}
- error = snapshot_restore(data->platform_suspend);
+ error = hibernation_restore(data->platform_suspend);
break;
case SNAPSHOT_FREE:
@@ -336,47 +255,19 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
break;
case SNAPSHOT_S2RAM:
- if (!pm_ops) {
- error = -ENOSYS;
- break;
- }
-
if (!data->frozen) {
error = -EPERM;
break;
}
-
if (!mutex_trylock(&pm_mutex)) {
error = -EBUSY;
break;
}
-
- if (pm_ops->prepare) {
- error = pm_ops->prepare(PM_SUSPEND_MEM);
- if (error)
- goto OutS3;
- }
-
- /* Put devices to sleep */
- suspend_console();
- error = device_suspend(PMSG_SUSPEND);
- if (error) {
- printk(KERN_ERR "Failed to suspend some devices.\n");
- } else {
- error = disable_nonboot_cpus();
- if (!error) {
- /* Enter S3, system is already frozen */
- suspend_enter(PM_SUSPEND_MEM);
- enable_nonboot_cpus();
- }
- /* Wake up devices */
- device_resume();
- }
- resume_console();
- if (pm_ops->finish)
- pm_ops->finish(PM_SUSPEND_MEM);
-
- OutS3:
+ /*
+ * Tasks are frozen and the notifiers have been called with
+ * PM_HIBERNATION_PREPARE
+ */
+ error = suspend_devices_and_enter(PM_SUSPEND_MEM);
mutex_unlock(&pm_mutex);
break;
@@ -386,19 +277,14 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
switch (arg) {
case PMOPS_PREPARE:
- if (hibernation_ops) {
- data->platform_suspend = 1;
- error = 0;
- } else {
- error = -ENOSYS;
- }
+ data->platform_suspend = 1;
+ error = 0;
break;
case PMOPS_ENTER:
- if (data->platform_suspend) {
- kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK);
- error = hibernation_ops->enter();
- }
+ if (data->platform_suspend)
+ error = hibernation_platform_enter();
+
break;
case PMOPS_FINISH:
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 4a1745f1dadf..82a558b655da 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -142,7 +142,7 @@ static int may_attach(struct task_struct *task)
return -EPERM;
smp_rmb();
if (task->mm)
- dumpable = task->mm->dumpable;
+ dumpable = get_dumpable(task->mm);
if (!dumpable && !capable(CAP_SYS_PTRACE))
return -EPERM;
diff --git a/kernel/relay.c b/kernel/relay.c
index a615a8f513fc..510fbbd7b500 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -80,7 +80,7 @@ static struct vm_operations_struct relay_file_mmap_ops = {
*
* Caller should already have grabbed mmap_sem.
*/
-int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma)
+static int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma)
{
unsigned long length = vma->vm_end - vma->vm_start;
struct file *filp = vma->vm_file;
@@ -145,7 +145,7 @@ depopulate:
*
* Returns channel buffer if successful, %NULL otherwise.
*/
-struct rchan_buf *relay_create_buf(struct rchan *chan)
+static struct rchan_buf *relay_create_buf(struct rchan *chan)
{
struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
if (!buf)
@@ -175,7 +175,7 @@ free_buf:
*
* Should only be called from kref_put().
*/
-void relay_destroy_channel(struct kref *kref)
+static void relay_destroy_channel(struct kref *kref)
{
struct rchan *chan = container_of(kref, struct rchan, kref);
kfree(chan);
@@ -185,7 +185,7 @@ void relay_destroy_channel(struct kref *kref)
* relay_destroy_buf - destroy an rchan_buf struct and associated buffer
* @buf: the buffer struct
*/
-void relay_destroy_buf(struct rchan_buf *buf)
+static void relay_destroy_buf(struct rchan_buf *buf)
{
struct rchan *chan = buf->chan;
unsigned int i;
@@ -210,7 +210,7 @@ void relay_destroy_buf(struct rchan_buf *buf)
* rchan_buf_struct and the channel buffer. Should only be called from
* kref_put().
*/
-void relay_remove_buf(struct kref *kref)
+static void relay_remove_buf(struct kref *kref)
{
struct rchan_buf *buf = container_of(kref, struct rchan_buf, kref);
buf->chan->cb->remove_buf_file(buf->dentry);
@@ -223,11 +223,10 @@ void relay_remove_buf(struct kref *kref)
*
* Returns 1 if the buffer is empty, 0 otherwise.
*/
-int relay_buf_empty(struct rchan_buf *buf)
+static int relay_buf_empty(struct rchan_buf *buf)
{
return (buf->subbufs_produced - buf->subbufs_consumed) ? 0 : 1;
}
-EXPORT_SYMBOL_GPL(relay_buf_empty);
/**
* relay_buf_full - boolean, is the channel buffer full?
diff --git a/kernel/rwsem.c b/kernel/rwsem.c
index 9a87886b022e..1ec620c03064 100644
--- a/kernel/rwsem.c
+++ b/kernel/rwsem.c
@@ -20,7 +20,7 @@ void down_read(struct rw_semaphore *sem)
might_sleep();
rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
- __down_read(sem);
+ LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read);
@@ -47,7 +47,7 @@ void down_write(struct rw_semaphore *sem)
might_sleep();
rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
- __down_write(sem);
+ LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write);
@@ -111,7 +111,7 @@ void down_read_nested(struct rw_semaphore *sem, int subclass)
might_sleep();
rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
- __down_read(sem);
+ LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read_nested);
@@ -130,7 +130,7 @@ void down_write_nested(struct rw_semaphore *sem, int subclass)
might_sleep();
rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
- __down_write_nested(sem, subclass);
+ LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write_nested);
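
The LOCK_CONTENDED() wrapper used here (and in kernel/spinlock.c below) is defined in the lockdep headers rather than in this diff. Its approximate shape, consistent with the lock_contended()/lock_acquired() calls seen in kernel/mutex.c above, is:

/*
 * Approximate shape of LOCK_CONTENDED(); the authoritative definition
 * lives in the lockdep headers and is not part of this diff.
 */
#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map);			\
} while (0)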
diff --git a/kernel/sched.c b/kernel/sched.c
index cb31fb4a1379..93cf241cfbe9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -301,7 +301,7 @@ struct rq {
struct lock_class_key rq_lock_key;
};
-static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
static DEFINE_MUTEX(sched_hotcpu_mutex);
static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
@@ -379,6 +379,23 @@ static inline unsigned long long rq_clock(struct rq *rq)
#define task_rq(p) cpu_rq(task_cpu(p))
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+unsigned long long cpu_clock(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long long now;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rq->lock, flags);
+ now = rq_clock(rq);
+ spin_unlock_irqrestore(&rq->lock, flags);
+
+ return now;
+}
+
#ifdef CONFIG_FAIR_GROUP_SCHED
/* Change a task's ->cfs_rq if it moves across CPUs */
static inline void set_task_cfs_rq(struct task_struct *p)
@@ -2235,7 +2252,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
rq = cpu_rq(i);
- if (*sd_idle && !idle_cpu(i))
+ if (*sd_idle && rq->nr_running)
*sd_idle = 0;
/* Bias balancing toward cpus of our domain */
@@ -2257,9 +2274,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
/*
* First idle cpu or the first cpu(busiest) in this sched group
* is eligible for doing load balancing at this and above
- * domains.
+ * domains. In the newly idle case, we will allow all the CPUs
+ * to do the newly idle load balance.
*/
- if (local_group && balance_cpu != this_cpu && balance) {
+ if (idle != CPU_NEWLY_IDLE && local_group &&
+ balance_cpu != this_cpu && balance) {
*balance = 0;
goto ret;
}
@@ -2677,6 +2696,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
unsigned long imbalance;
int nr_moved = 0;
int sd_idle = 0;
+ int all_pinned = 0;
cpumask_t cpus = CPU_MASK_ALL;
/*
@@ -2715,10 +2735,11 @@ redo:
double_lock_balance(this_rq, busiest);
nr_moved = move_tasks(this_rq, this_cpu, busiest,
minus_1_or_zero(busiest->nr_running),
- imbalance, sd, CPU_NEWLY_IDLE, NULL);
+ imbalance, sd, CPU_NEWLY_IDLE,
+ &all_pinned);
spin_unlock(&busiest->lock);
- if (!nr_moved) {
+ if (unlikely(all_pinned)) {
cpu_clear(cpu_of(busiest), cpus);
if (!cpus_empty(cpus))
goto redo;
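cpu_clock() above hands kernel code a sched_clock()-based timestamp that is serialized against the runqueue lock of the chosen CPU; it is meant for statistics, not timekeeping. A hedged usage sketch (the workload being timed and the message are illustrative, not part of this patch):

	/* illustrative: rough timing of a code section on the local CPU */
	static void time_some_work(void)
	{
		unsigned long long t0, t1;
		int cpu = get_cpu();	/* stay on one CPU while sampling */

		t0 = cpu_clock(cpu);
		do_some_work();		/* hypothetical workload */
		t1 = cpu_clock(cpu);
		put_cpu();

		printk(KERN_DEBUG "work took %llu ns\n", t1 - t0);
	}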
diff --git a/kernel/signal.c b/kernel/signal.c
index 39d122753bac..ef8156a6aad5 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -255,6 +255,16 @@ flush_signal_handlers(struct task_struct *t, int force_default)
}
}
+int unhandled_signal(struct task_struct *tsk, int sig)
+{
+ if (is_init(tsk))
+ return 1;
+ if (tsk->ptrace & PT_PTRACED)
+ return 0;
+ return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
+ (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
+}
+
/* Notify the system that a driver wants to block all signals for this
* process, and wants to be notified if any signals at all were to be
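unhandled_signal() lets architecture fault handlers check whether the faulting task will simply ignore or default-handle the signal before logging it; together with the "exception-trace" debug sysctl added later in this patch (show_unhandled_signals) it gates such messages. An illustrative fragment of a fault handler, assuming "address" comes from the exception frame and with made-up message text:

	if (show_unhandled_signals && unhandled_signal(current, SIGSEGV) &&
	    printk_ratelimit())
		printk(KERN_INFO "%s[%d]: unhandled segfault at %08lx\n",
		       current->comm, current->pid, address);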
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 2c6c2bf85514..cd72424c2662 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -72,7 +72,7 @@ void __lockfunc _read_lock(rwlock_t *lock)
{
preempt_disable();
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- _raw_read_lock(lock);
+ LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
EXPORT_SYMBOL(_read_lock);
@@ -88,8 +88,8 @@ unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
* _raw_spin_lock_flags() code, because lockdep assumes
* that interrupts are not re-enabled during lock-acquire:
*/
-#ifdef CONFIG_PROVE_LOCKING
- _raw_spin_lock(lock);
+#ifdef CONFIG_LOCKDEP
+ LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
_raw_spin_lock_flags(lock, &flags);
#endif
@@ -102,7 +102,7 @@ void __lockfunc _spin_lock_irq(spinlock_t *lock)
local_irq_disable();
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- _raw_spin_lock(lock);
+ LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_irq);
@@ -111,7 +111,7 @@ void __lockfunc _spin_lock_bh(spinlock_t *lock)
local_bh_disable();
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- _raw_spin_lock(lock);
+ LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_bh);
@@ -122,7 +122,7 @@ unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
local_irq_save(flags);
preempt_disable();
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- _raw_read_lock(lock);
+ LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
return flags;
}
EXPORT_SYMBOL(_read_lock_irqsave);
@@ -132,7 +132,7 @@ void __lockfunc _read_lock_irq(rwlock_t *lock)
local_irq_disable();
preempt_disable();
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- _raw_read_lock(lock);
+ LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
EXPORT_SYMBOL(_read_lock_irq);
@@ -141,7 +141,7 @@ void __lockfunc _read_lock_bh(rwlock_t *lock)
local_bh_disable();
preempt_disable();
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- _raw_read_lock(lock);
+ LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
EXPORT_SYMBOL(_read_lock_bh);
@@ -152,7 +152,7 @@ unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
local_irq_save(flags);
preempt_disable();
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- _raw_write_lock(lock);
+ LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
return flags;
}
EXPORT_SYMBOL(_write_lock_irqsave);
@@ -162,7 +162,7 @@ void __lockfunc _write_lock_irq(rwlock_t *lock)
local_irq_disable();
preempt_disable();
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- _raw_write_lock(lock);
+ LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
EXPORT_SYMBOL(_write_lock_irq);
@@ -171,7 +171,7 @@ void __lockfunc _write_lock_bh(rwlock_t *lock)
local_bh_disable();
preempt_disable();
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- _raw_write_lock(lock);
+ LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
EXPORT_SYMBOL(_write_lock_bh);
@@ -179,7 +179,7 @@ void __lockfunc _spin_lock(spinlock_t *lock)
{
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- _raw_spin_lock(lock);
+ LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock);
@@ -188,7 +188,7 @@ void __lockfunc _write_lock(rwlock_t *lock)
{
preempt_disable();
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- _raw_write_lock(lock);
+ LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
EXPORT_SYMBOL(_write_lock);
@@ -289,7 +289,7 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
{
preempt_disable();
spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
- _raw_spin_lock(lock);
+ LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_nested);
@@ -305,8 +305,8 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas
* _raw_spin_lock_flags() code, because lockdep assumes
* that interrupts are not re-enabled during lock-acquire:
*/
-#ifdef CONFIG_PROVE_SPIN_LOCKING
- _raw_spin_lock(lock);
+#ifdef CONFIG_LOCKDEP
+ LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
_raw_spin_lock_flags(lock, &flags);
#endif
diff --git a/kernel/sys.c b/kernel/sys.c
index 4d141ae3e802..08562f419768 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -100,6 +100,13 @@ struct pid *cad_pid;
EXPORT_SYMBOL(cad_pid);
/*
+ * If set, this is used for preparing the system to power off.
+ */
+
+void (*pm_power_off_prepare)(void);
+EXPORT_SYMBOL(pm_power_off_prepare);
+
+/*
* Notifier list for kernel code which wants to be called
* at shutdown. This is used to stop any idling DMA operations
* and the like.
@@ -867,6 +874,8 @@ EXPORT_SYMBOL_GPL(kernel_halt);
void kernel_power_off(void)
{
kernel_shutdown_prepare(SYSTEM_POWER_OFF);
+ if (pm_power_off_prepare)
+ pm_power_off_prepare();
printk(KERN_EMERG "Power down.\n");
machine_power_off();
}
@@ -1027,7 +1036,7 @@ asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
return -EPERM;
}
if (new_egid != old_egid) {
- current->mm->dumpable = suid_dumpable;
+ set_dumpable(current->mm, suid_dumpable);
smp_wmb();
}
if (rgid != (gid_t) -1 ||
@@ -1057,13 +1066,13 @@ asmlinkage long sys_setgid(gid_t gid)
if (capable(CAP_SETGID)) {
if (old_egid != gid) {
- current->mm->dumpable = suid_dumpable;
+ set_dumpable(current->mm, suid_dumpable);
smp_wmb();
}
current->gid = current->egid = current->sgid = current->fsgid = gid;
} else if ((gid == current->gid) || (gid == current->sgid)) {
if (old_egid != gid) {
- current->mm->dumpable = suid_dumpable;
+ set_dumpable(current->mm, suid_dumpable);
smp_wmb();
}
current->egid = current->fsgid = gid;
@@ -1094,7 +1103,7 @@ static int set_user(uid_t new_ruid, int dumpclear)
switch_uid(new_user);
if (dumpclear) {
- current->mm->dumpable = suid_dumpable;
+ set_dumpable(current->mm, suid_dumpable);
smp_wmb();
}
current->uid = new_ruid;
@@ -1150,7 +1159,7 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
return -EAGAIN;
if (new_euid != old_euid) {
- current->mm->dumpable = suid_dumpable;
+ set_dumpable(current->mm, suid_dumpable);
smp_wmb();
}
current->fsuid = current->euid = new_euid;
@@ -1200,7 +1209,7 @@ asmlinkage long sys_setuid(uid_t uid)
return -EPERM;
if (old_euid != uid) {
- current->mm->dumpable = suid_dumpable;
+ set_dumpable(current->mm, suid_dumpable);
smp_wmb();
}
current->fsuid = current->euid = uid;
@@ -1245,7 +1254,7 @@ asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
}
if (euid != (uid_t) -1) {
if (euid != current->euid) {
- current->mm->dumpable = suid_dumpable;
+ set_dumpable(current->mm, suid_dumpable);
smp_wmb();
}
current->euid = euid;
@@ -1295,7 +1304,7 @@ asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
}
if (egid != (gid_t) -1) {
if (egid != current->egid) {
- current->mm->dumpable = suid_dumpable;
+ set_dumpable(current->mm, suid_dumpable);
smp_wmb();
}
current->egid = egid;
@@ -1341,7 +1350,7 @@ asmlinkage long sys_setfsuid(uid_t uid)
uid == current->suid || uid == current->fsuid ||
capable(CAP_SETUID)) {
if (uid != old_fsuid) {
- current->mm->dumpable = suid_dumpable;
+ set_dumpable(current->mm, suid_dumpable);
smp_wmb();
}
current->fsuid = uid;
@@ -1370,7 +1379,7 @@ asmlinkage long sys_setfsgid(gid_t gid)
gid == current->sgid || gid == current->fsgid ||
capable(CAP_SETGID)) {
if (gid != old_fsgid) {
- current->mm->dumpable = suid_dumpable;
+ set_dumpable(current->mm, suid_dumpable);
smp_wmb();
}
current->fsgid = gid;
@@ -2167,14 +2176,14 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
error = put_user(current->pdeath_signal, (int __user *)arg2);
break;
case PR_GET_DUMPABLE:
- error = current->mm->dumpable;
+ error = get_dumpable(current->mm);
break;
case PR_SET_DUMPABLE:
if (arg2 < 0 || arg2 > 1) {
error = -EINVAL;
break;
}
- current->mm->dumpable = arg2;
+ set_dumpable(current->mm, arg2);
break;
case PR_SET_UNALIGN:
@@ -2286,3 +2295,61 @@ asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
}
return err ? -EFAULT : 0;
}
+
+char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
+
+static void argv_cleanup(char **argv, char **envp)
+{
+ argv_free(argv);
+}
+
+/**
+ * orderly_poweroff - Trigger an orderly system poweroff
+ * @force: force poweroff if command execution fails
+ *
+ * This may be called from any context to trigger a system shutdown.
+ * If the orderly shutdown fails, it will force an immediate shutdown.
+ */
+int orderly_poweroff(bool force)
+{
+ int argc;
+ char **argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc);
+ static char *envp[] = {
+ "HOME=/",
+ "PATH=/sbin:/bin:/usr/sbin:/usr/bin",
+ NULL
+ };
+ int ret = -ENOMEM;
+ struct subprocess_info *info;
+
+ if (argv == NULL) {
+ printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
+ __func__, poweroff_cmd);
+ goto out;
+ }
+
+ info = call_usermodehelper_setup(argv[0], argv, envp);
+ if (info == NULL) {
+ argv_free(argv);
+ goto out;
+ }
+
+ call_usermodehelper_setcleanup(info, argv_cleanup);
+
+ ret = call_usermodehelper_exec(info, UMH_NO_WAIT);
+
+ out:
+ if (ret && force) {
+ printk(KERN_WARNING "Failed to start orderly shutdown: "
+ "forcing the issue\n");
+
+ /* I guess this should try to kick off some daemon to
+ sync and poweroff asap. Or not even bother syncing
+ if we're doing an emergency shutdown? */
+ emergency_sync();
+ kernel_power_off();
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(orderly_poweroff);
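orderly_poweroff() gives in-kernel callers a way to ask userspace to shut the machine down via poweroff_cmd (tunable through the kernel.poweroff_cmd sysctl added below), falling back to an immediate kernel_power_off() when force is set and the helper cannot be started. A hedged sketch of a caller, say a driver reacting to a critical thermal trip point (function name and message are illustrative):

	/* illustrative: emergency shutdown on a critical thermal event */
	static void my_thermal_critical(void)
	{
		printk(KERN_EMERG "Critical temperature reached, shutting down\n");
		orderly_poweroff(true);	/* force poweroff if the helper fails */
	}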
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 7063ebc6db05..ddebf3f2affe 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -46,6 +46,7 @@
#include <linux/syscalls.h>
#include <linux/nfs_fs.h>
#include <linux/acpi.h>
+#include <linux/reboot.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
@@ -77,6 +78,7 @@ extern int percpu_pagelist_fraction;
extern int compat_log;
extern int maps_protect;
extern int sysctl_stat_interval;
+extern int audit_argv_kb;
/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
static int maxolduid = 65535;
@@ -159,6 +161,8 @@ extern ctl_table inotify_table[];
int sysctl_legacy_va_layout;
#endif
+extern int prove_locking;
+extern int lock_stat;
/* The default sysctl tables: */
@@ -280,6 +284,26 @@ static ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec,
},
+#ifdef CONFIG_PROVE_LOCKING
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "prove_locking",
+ .data = &prove_locking,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+#endif
+#ifdef CONFIG_LOCK_STAT
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "lock_stat",
+ .data = &lock_stat,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+#endif
{
.ctl_name = CTL_UNNUMBERED,
.procname = "sched_features",
@@ -305,6 +329,16 @@ static ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec,
},
+#ifdef CONFIG_AUDITSYSCALL
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "audit_argv_kb",
+ .data = &audit_argv_kb,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+#endif
{
.ctl_name = KERN_CORE_PATTERN,
.procname = "core_pattern",
@@ -659,7 +693,7 @@ static ctl_table kern_table[] = {
{
.ctl_name = KERN_ACPI_VIDEO_FLAGS,
.procname = "acpi_video_flags",
- .data = &acpi_video_flags,
+ .data = &acpi_realmode_flags,
.maxlen = sizeof (unsigned long),
.mode = 0644,
.proc_handler = &proc_doulongvec_minmax,
@@ -705,13 +739,26 @@ static ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
#endif
-
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "poweroff_cmd",
+ .data = &poweroff_cmd,
+ .maxlen = POWEROFF_CMD_PATH_LEN,
+ .mode = 0644,
+ .proc_handler = &proc_dostring,
+ .strategy = &sysctl_string,
+ },
+/*
+ * NOTE: do not add new entries to this table unless you have read
+ * Documentation/sysctl/ctl_unnumbered.txt
+ */
{ .ctl_name = 0 }
};
/* Constants for minimum and maximum testing in vm_table.
We use these as one-element integer vectors. */
static int zero;
+static int two = 2;
static int one_hundred = 100;
@@ -1102,7 +1149,10 @@ static ctl_table fs_table[] = {
.data = &lease_break_time,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ .extra2 = &two,
},
{
.ctl_name = FS_AIO_NR,
@@ -1153,6 +1203,16 @@ static ctl_table fs_table[] = {
};
static ctl_table debug_table[] = {
+#ifdef CONFIG_X86
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "exception-trace",
+ .data = &show_unhandled_signals,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+#endif
{ .ctl_name = 0 }
};
diff --git a/kernel/time.c b/kernel/time.c
index ffe19149d770..5b81da08bbdb 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -57,17 +57,14 @@ EXPORT_SYMBOL(sys_tz);
*/
asmlinkage long sys_time(time_t __user * tloc)
{
- /*
- * We read xtime.tv_sec atomically - it's updated
- * atomically by update_wall_time(), so no need to
- * even read-lock the xtime seqlock:
- */
- time_t i = xtime.tv_sec;
+ time_t i;
+ struct timespec tv;
- smp_rmb(); /* sys_time() results are coherent */
+ getnstimeofday(&tv);
+ i = tv.tv_sec;
if (tloc) {
- if (put_user(i, tloc))
+ if (put_user(i,tloc))
i = -EFAULT;
}
return i;
@@ -136,7 +133,6 @@ static inline void warp_clock(void)
write_seqlock_irq(&xtime_lock);
wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60;
xtime.tv_sec += sys_tz.tz_minuteswest * 60;
- time_interpolator_reset();
write_sequnlock_irq(&xtime_lock);
clock_was_set();
}
@@ -309,92 +305,6 @@ struct timespec timespec_trunc(struct timespec t, unsigned gran)
}
EXPORT_SYMBOL(timespec_trunc);
-#ifdef CONFIG_TIME_INTERPOLATION
-void getnstimeofday (struct timespec *tv)
-{
- unsigned long seq,sec,nsec;
-
- do {
- seq = read_seqbegin(&xtime_lock);
- sec = xtime.tv_sec;
- nsec = xtime.tv_nsec+time_interpolator_get_offset();
- } while (unlikely(read_seqretry(&xtime_lock, seq)));
-
- while (unlikely(nsec >= NSEC_PER_SEC)) {
- nsec -= NSEC_PER_SEC;
- ++sec;
- }
- tv->tv_sec = sec;
- tv->tv_nsec = nsec;
-}
-EXPORT_SYMBOL_GPL(getnstimeofday);
-
-int do_settimeofday (struct timespec *tv)
-{
- time_t wtm_sec, sec = tv->tv_sec;
- long wtm_nsec, nsec = tv->tv_nsec;
-
- if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
- return -EINVAL;
-
- write_seqlock_irq(&xtime_lock);
- {
- wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
- wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
- set_normalized_timespec(&xtime, sec, nsec);
- set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
- time_interpolator_reset();
- }
- write_sequnlock_irq(&xtime_lock);
- clock_was_set();
- return 0;
-}
-EXPORT_SYMBOL(do_settimeofday);
-
-void do_gettimeofday (struct timeval *tv)
-{
- unsigned long seq, nsec, usec, sec, offset;
- do {
- seq = read_seqbegin(&xtime_lock);
- offset = time_interpolator_get_offset();
- sec = xtime.tv_sec;
- nsec = xtime.tv_nsec;
- } while (unlikely(read_seqretry(&xtime_lock, seq)));
-
- usec = (nsec + offset) / 1000;
-
- while (unlikely(usec >= USEC_PER_SEC)) {
- usec -= USEC_PER_SEC;
- ++sec;
- }
-
- tv->tv_sec = sec;
- tv->tv_usec = usec;
-
- /*
- * Make sure xtime.tv_sec [returned by sys_time()] always
- * follows the gettimeofday() result precisely. This
- * condition is extremely unlikely, it can hit at most
- * once per second:
- */
- if (unlikely(xtime.tv_sec != tv->tv_sec)) {
- unsigned long flags;
-
- write_seqlock_irqsave(&xtime_lock, flags);
- update_wall_time();
- write_sequnlock_irqrestore(&xtime_lock, flags);
- }
-}
-EXPORT_SYMBOL(do_gettimeofday);
-
-#else /* CONFIG_TIME_INTERPOLATION */
-
#ifndef CONFIG_GENERIC_TIME
/*
* Simulate gettimeofday using do_gettimeofday which only allows a timeval
@@ -410,7 +320,6 @@ void getnstimeofday(struct timespec *tv)
}
EXPORT_SYMBOL_GPL(getnstimeofday);
#endif
-#endif /* CONFIG_TIME_INTERPOLATION */
/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
* Assumes input in normal date format, i.e. 1980-12-31 23:59:59
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 438c6b723ee2..cd91237dbfe3 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/time.h>
+#include <linux/timer.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/hrtimer.h>
@@ -116,11 +117,6 @@ void second_overflow(void)
if (xtime.tv_sec % 86400 == 0) {
xtime.tv_sec--;
wall_to_monotonic.tv_sec++;
- /*
- * The timer interpolator will make time change
- * gradually instead of an immediate jump by one second
- */
- time_interpolator_update(-NSEC_PER_SEC);
time_state = TIME_OOP;
printk(KERN_NOTICE "Clock: inserting leap second "
"23:59:60 UTC\n");
@@ -130,11 +126,6 @@ void second_overflow(void)
if ((xtime.tv_sec + 1) % 86400 == 0) {
xtime.tv_sec++;
wall_to_monotonic.tv_sec--;
- /*
- * Use of time interpolator for a gradual change of
- * time
- */
- time_interpolator_update(NSEC_PER_SEC);
time_state = TIME_WAIT;
printk(KERN_NOTICE "Clock: deleting leap second "
"23:59:59 UTC\n");
@@ -185,12 +176,64 @@ u64 current_tick_length(void)
return tick_length;
}
+#ifdef CONFIG_GENERIC_CMOS_UPDATE
-void __attribute__ ((weak)) notify_arch_cmos_timer(void)
+/* Disable the cmos update - used by virtualization and embedded */
+int no_sync_cmos_clock __read_mostly;
+
+static void sync_cmos_clock(unsigned long dummy);
+
+static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
+
+static void sync_cmos_clock(unsigned long dummy)
+{
+ struct timespec now, next;
+ int fail = 1;
+
+ /*
+ * If we have an externally synchronized Linux clock, then update
+ * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
+ * called as close as possible to 500 ms before the new second starts.
+ * This code is run on a timer. If the clock is set, that timer
+ * may not expire at the correct time. Thus, we adjust...
+ */
+ if (!ntp_synced())
+ /*
+ * Not synced, exit, do not restart a timer (if one is
+ * running, let it run out).
+ */
+ return;
+
+ getnstimeofday(&now);
+ if (abs(xtime.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
+ fail = update_persistent_clock(now);
+
+ next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec;
+ if (next.tv_nsec <= 0)
+ next.tv_nsec += NSEC_PER_SEC;
+
+ if (!fail)
+ next.tv_sec = 659;
+ else
+ next.tv_sec = 0;
+
+ if (next.tv_nsec >= NSEC_PER_SEC) {
+ next.tv_sec++;
+ next.tv_nsec -= NSEC_PER_SEC;
+ }
+ mod_timer(&sync_cmos_timer, jiffies + timespec_to_jiffies(&next));
+}
+
+static void notify_cmos_timer(void)
{
- return;
+ if (!no_sync_cmos_clock)
+ mod_timer(&sync_cmos_timer, jiffies + 1);
}
+#else
+static inline void notify_cmos_timer(void) { }
+#endif
+
/* adjtimex mainly allows reading (and writing, if superuser) of
* kernel time-keeping variables. used by xntpd.
*/
@@ -355,6 +398,6 @@ leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0)
txc->stbcnt = 0;
write_sequnlock_irq(&xtime_lock);
do_gettimeofday(&txc->time);
- notify_arch_cmos_timer();
+ notify_cmos_timer();
return(result);
}
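Worked through, the re-arm logic in sync_cmos_clock() keeps the timer landing on half-second boundaries: if it fires at now.tv_nsec = 0.8 s, then next.tv_nsec = 0.5 s - 0.8 s = -0.3 s, which the <= 0 branch turns into 0.7 s, so the next expiry falls on the following x.5 s mark, the point closest to 500 ms before a new second. A successful update_persistent_clock() adds next.tv_sec = 659, pushing the next attempt out by roughly eleven minutes; a failure retries at the very next half second.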
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 8001d37071f5..db8e0f3d409b 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -31,6 +31,12 @@ struct tick_device tick_broadcast_device;
static cpumask_t tick_broadcast_mask;
static DEFINE_SPINLOCK(tick_broadcast_lock);
+#ifdef CONFIG_TICK_ONESHOT
+static void tick_broadcast_clear_oneshot(int cpu);
+#else
+static inline void tick_broadcast_clear_oneshot(int cpu) { }
+#endif
+
/*
* Debugging: see timer_list.c
*/
@@ -49,7 +55,7 @@ cpumask_t *tick_get_broadcast_mask(void)
*/
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
- if (bc && bc->mode == CLOCK_EVT_MODE_SHUTDOWN)
+ if (bc)
tick_setup_periodic(bc, 1);
}
@@ -99,8 +105,19 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
cpu_set(cpu, tick_broadcast_mask);
tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
ret = 1;
- }
+ } else {
+ /*
+ * When the new device is not affected by the stop
+ * feature and the cpu is marked in the broadcast mask
+ * then clear the broadcast bit.
+ */
+ if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
+ int cpu = smp_processor_id();
+ cpu_clear(cpu, tick_broadcast_mask);
+ tick_broadcast_clear_oneshot(cpu);
+ }
+ }
spin_unlock_irqrestore(&tick_broadcast_lock, flags);
return ret;
}
@@ -299,7 +316,7 @@ void tick_suspend_broadcast(void)
spin_lock_irqsave(&tick_broadcast_lock, flags);
bc = tick_broadcast_device.evtdev;
- if (bc && tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
+ if (bc)
clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
spin_unlock_irqrestore(&tick_broadcast_lock, flags);
@@ -316,6 +333,8 @@ int tick_resume_broadcast(void)
bc = tick_broadcast_device.evtdev;
if (bc) {
+ clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);
+
switch (tick_broadcast_device.mode) {
case TICKDEV_MODE_PERIODIC:
if(!cpus_empty(tick_broadcast_mask))
@@ -485,6 +504,16 @@ out:
spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
+/*
+ * Reset the one shot broadcast for a cpu
+ *
+ * Called with tick_broadcast_lock held
+ */
+static void tick_broadcast_clear_oneshot(int cpu)
+{
+ cpu_clear(cpu, tick_broadcast_oneshot_mask);
+}
+
/**
* tick_broadcast_setup_highres - setup the broadcast device for highres
*/
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index a96ec9ab3454..77a21abc8716 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -318,12 +318,17 @@ static void tick_resume(void)
{
struct tick_device *td = &__get_cpu_var(tick_cpu_device);
unsigned long flags;
+ int broadcast = tick_resume_broadcast();
spin_lock_irqsave(&tick_device_lock, flags);
- if (td->mode == TICKDEV_MODE_PERIODIC)
- tick_setup_periodic(td->evtdev, 0);
- else
- tick_resume_oneshot();
+ clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);
+
+ if (!broadcast) {
+ if (td->mode == TICKDEV_MODE_PERIODIC)
+ tick_setup_periodic(td->evtdev, 0);
+ else
+ tick_resume_oneshot();
+ }
spin_unlock_irqrestore(&tick_device_lock, flags);
}
@@ -360,8 +365,7 @@ static int tick_notify(struct notifier_block *nb, unsigned long reason,
break;
case CLOCK_EVT_NOTIFY_RESUME:
- if (!tick_resume_broadcast())
- tick_resume();
+ tick_resume();
break;
default:
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index f6997ab0c3c9..0258d3115d54 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -73,8 +73,21 @@ int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *))
struct clock_event_device *dev = td->evtdev;
if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) ||
- !tick_device_is_functional(dev))
+ !tick_device_is_functional(dev)) {
+
+ printk(KERN_INFO "Clockevents: "
+ "could not switch to one-shot mode:");
+ if (!dev) {
+ printk(" no tick device\n");
+ } else {
+ if (!tick_device_is_functional(dev))
+ printk(" %s is not functional.\n", dev->name);
+ else
+ printk(" %s does not support one-shot mode.\n",
+ dev->name);
+ }
return -EINVAL;
+ }
td->mode = TICKDEV_MODE_ONESHOT;
dev->event_handler = handler;
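With the expanded error path above, a failed switch to one-shot mode now explains itself in the log. For a clock-event device named "pit" that lacks CLOCK_EVT_FEAT_ONESHOT, for example (device name illustrative), the printk calls assemble to:

	Clockevents: could not switch to one-shot mode: pit does not support one-shot mode.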
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 52db9e3c526e..b416995b9757 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -546,6 +546,7 @@ void tick_setup_sched_timer(void)
{
struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
ktime_t now = ktime_get();
+ u64 offset;
/*
* Emulate tick processing via per-CPU hrtimers:
@@ -554,8 +555,12 @@ void tick_setup_sched_timer(void)
ts->sched_timer.function = tick_sched_timer;
ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
- /* Get the next period */
+ /* Get the next period (per cpu) */
ts->sched_timer.expires = tick_init_jiffy_update();
+ offset = ktime_to_ns(tick_period) >> 1;
+ do_div(offset, NR_CPUS);
+ offset *= smp_processor_id();
+ ts->sched_timer.expires = ktime_add_ns(ts->sched_timer.expires, offset);
for (;;) {
hrtimer_forward(&ts->sched_timer, now, tick_period);
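The per-CPU offset added above staggers the sched-tick hrtimers so they no longer all expire on the same edge. As a worked example with HZ=1000 (tick_period = 1,000,000 ns) and NR_CPUS = 64, offset = (1,000,000 >> 1) / 64 = 7,812 ns per CPU index: CPU 0 keeps firing on the period boundary, CPU 1 about 7.8 us later, and CPU 63 roughly 0.49 ms later, just under half a tick.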
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 728cedfd3cbd..88c81026e003 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -401,7 +401,7 @@ static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
* this is optimized for the most common adjustments of -1,0,1,
* for other values we can do a bit more work.
*/
-static void clocksource_adjust(struct clocksource *clock, s64 offset)
+static void clocksource_adjust(s64 offset)
{
s64 error, interval = clock->cycle_interval;
int adj;
@@ -466,17 +466,13 @@ void update_wall_time(void)
second_overflow();
}
- /* interpolator bits */
- time_interpolator_update(clock->xtime_interval
- >> clock->shift);
-
/* accumulate error between NTP and clock interval */
clock->error += current_tick_length();
clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift);
}
/* correct the clock when NTP error is too big */
- clocksource_adjust(clock, offset);
+ clocksource_adjust(offset);
/* store full nanoseconds into xtime */
xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
diff --git a/kernel/timer.c b/kernel/timer.c
index b7792fb03387..6ce1952eea7d 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -103,14 +103,14 @@ static inline tvec_base_t *tbase_get_base(tvec_base_t *base)
static inline void timer_set_deferrable(struct timer_list *timer)
{
timer->base = ((tvec_base_t *)((unsigned long)(timer->base) |
- TBASE_DEFERRABLE_FLAG));
+ TBASE_DEFERRABLE_FLAG));
}
static inline void
timer_set_base(struct timer_list *timer, tvec_base_t *new_base)
{
timer->base = (tvec_base_t *)((unsigned long)(new_base) |
- tbase_get_deferrable(timer->base));
+ tbase_get_deferrable(timer->base));
}
/**
@@ -445,10 +445,10 @@ EXPORT_SYMBOL(__mod_timer);
void add_timer_on(struct timer_list *timer, int cpu)
{
tvec_base_t *base = per_cpu(tvec_bases, cpu);
- unsigned long flags;
+ unsigned long flags;
timer_stats_timer_set_start_info(timer);
- BUG_ON(timer_pending(timer) || !timer->function);
+ BUG_ON(timer_pending(timer) || !timer->function);
spin_lock_irqsave(&base->lock, flags);
timer_set_base(timer, base);
internal_add_timer(base, timer);
@@ -627,7 +627,7 @@ static inline void __run_timers(tvec_base_t *base)
while (time_after_eq(jiffies, base->timer_jiffies)) {
struct list_head work_list;
struct list_head *head = &work_list;
- int index = base->timer_jiffies & TVR_MASK;
+ int index = base->timer_jiffies & TVR_MASK;
/*
* Cascade timers:
@@ -644,8 +644,8 @@ static inline void __run_timers(tvec_base_t *base)
unsigned long data;
timer = list_first_entry(head, struct timer_list,entry);
- fn = timer->function;
- data = timer->data;
+ fn = timer->function;
+ data = timer->data;
timer_stats_account_timer(timer);
@@ -689,8 +689,8 @@ static unsigned long __next_timer_interrupt(tvec_base_t *base)
index = slot = timer_jiffies & TVR_MASK;
do {
list_for_each_entry(nte, base->tv1.vec + slot, entry) {
- if (tbase_get_deferrable(nte->base))
- continue;
+ if (tbase_get_deferrable(nte->base))
+ continue;
found = 1;
expires = nte->expires;
@@ -834,7 +834,7 @@ void update_process_times(int user_tick)
if (rcu_pending(cpu))
rcu_check_callbacks(cpu, user_tick);
scheduler_tick();
- run_posix_cpu_timers(p);
+ run_posix_cpu_timers(p);
}
/*
@@ -909,7 +909,7 @@ static inline void update_times(unsigned long ticks)
update_wall_time();
calc_load(ticks);
}
-
+
/*
* The 64-bit jiffies value is not atomic - you MUST NOT read it
* without sampling the sequence number in xtime_lock.
@@ -1105,7 +1105,7 @@ asmlinkage long sys_gettid(void)
/**
* do_sysinfo - fill in sysinfo struct
* @info: pointer to buffer to fill
- */
+ */
int do_sysinfo(struct sysinfo *info)
{
unsigned long mem_total, sav_total;
@@ -1349,194 +1349,6 @@ void __init init_timers(void)
open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
}
-#ifdef CONFIG_TIME_INTERPOLATION
-
-struct time_interpolator *time_interpolator __read_mostly;
-static struct time_interpolator *time_interpolator_list __read_mostly;
-static DEFINE_SPINLOCK(time_interpolator_lock);
-
-static inline cycles_t time_interpolator_get_cycles(unsigned int src)
-{
- unsigned long (*x)(void);
-
- switch (src)
- {
- case TIME_SOURCE_FUNCTION:
- x = time_interpolator->addr;
- return x();
-
- case TIME_SOURCE_MMIO64 :
- return readq_relaxed((void __iomem *)time_interpolator->addr);
-
- case TIME_SOURCE_MMIO32 :
- return readl_relaxed((void __iomem *)time_interpolator->addr);
-
- default: return get_cycles();
- }
-}
-
-static inline u64 time_interpolator_get_counter(int writelock)
-{
- unsigned int src = time_interpolator->source;
-
- if (time_interpolator->jitter)
- {
- cycles_t lcycle;
- cycles_t now;
-
- do {
- lcycle = time_interpolator->last_cycle;
- now = time_interpolator_get_cycles(src);
- if (lcycle && time_after(lcycle, now))
- return lcycle;
-
- /* When holding the xtime write lock, there's no need
- * to add the overhead of the cmpxchg. Readers are
- * force to retry until the write lock is released.
- */
- if (writelock) {
- time_interpolator->last_cycle = now;
- return now;
- }
- /* Keep track of the last timer value returned. The use of cmpxchg here
- * will cause contention in an SMP environment.
- */
- } while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle));
- return now;
- }
- else
- return time_interpolator_get_cycles(src);
-}
-
-void time_interpolator_reset(void)
-{
- time_interpolator->offset = 0;
- time_interpolator->last_counter = time_interpolator_get_counter(1);
-}
-
-#define GET_TI_NSECS(count,i) (((((count) - i->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift)
-
-unsigned long time_interpolator_get_offset(void)
-{
- /* If we do not have a time interpolator set up then just return zero */
- if (!time_interpolator)
- return 0;
-
- return time_interpolator->offset +
- GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator);
-}
-
-#define INTERPOLATOR_ADJUST 65536
-#define INTERPOLATOR_MAX_SKIP 10*INTERPOLATOR_ADJUST
-
-void time_interpolator_update(long delta_nsec)
-{
- u64 counter;
- unsigned long offset;
-
- /* If there is no time interpolator set up then do nothing */
- if (!time_interpolator)
- return;
-
- /*
- * The interpolator compensates for late ticks by accumulating the late
- * time in time_interpolator->offset. A tick earlier than expected will
- * lead to a reset of the offset and a corresponding jump of the clock
- * forward. Again this only works if the interpolator clock is running
- * slightly slower than the regular clock and the tuning logic insures
- * that.
- */
-
- counter = time_interpolator_get_counter(1);
- offset = time_interpolator->offset +
- GET_TI_NSECS(counter, time_interpolator);
-
- if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
- time_interpolator->offset = offset - delta_nsec;
- else {
- time_interpolator->skips++;
- time_interpolator->ns_skipped += delta_nsec - offset;
- time_interpolator->offset = 0;
- }
- time_interpolator->last_counter = counter;
-
- /* Tuning logic for time interpolator invoked every minute or so.
- * Decrease interpolator clock speed if no skips occurred and an offset is carried.
- * Increase interpolator clock speed if we skip too much time.
- */
- if (jiffies % INTERPOLATOR_ADJUST == 0)
- {
- if (time_interpolator->skips == 0 && time_interpolator->offset > tick_nsec)
- time_interpolator->nsec_per_cyc--;
- if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
- time_interpolator->nsec_per_cyc++;
- time_interpolator->skips = 0;
- time_interpolator->ns_skipped = 0;
- }
-}
-
-static inline int
-is_better_time_interpolator(struct time_interpolator *new)
-{
- if (!time_interpolator)
- return 1;
- return new->frequency > 2*time_interpolator->frequency ||
- (unsigned long)new->drift < (unsigned long)time_interpolator->drift;
-}
-
-void
-register_time_interpolator(struct time_interpolator *ti)
-{
- unsigned long flags;
-
- /* Sanity check */
- BUG_ON(ti->frequency == 0 || ti->mask == 0);
-
- ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
- spin_lock(&time_interpolator_lock);
- write_seqlock_irqsave(&xtime_lock, flags);
- if (is_better_time_interpolator(ti)) {
- time_interpolator = ti;
- time_interpolator_reset();
- }
- write_sequnlock_irqrestore(&xtime_lock, flags);
-
- ti->next = time_interpolator_list;
- time_interpolator_list = ti;
- spin_unlock(&time_interpolator_lock);
-}
-
-void
-unregister_time_interpolator(struct time_interpolator *ti)
-{
- struct time_interpolator *curr, **prev;
- unsigned long flags;
-
- spin_lock(&time_interpolator_lock);
- prev = &time_interpolator_list;
- for (curr = *prev; curr; curr = curr->next) {
- if (curr == ti) {
- *prev = curr->next;
- break;
- }
- prev = &curr->next;
- }
-
- write_seqlock_irqsave(&xtime_lock, flags);
- if (ti == time_interpolator) {
- /* we lost the best time-interpolator: */
- time_interpolator = NULL;
- /* find the next-best interpolator */
- for (curr = time_interpolator_list; curr; curr = curr->next)
- if (is_better_time_interpolator(curr))
- time_interpolator = curr;
- time_interpolator_reset();
- }
- write_sequnlock_irqrestore(&xtime_lock, flags);
- spin_unlock(&time_interpolator_lock);
-}
-#endif /* CONFIG_TIME_INTERPOLATION */
-
/**
* msleep - sleep safely even with waitqueue interruptions
* @msecs: Time in milliseconds to sleep for
diff --git a/kernel/user.c b/kernel/user.c
index 98b82507797a..e7d11cef6998 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -208,7 +208,7 @@ static int __init uid_cache_init(void)
int n;
uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
- 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
for(n = 0; n < UIDHASH_SZ; ++n)
INIT_LIST_HEAD(init_user_ns.uidhash_table + n);