Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/ahci_platform.h   |   4
-rw-r--r--  include/linux/clocksource.h     |  17
-rw-r--r--  include/linux/compiler.h        |   2
-rw-r--r--  include/linux/console.h         |  18
-rw-r--r--  include/linux/cpu.h             |  27
-rw-r--r--  include/linux/cpuset.h          |   6
-rw-r--r--  include/linux/delay.h           |   1
-rw-r--r--  include/linux/device.h          |  15
-rw-r--r--  include/linux/dmi.h             |   9
-rw-r--r--  include/linux/fb.h              |  13
-rw-r--r--  include/linux/fdtable.h         |   1
-rw-r--r--  include/linux/fscache-cache.h   |  47
-rw-r--r--  include/linux/ftrace.h          |   5
-rw-r--r--  include/linux/ftrace_event.h    |  18
-rw-r--r--  include/linux/interrupt.h       |   7
-rw-r--r--  include/linux/io-mapping.h      |  16
-rw-r--r--  include/linux/io.h              |   4
-rw-r--r--  include/linux/iommu.h           |   1
-rw-r--r--  include/linux/kdb.h             |   4
-rw-r--r--  include/linux/kernel.h          |  12
-rw-r--r--  include/linux/kgdb.h            |  15
-rw-r--r--  include/linux/kmemtrace.h       |  25
-rw-r--r--  include/linux/kthread.h         |  65
-rw-r--r--  include/linux/libata.h          |   1
-rw-r--r--  include/linux/msi.h             |   2
-rw-r--r--  include/linux/nmi.h             |  13
-rw-r--r--  include/linux/of.h              |  10
-rw-r--r--  include/linux/of_address.h      |  44
-rw-r--r--  include/linux/of_device.h       |  61
-rw-r--r--  include/linux/of_gpio.h         |  35
-rw-r--r--  include/linux/of_i2c.h          |  13
-rw-r--r--  include/linux/of_irq.h          |  70
-rw-r--r--  include/linux/of_platform.h     |  53
-rw-r--r--  include/linux/of_spi.h          |  11
-rw-r--r--  include/linux/page-flags.h      |   2
-rw-r--r--  include/linux/pci.h             |   2
-rw-r--r--  include/linux/pci_ids.h         |   5
-rw-r--r--  include/linux/perf_event.h      |  97
-rw-r--r--  include/linux/platform_device.h |  62
-rw-r--r--  include/linux/rcupdate.h        |  82
-rw-r--r--  include/linux/sched.h           |  83
-rw-r--r--  include/linux/slab.h            |   6
-rw-r--r--  include/linux/slab_def.h        |   3
-rw-r--r--  include/linux/slow-work.h       | 163
-rw-r--r--  include/linux/slub_def.h        |   3
-rw-r--r--  include/linux/syscalls.h        |   2
-rw-r--r--  include/linux/sysfs.h           |  12
-rw-r--r--  include/linux/time.h            |  21
-rw-r--r--  include/linux/topology.h        |   1
-rw-r--r--  include/linux/tty.h             |   3
-rw-r--r--  include/linux/vmalloc.h         |   2
-rw-r--r--  include/linux/workqueue.h       | 158
52 files changed, 892 insertions, 460 deletions
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
index f7dd576dd5a4..be3d9a77d6ed 100644
--- a/include/linux/ahci_platform.h
+++ b/include/linux/ahci_platform.h
@@ -15,11 +15,13 @@
#ifndef _AHCI_PLATFORM_H
#define _AHCI_PLATFORM_H
+#include <linux/compiler.h>
+
struct device;
struct ata_port_info;
struct ahci_platform_data {
- int (*init)(struct device *dev);
+ int (*init)(struct device *dev, void __iomem *addr);
void (*exit)(struct device *dev);
const struct ata_port_info *ata_port_info;
unsigned int force_port_map;
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 5ea3c60c160c..c37b21ad5a3b 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -292,6 +292,8 @@ clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);
*/
extern int
__clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq);
+extern void
+__clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq);
static inline int clocksource_register_hz(struct clocksource *cs, u32 hz)
{
@@ -303,6 +305,15 @@ static inline int clocksource_register_khz(struct clocksource *cs, u32 khz)
return __clocksource_register_scale(cs, 1000, khz);
}
+static inline void __clocksource_updatefreq_hz(struct clocksource *cs, u32 hz)
+{
+ __clocksource_updatefreq_scale(cs, 1, hz);
+}
+
+static inline void __clocksource_updatefreq_khz(struct clocksource *cs, u32 khz)
+{
+ __clocksource_updatefreq_scale(cs, 1000, khz);
+}
static inline void
clocksource_calc_mult_shift(struct clocksource *cs, u32 freq, u32 minsec)
@@ -313,11 +324,13 @@ clocksource_calc_mult_shift(struct clocksource *cs, u32 freq, u32 minsec)
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
extern void
-update_vsyscall(struct timespec *ts, struct clocksource *c, u32 mult);
+update_vsyscall(struct timespec *ts, struct timespec *wtm,
+ struct clocksource *c, u32 mult);
extern void update_vsyscall_tz(void);
#else
static inline void
-update_vsyscall(struct timespec *ts, struct clocksource *c, u32 mult)
+update_vsyscall(struct timespec *ts, struct timespec *wtm,
+ struct clocksource *c, u32 mult)
{
}
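A minimal sketch of how a driver might use the new update helpers after its input clock changes; my_cs and the recalibration hook are hypothetical, and the clocksource is assumed to be already registered:

#include <linux/clocksource.h>

static struct clocksource my_cs;	/* hypothetical, already registered */

static void my_clock_rate_changed(u32 new_rate_khz)
{
	/* re-derive mult/shift for the new frequency in one call */
	__clocksource_updatefreq_khz(&my_cs, new_rate_khz);
}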
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index a5a472b10746..c1a62c56a660 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -16,6 +16,7 @@
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu __attribute__((noderef, address_space(3)))
+# define __rcu
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
#else
@@ -34,6 +35,7 @@ extern void __chk_io_ptr(const volatile void __iomem *);
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
# define __percpu
+# define __rcu
#endif
#ifdef __KERNEL__
diff --git a/include/linux/console.h b/include/linux/console.h
index dcca5339ceb3..95cf6f08a59d 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -55,6 +55,16 @@ struct consw {
void (*con_invert_region)(struct vc_data *, u16 *, int);
u16 *(*con_screen_pos)(struct vc_data *, int);
unsigned long (*con_getxy)(struct vc_data *, unsigned long, int *, int *);
+ /*
+ * Prepare the console for the debugger. This includes, but is not
+ * limited to, unblanking the console, loading an appropriate
+ * palette, and allowing debugger-generated output.
+ */
+ int (*con_debug_enter)(struct vc_data *);
+ /*
+ * Restore the console to its pre-debug state as closely as possible.
+ */
+ int (*con_debug_leave)(struct vc_data *);
};
extern const struct consw *conswitchp;
@@ -69,6 +79,14 @@ int register_con_driver(const struct consw *csw, int first, int last);
int unregister_con_driver(const struct consw *csw);
int take_over_console(const struct consw *sw, int first, int last, int deflt);
void give_up_console(const struct consw *sw);
+#ifdef CONFIG_HW_CONSOLE
+int con_debug_enter(struct vc_data *vc);
+int con_debug_leave(void);
+#else
+#define con_debug_enter(vc) (0)
+#define con_debug_leave() (0)
+#endif
+
/* scroll */
#define SM_UP (1)
#define SM_DOWN (2)
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index e287863ac053..4823af64e9db 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -48,6 +48,33 @@ extern ssize_t arch_cpu_release(const char *, size_t);
#endif
struct notifier_block;
+/*
+ * CPU notifier priorities.
+ */
+enum {
+ /*
+ * SCHED_ACTIVE marks a cpu which is coming up active during
+ * CPU_ONLINE and CPU_DOWN_FAILED and must be the first
+ * notifier. CPUSET_ACTIVE adjusts cpuset according to
+ * cpu_active mask right after SCHED_ACTIVE. During
+ * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are
+ * ordered in a similar way.
+ *
+ * This ordering guarantees a consistent cpu_active mask and
+ * consistent migration behavior for all cpu notifiers.
+ */
+ CPU_PRI_SCHED_ACTIVE = INT_MAX,
+ CPU_PRI_CPUSET_ACTIVE = INT_MAX - 1,
+ CPU_PRI_SCHED_INACTIVE = INT_MIN + 1,
+ CPU_PRI_CPUSET_INACTIVE = INT_MIN,
+
+ /* migration should happen before other stuff but after perf */
+ CPU_PRI_PERF = 20,
+ CPU_PRI_MIGRATION = 10,
+ /* prepare workqueues for other notifiers */
+ CPU_PRI_WORKQUEUE = 5,
+};
+
#ifdef CONFIG_SMP
/* Need to know about CPUs going up/down? */
#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
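As an illustrative use of the new priority constants, a hypothetical notifier that must run after migration is set up could register itself like this:

#include <linux/cpu.h>
#include <linux/notifier.h>

static int __cpuinit my_cpu_callback(struct notifier_block *nb,
				     unsigned long action, void *hcpu)
{
	/* hcpu carries the cpu number being brought up or down */
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata my_cpu_nb = {
	.notifier_call	= my_cpu_callback,
	.priority	= CPU_PRI_MIGRATION - 1,	/* after migration */
};

/* from init code: register_cpu_notifier(&my_cpu_nb); */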
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 457ed765a116..f20eb8f16025 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -20,6 +20,7 @@ extern int number_of_cpusets; /* How many cpusets are defined in system? */
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
+extern void cpuset_update_active_cpus(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -132,6 +133,11 @@ static inline void set_mems_allowed(nodemask_t nodemask)
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
+static inline void cpuset_update_active_cpus(void)
+{
+ partition_sched_domains(1, NULL, NULL);
+}
+
static inline void cpuset_cpus_allowed(struct task_struct *p,
struct cpumask *mask)
{
diff --git a/include/linux/delay.h b/include/linux/delay.h
index fd832c6d419e..a6ecb34cf547 100644
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
@@ -45,6 +45,7 @@ extern unsigned long lpj_fine;
void calibrate_delay(void);
void msleep(unsigned int msecs);
unsigned long msleep_interruptible(unsigned int msecs);
+void usleep_range(unsigned long min, unsigned long max);
static inline void ssleep(unsigned int seconds)
{
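usleep_range() gives the timer subsystem an explicit window in which to coalesce wakeups; a hypothetical caller waiting for a device to settle might do:

#include <linux/delay.h>

static void my_device_settle(void)
{
	/* sleep at least 100us, but allow coalescing up to 200us */
	usleep_range(100, 200);
}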
diff --git a/include/linux/device.h b/include/linux/device.h
index 6a8276f683b6..516fecacf27b 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -84,9 +84,8 @@ struct device *bus_find_device_by_name(struct bus_type *bus,
struct device *start,
const char *name);
-int __must_check bus_for_each_drv(struct bus_type *bus,
- struct device_driver *start, void *data,
- int (*fn)(struct device_driver *, void *));
+int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
+ void *data, int (*fn)(struct device_driver *, void *));
void bus_sort_breadthfirst(struct bus_type *bus,
int (*compare)(const struct device *a,
@@ -110,10 +109,12 @@ extern int bus_unregister_notifier(struct bus_type *bus,
*/
#define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */
#define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device removed */
-#define BUS_NOTIFY_BOUND_DRIVER 0x00000003 /* driver bound to device */
-#define BUS_NOTIFY_UNBIND_DRIVER 0x00000004 /* driver about to be
+#define BUS_NOTIFY_BIND_DRIVER 0x00000003 /* driver about to be
+ bound */
+#define BUS_NOTIFY_BOUND_DRIVER 0x00000004 /* driver bound to device */
+#define BUS_NOTIFY_UNBIND_DRIVER 0x00000005 /* driver about to be
unbound */
-#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000005 /* driver is unbound
+#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000006 /* driver is unbound
from the device */
extern struct kset *bus_get_kset(struct bus_type *bus);
@@ -551,7 +552,7 @@ extern int device_for_each_child(struct device *dev, void *data,
int (*fn)(struct device *dev, void *data));
extern struct device *device_find_child(struct device *dev, void *data,
int (*match)(struct device *dev, void *data));
-extern int device_rename(struct device *dev, char *new_name);
+extern int device_rename(struct device *dev, const char *new_name);
extern int device_move(struct device *dev, struct device *new_parent,
enum dpm_order dpm_order);
extern const char *device_get_devnode(struct device *dev,
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index a8a3e1ac281d..90e087f8d951 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -20,6 +20,7 @@ enum dmi_device_type {
DMI_DEV_TYPE_SAS,
DMI_DEV_TYPE_IPMI = -1,
DMI_DEV_TYPE_OEM_STRING = -2,
+ DMI_DEV_TYPE_DEV_ONBOARD = -3,
};
struct dmi_header {
@@ -37,6 +38,14 @@ struct dmi_device {
#ifdef CONFIG_DMI
+struct dmi_dev_onboard {
+ struct dmi_device dev;
+ int instance;
+ int segment;
+ int bus;
+ int devfn;
+};
+
extern int dmi_check_system(const struct dmi_system_id *list);
const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list);
extern const char * dmi_get_system_info(int field);
diff --git a/include/linux/fb.h b/include/linux/fb.h
index e7445df44d6c..0c5659c41b01 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -3,6 +3,9 @@
#include <linux/types.h>
#include <linux/i2c.h>
+#ifdef __KERNEL__
+#include <linux/kgdb.h>
+#endif /* __KERNEL__ */
/* Definitions of frame buffers */
@@ -607,6 +610,12 @@ struct fb_deferred_io {
* LOCKING NOTE: those functions must _ALL_ be called with the console
* semaphore held, this is the only suitable locking mechanism we have
* in 2.6. Some may be called at interrupt time at this point though.
+ *
+ * The exceptions to this are the debug-related hooks. Putting the fb
+ * into a debug state (e.g. flipping to the kernel console) and restoring
+ * it must be done in a lock-free manner, so low level drivers should
+ * keep track of the initial console (if applicable) and may need to
+ * perform direct, unlocked hardware writes in these hooks.
*/
struct fb_ops {
@@ -676,6 +685,10 @@ struct fb_ops {
/* teardown any resources to do with this framebuffer */
void (*fb_destroy)(struct fb_info *info);
+
+ /* called at KDB enter and leave time to prepare the console */
+ int (*fb_debug_enter)(struct fb_info *info);
+ int (*fb_debug_leave)(struct fb_info *info);
};
#ifdef CONFIG_FB_TILEBLITTING
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index d147461bc271..f59ed297b661 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -11,6 +11,7 @@
#include <linux/rcupdate.h>
#include <linux/types.h>
#include <linux/init.h>
+#include <linux/fs.h>
#include <asm/atomic.h>
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index c57db27ac861..b8581c09d19f 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -20,7 +20,7 @@
#include <linux/fscache.h>
#include <linux/sched.h>
-#include <linux/slow-work.h>
+#include <linux/workqueue.h>
#define NR_MAXCACHES BITS_PER_LONG
@@ -76,18 +76,14 @@ typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);
struct fscache_operation {
- union {
- struct work_struct fast_work; /* record for fast ops */
- struct slow_work slow_work; /* record for (very) slow ops */
- };
+ struct work_struct work; /* record for async ops */
struct list_head pend_link; /* link in object->pending_ops */
struct fscache_object *object; /* object to be operated upon */
unsigned long flags;
#define FSCACHE_OP_TYPE 0x000f /* operation type */
-#define FSCACHE_OP_FAST 0x0001 /* - fast op, processor may not sleep for disk */
-#define FSCACHE_OP_SLOW 0x0002 /* - (very) slow op, processor may sleep for disk */
-#define FSCACHE_OP_MYTHREAD 0x0003 /* - processing is done be issuing thread, not pool */
+#define FSCACHE_OP_ASYNC 0x0001 /* - async op, processor may sleep for disk */
+#define FSCACHE_OP_MYTHREAD 0x0002 /* - processing is done by the issuing thread, not pool */
#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */
#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */
#define FSCACHE_OP_DEAD 6 /* op is now dead */
@@ -105,7 +101,8 @@ struct fscache_operation {
/* operation releaser */
fscache_operation_release_t release;
-#ifdef CONFIG_SLOW_WORK_DEBUG
+#ifdef CONFIG_WORKQUEUE_DEBUGFS
+ struct work_struct put_work; /* work to delay operation put */
const char *name; /* operation name */
const char *state; /* operation state */
#define fscache_set_op_name(OP, N) do { (OP)->name = (N); } while(0)
@@ -117,7 +114,7 @@ struct fscache_operation {
};
extern atomic_t fscache_op_debug_id;
-extern const struct slow_work_ops fscache_op_slow_work_ops;
+extern void fscache_op_work_func(struct work_struct *work);
extern void fscache_enqueue_operation(struct fscache_operation *);
extern void fscache_put_operation(struct fscache_operation *);
@@ -128,33 +125,21 @@ extern void fscache_put_operation(struct fscache_operation *);
* @release: The release function to assign
*
* Do basic initialisation of an operation. The caller must still set flags,
- * object, either fast_work or slow_work if necessary, and processor if needed.
+ * object and processor if needed.
*/
static inline void fscache_operation_init(struct fscache_operation *op,
- fscache_operation_release_t release)
+ fscache_operation_processor_t processor,
+ fscache_operation_release_t release)
{
+ INIT_WORK(&op->work, fscache_op_work_func);
atomic_set(&op->usage, 1);
op->debug_id = atomic_inc_return(&fscache_op_debug_id);
+ op->processor = processor;
op->release = release;
INIT_LIST_HEAD(&op->pend_link);
fscache_set_op_state(op, "Init");
}
-/**
- * fscache_operation_init_slow - Do additional initialisation of a slow op
- * @op: The operation to initialise
- * @processor: The processor function to assign
- *
- * Do additional initialisation of an operation as required for slow work.
- */
-static inline
-void fscache_operation_init_slow(struct fscache_operation *op,
- fscache_operation_processor_t processor)
-{
- op->processor = processor;
- slow_work_init(&op->slow_work, &fscache_op_slow_work_ops);
-}
-
/*
* data read operation
*/
@@ -389,7 +374,7 @@ struct fscache_object {
struct fscache_cache *cache; /* cache that supplied this object */
struct fscache_cookie *cookie; /* netfs's file/index object */
struct fscache_object *parent; /* parent object */
- struct slow_work work; /* attention scheduling record */
+ struct work_struct work; /* attention scheduling record */
struct list_head dependents; /* FIFO of dependent objects */
struct list_head dep_link; /* link in parent's dependents list */
struct list_head pending_ops; /* unstarted operations on this object */
@@ -411,7 +396,7 @@ extern const char *fscache_object_states[];
(test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \
(obj)->state >= FSCACHE_OBJECT_DYING)
-extern const struct slow_work_ops fscache_object_slow_work_ops;
+extern void fscache_object_work_func(struct work_struct *work);
/**
* fscache_object_init - Initialise a cache object description
@@ -433,7 +418,7 @@ void fscache_object_init(struct fscache_object *object,
spin_lock_init(&object->lock);
INIT_LIST_HEAD(&object->cache_link);
INIT_HLIST_NODE(&object->cookie_link);
- vslow_work_init(&object->work, &fscache_object_slow_work_ops);
+ INIT_WORK(&object->work, fscache_object_work_func);
INIT_LIST_HEAD(&object->dependents);
INIT_LIST_HEAD(&object->dep_link);
INIT_LIST_HEAD(&object->pending_ops);
@@ -534,6 +519,8 @@ extern void fscache_io_error(struct fscache_cache *cache);
extern void fscache_mark_pages_cached(struct fscache_retrieval *op,
struct pagevec *pagevec);
+extern bool fscache_object_sleep_till_congested(signed long *timeoutp);
+
extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
const void *data,
uint16_t datalen);
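A rough sketch of operation setup under the new single-step initialiser; the my_* callbacks are hypothetical:

#include <linux/fscache-cache.h>

static void my_op_processor(struct fscache_operation *op)
{
	/* asynchronous work; may sleep for disk */
}

static void my_op_release(struct fscache_operation *op)
{
	/* drop resources held by the operation */
}

static void my_op_setup(struct fscache_operation *op)
{
	fscache_operation_init(op, my_op_processor, my_op_release);
	op->flags = FSCACHE_OP_ASYNC;
}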
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 41e46330d9be..dcd6a7c3a435 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1,3 +1,8 @@
+/*
+ * Ftrace header. For implementation details beyond the random comments
+ * scattered below, see: Documentation/trace/ftrace-design.txt
+ */
+
#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 3167f2df4126..02b8b24f8f51 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -11,8 +11,6 @@ struct trace_array;
struct tracer;
struct dentry;
-DECLARE_PER_CPU(struct trace_seq, ftrace_event_seq);
-
struct trace_print_flags {
unsigned long mask;
const char *name;
@@ -58,6 +56,9 @@ struct trace_iterator {
struct ring_buffer_iter *buffer_iter[NR_CPUS];
unsigned long iter_flags;
+ /* trace_seq for __print_flags() and __print_symbolic() etc. */
+ struct trace_seq tmp_seq;
+
/* The below is zeroed out in pipe_read */
struct trace_seq seq;
struct trace_entry *ent;
@@ -146,14 +147,19 @@ struct ftrace_event_class {
int (*raw_init)(struct ftrace_event_call *);
};
+extern int ftrace_event_reg(struct ftrace_event_call *event,
+ enum trace_reg type);
+
enum {
TRACE_EVENT_FL_ENABLED_BIT,
TRACE_EVENT_FL_FILTERED_BIT,
+ TRACE_EVENT_FL_RECORDED_CMD_BIT,
};
enum {
- TRACE_EVENT_FL_ENABLED = (1 << TRACE_EVENT_FL_ENABLED_BIT),
- TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
+ TRACE_EVENT_FL_ENABLED = (1 << TRACE_EVENT_FL_ENABLED_BIT),
+ TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
+ TRACE_EVENT_FL_RECORDED_CMD = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
};
struct ftrace_event_call {
@@ -171,6 +177,7 @@ struct ftrace_event_call {
* 32 bit flags:
* bit 1: enabled
* bit 2: filter_active
+ * bit 3: enabled cmd record
*
* Changes to flags must hold the event_mutex.
*
@@ -257,8 +264,7 @@ static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
u64 count, struct pt_regs *regs, void *head)
{
- perf_tp_event(addr, count, raw_data, size, regs, head);
- perf_swevent_put_recursion_context(rctx);
+ perf_tp_event(addr, count, raw_data, size, regs, head, rctx);
}
#endif
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index c2331138ca1b..a0384a4d1e6f 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -53,16 +53,21 @@
* IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
* Used by threaded interrupts which need to keep the
* irq line disabled until the threaded handler has been run.
+ * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
+ *
*/
#define IRQF_DISABLED 0x00000020
#define IRQF_SAMPLE_RANDOM 0x00000040
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
-#define IRQF_TIMER 0x00000200
+#define __IRQF_TIMER 0x00000200
#define IRQF_PERCPU 0x00000400
#define IRQF_NOBALANCING 0x00000800
#define IRQF_IRQPOLL 0x00001000
#define IRQF_ONESHOT 0x00002000
+#define IRQF_NO_SUSPEND 0x00004000
+
+#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND)
/*
* Bits used by threaded handlers:
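A hypothetical wakeup-critical driver would pass the new flag when requesting its line, so the interrupt stays armed across suspend much as timer interrupts do:

#include <linux/interrupt.h>

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_setup_irq(unsigned int irq, void *dev)
{
	/* keep this IRQ enabled during suspend */
	return request_irq(irq, my_irq_handler, IRQF_NO_SUSPEND,
			   "my-device", dev);
}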
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 25085ddd955f..e0ea40f6c515 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -79,7 +79,9 @@ io_mapping_free(struct io_mapping *mapping)
/* Atomic map/unmap */
static inline void *
-io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset)
+io_mapping_map_atomic_wc(struct io_mapping *mapping,
+ unsigned long offset,
+ int slot)
{
resource_size_t phys_addr;
unsigned long pfn;
@@ -87,13 +89,13 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset)
BUG_ON(offset >= mapping->size);
phys_addr = mapping->base + offset;
pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
- return iomap_atomic_prot_pfn(pfn, KM_USER0, mapping->prot);
+ return iomap_atomic_prot_pfn(pfn, slot, mapping->prot);
}
static inline void
-io_mapping_unmap_atomic(void *vaddr)
+io_mapping_unmap_atomic(void *vaddr, int slot)
{
- iounmap_atomic(vaddr, KM_USER0);
+ iounmap_atomic(vaddr, slot);
}
static inline void *
@@ -133,13 +135,15 @@ io_mapping_free(struct io_mapping *mapping)
/* Atomic map/unmap */
static inline void *
-io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset)
+io_mapping_map_atomic_wc(struct io_mapping *mapping,
+ unsigned long offset,
+ int slot)
{
return ((char *) mapping) + offset;
}
static inline void
-io_mapping_unmap_atomic(void *vaddr)
+io_mapping_unmap_atomic(void *vaddr, int slot)
{
}
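Callers now name the kmap slot explicitly instead of the implicit KM_USER0; a sketch, with the mapping and offset assumed to come from the caller:

#include <linux/io.h>
#include <linux/io-mapping.h>
#include <asm/kmap_types.h>

static u32 my_read_word(struct io_mapping *mapping, unsigned long offset)
{
	void *vaddr;
	u32 val;

	/* the slot argument picks the per-cpu atomic kmap entry */
	vaddr = io_mapping_map_atomic_wc(mapping, offset, KM_USER0);
	val = readl(vaddr);
	io_mapping_unmap_atomic(vaddr, KM_USER0);
	return val;
}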
diff --git a/include/linux/io.h b/include/linux/io.h
index 6c7f0ba0d5fa..7fd2d2138bf3 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -29,10 +29,10 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
#ifdef CONFIG_MMU
int ioremap_page_range(unsigned long addr, unsigned long end,
- unsigned long phys_addr, pgprot_t prot);
+ phys_addr_t phys_addr, pgprot_t prot);
#else
static inline int ioremap_page_range(unsigned long addr, unsigned long end,
- unsigned long phys_addr, pgprot_t prot)
+ phys_addr_t phys_addr, pgprot_t prot)
{
return 0;
}
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index be22ad83689c..0a2ba4098996 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -30,6 +30,7 @@ struct iommu_domain {
};
#define IOMMU_CAP_CACHE_COHERENCY 0x1
+#define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */
struct iommu_ops {
int (*domain_init)(struct iommu_domain *domain);
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index ccb2b3ec0fe8..ea6e5244ed3f 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -114,4 +114,8 @@ enum {
KDB_INIT_EARLY,
KDB_INIT_FULL,
};
+
+extern int kdbgetintenv(const char *, int *);
+extern int kdb_set(int, const char **);
+
#endif /* !_KDB_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 5de838b0fc1a..7d5b10ff63e0 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -252,6 +252,13 @@ extern struct pid *session_of_pgrp(struct pid *pgrp);
#define FW_WARN "[Firmware Warn]: "
#define FW_INFO "[Firmware Info]: "
+/*
+ * HW_ERR
+ * Add this to a message for hardware errors, so that user can report
+ * it to hardware vendor instead of LKML or software vendor.
+ */
+#define HW_ERR "[Hardware Error]: "
+
#ifdef CONFIG_PRINTK
asmlinkage int vprintk(const char *fmt, va_list args)
__attribute__ ((format (printf, 1, 0)));
@@ -513,9 +520,6 @@ extern void tracing_start(void);
extern void tracing_stop(void);
extern void ftrace_off_permanent(void);
-extern void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
-
static inline void __attribute__ ((format (printf, 1, 2)))
____trace_printk_check_format(const char *fmt, ...)
{
@@ -591,8 +595,6 @@ __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
#else
-static inline void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
static inline int
trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
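HW_ERR is just a message prefix; a hypothetical machine-check-style report would look like:

#include <linux/kernel.h>

static void my_report_hw_error(int cpu)
{
	/* the prefix tells users to blame the hardware, not software */
	printk(KERN_ERR HW_ERR "CPU %d: uncorrected memory error\n", cpu);
}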
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index 9340f34d1bb5..cc96f0f23e04 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -90,6 +90,19 @@ struct kgdb_bkpt {
enum kgdb_bpstate state;
};
+struct dbg_reg_def_t {
+ char *name;
+ int size;
+ int offset;
+};
+
+#ifndef DBG_MAX_REG_NUM
+#define DBG_MAX_REG_NUM 0
+#else
+extern struct dbg_reg_def_t dbg_reg_def[];
+extern char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs);
+extern int dbg_set_reg(int regno, void *mem, struct pt_regs *regs);
+#endif
#ifndef KGDB_MAX_BREAKPOINTS
# define KGDB_MAX_BREAKPOINTS 1000
#endif
@@ -281,7 +294,7 @@ extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
extern struct kgdb_io *dbg_io_ops;
extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
-extern int kgdb_mem2hex(char *mem, char *buf, int count);
+extern char *kgdb_mem2hex(char *mem, char *buf, int count);
extern int kgdb_hex2mem(char *buf, char *mem, int count);
extern int kgdb_isremovedbreak(unsigned long addr);
diff --git a/include/linux/kmemtrace.h b/include/linux/kmemtrace.h
deleted file mode 100644
index b616d3930c3b..000000000000
--- a/include/linux/kmemtrace.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2008 Eduard - Gabriel Munteanu
- *
- * This file is released under GPL version 2.
- */
-
-#ifndef _LINUX_KMEMTRACE_H
-#define _LINUX_KMEMTRACE_H
-
-#ifdef __KERNEL__
-
-#include <trace/events/kmem.h>
-
-#ifdef CONFIG_KMEMTRACE
-extern void kmemtrace_init(void);
-#else
-static inline void kmemtrace_init(void)
-{
-}
-#endif
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_KMEMTRACE_H */
-
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index aabc8a13ba71..685ea65eb803 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -30,8 +30,73 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
void kthread_bind(struct task_struct *k, unsigned int cpu);
int kthread_stop(struct task_struct *k);
int kthread_should_stop(void);
+void *kthread_data(struct task_struct *k);
int kthreadd(void *unused);
extern struct task_struct *kthreadd_task;
+/*
+ * Simple work processor based on kthread.
+ *
+ * This provides an easier way to make use of kthreads. A kthread_work
+ * can be queued and flushed using queue/flush_kthread_work()
+ * respectively. Queued kthread_works are processed by a kthread
+ * running kthread_worker_fn().
+ *
+ * A kthread_work can't be freed while it is executing.
+ */
+struct kthread_work;
+typedef void (*kthread_work_func_t)(struct kthread_work *work);
+
+struct kthread_worker {
+ spinlock_t lock;
+ struct list_head work_list;
+ struct task_struct *task;
+};
+
+struct kthread_work {
+ struct list_head node;
+ kthread_work_func_t func;
+ wait_queue_head_t done;
+ atomic_t flushing;
+ int queue_seq;
+ int done_seq;
+};
+
+#define KTHREAD_WORKER_INIT(worker) { \
+ .lock = SPIN_LOCK_UNLOCKED, \
+ .work_list = LIST_HEAD_INIT((worker).work_list), \
+ }
+
+#define KTHREAD_WORK_INIT(work, fn) { \
+ .node = LIST_HEAD_INIT((work).node), \
+ .func = (fn), \
+ .done = __WAIT_QUEUE_HEAD_INITIALIZER((work).done), \
+ .flushing = ATOMIC_INIT(0), \
+ }
+
+#define DEFINE_KTHREAD_WORKER(worker) \
+ struct kthread_worker worker = KTHREAD_WORKER_INIT(worker)
+
+#define DEFINE_KTHREAD_WORK(work, fn) \
+ struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
+
+static inline void init_kthread_worker(struct kthread_worker *worker)
+{
+ *worker = (struct kthread_worker)KTHREAD_WORKER_INIT(*worker);
+}
+
+static inline void init_kthread_work(struct kthread_work *work,
+ kthread_work_func_t fn)
+{
+ *work = (struct kthread_work)KTHREAD_WORK_INIT(*work, fn);
+}
+
+int kthread_worker_fn(void *worker_ptr);
+
+bool queue_kthread_work(struct kthread_worker *worker,
+ struct kthread_work *work);
+void flush_kthread_work(struct kthread_work *work);
+void flush_kthread_worker(struct kthread_worker *worker);
+
#endif /* _LINUX_KTHREAD_H */
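A minimal usage sketch of the new kthread_worker interface, assuming hypothetical my_* names: spawn a kthread running kthread_worker_fn(), then queue and flush works against it.

#include <linux/kthread.h>
#include <linux/err.h>

static struct kthread_worker my_worker;
static struct kthread_work my_work;

static void my_work_fn(struct kthread_work *work)
{
	/* runs in my_worker's kthread; works are processed in order */
}

static int my_start(void)
{
	struct task_struct *task;

	init_kthread_worker(&my_worker);
	task = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
	if (IS_ERR(task))
		return PTR_ERR(task);

	init_kthread_work(&my_work, my_work_fn);
	queue_kthread_work(&my_worker, &my_work);
	flush_kthread_work(&my_work);	/* wait for it to complete */
	return 0;
}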
diff --git a/include/linux/libata.h b/include/linux/libata.h
index b85f3ff34d7d..f010f18a0f86 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -751,6 +751,7 @@ struct ata_port {
struct ata_host *host;
struct device *dev;
+ struct mutex scsi_scan_mutex;
struct delayed_work hotplug_task;
struct work_struct scsi_rescan_task;
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 6991ab5b24d1..91b05c171854 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -14,8 +14,10 @@ struct irq_desc;
extern void mask_msi_irq(unsigned int irq);
extern void unmask_msi_irq(unsigned int irq);
extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
+extern void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
extern void read_msi_msg(unsigned int irq, struct msi_msg *msg);
+extern void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
struct msi_desc {
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index b752e807adde..06aab5eee134 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -20,10 +20,14 @@ extern void touch_nmi_watchdog(void);
extern void acpi_nmi_disable(void);
extern void acpi_nmi_enable(void);
#else
+#ifndef CONFIG_HARDLOCKUP_DETECTOR
static inline void touch_nmi_watchdog(void)
{
touch_softlockup_watchdog();
}
+#else
+extern void touch_nmi_watchdog(void);
+#endif
static inline void acpi_nmi_disable(void) { }
static inline void acpi_nmi_enable(void) { }
#endif
@@ -47,4 +51,13 @@ static inline bool trigger_all_cpu_backtrace(void)
}
#endif
+#ifdef CONFIG_LOCKUP_DETECTOR
+int hw_nmi_is_cpu_stuck(struct pt_regs *);
+u64 hw_nmi_get_sample_period(void);
+extern int watchdog_enabled;
+struct ctl_table;
+extern int proc_dowatchdog_enabled(struct ctl_table *, int ,
+ void __user *, size_t *, loff_t *);
+#endif
+
#endif
diff --git a/include/linux/of.h b/include/linux/of.h
index a367e19bb3af..cad7cf0ab278 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -70,6 +70,11 @@ extern struct device_node *allnodes;
extern struct device_node *of_chosen;
extern rwlock_t devtree_lock;
+static inline bool of_node_is_root(const struct device_node *node)
+{
+ return node && (node->parent == NULL);
+}
+
static inline int of_node_check_flag(struct device_node *n, unsigned long flag)
{
return test_bit(flag, &n->_flags);
@@ -141,6 +146,11 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
#define OF_BAD_ADDR ((u64)-1)
+#ifndef of_node_to_nid
+static inline int of_node_to_nid(struct device_node *np) { return -1; }
+#define of_node_to_nid of_node_to_nid
+#endif
+
extern struct device_node *of_find_node_by_name(struct device_node *from,
const char *name);
#define for_each_node_by_name(dn, name) \
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
new file mode 100644
index 000000000000..8aea06f0564c
--- /dev/null
+++ b/include/linux/of_address.h
@@ -0,0 +1,44 @@
+#ifndef __OF_ADDRESS_H
+#define __OF_ADDRESS_H
+#include <linux/ioport.h>
+#include <linux/of.h>
+
+extern u64 of_translate_address(struct device_node *np, const u32 *addr);
+extern int of_address_to_resource(struct device_node *dev, int index,
+ struct resource *r);
+extern void __iomem *of_iomap(struct device_node *device, int index);
+
+/* Extract an address from a device, returns the region size and
+ * the address space flags too. The PCI version uses a BAR number
+ * instead of an absolute index
+ */
+extern const u32 *of_get_address(struct device_node *dev, int index,
+ u64 *size, unsigned int *flags);
+
+#ifndef pci_address_to_pio
+static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
+#define pci_address_to_pio pci_address_to_pio
+#endif
+
+#ifdef CONFIG_PCI
+extern const u32 *of_get_pci_address(struct device_node *dev, int bar_no,
+ u64 *size, unsigned int *flags);
+extern int of_pci_address_to_resource(struct device_node *dev, int bar,
+ struct resource *r);
+#else /* CONFIG_PCI */
+static inline int of_pci_address_to_resource(struct device_node *dev, int bar,
+ struct resource *r)
+{
+ return -ENOSYS;
+}
+
+static inline const u32 *of_get_pci_address(struct device_node *dev,
+ int bar_no, u64 *size, unsigned int *flags)
+{
+ return NULL;
+}
+#endif /* CONFIG_PCI */
+
+
+#endif /* __OF_ADDRESS_H */
+
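A sketch of a typical probe-time use of the new header, mapping a device node's first "reg" entry; np is assumed to be a valid device node:

#include <linux/of_address.h>

static void __iomem *my_map_regs(struct device_node *np)
{
	struct resource res;

	/* of_address_to_resource() translates the first "reg" entry */
	if (of_address_to_resource(np, 0, &res))
		return NULL;

	/* of_iomap() performs the same lookup and ioremaps the region */
	return of_iomap(np, 0);
}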
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 11651facc5f1..35aa44ad9f2c 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -1,32 +1,77 @@
#ifndef _LINUX_OF_DEVICE_H
#define _LINUX_OF_DEVICE_H
+/*
+ * The of_device *was* a kind of "base class" that was a superset of
+ * struct device for use by devices attached to an OF node and probed
+ * using OF properties. However, the important bit of OF-style
+ * probing, namely the device node pointer, has been moved into the
+ * common struct device when CONFIG_OF is set to make OF-style probing
+ * available to all bus types. So now, just make of_device and
+ * platform_device equivalent so that current of_platform bus users
+ * can be transparently migrated over to using the platform bus.
+ *
+ * This line will go away once all references to of_device are removed
+ * from the kernel.
+ */
+#define of_device platform_device
+#include <linux/platform_device.h>
+#include <linux/of_platform.h> /* temporary until merge */
+
#ifdef CONFIG_OF_DEVICE
#include <linux/device.h>
#include <linux/of.h>
#include <linux/mod_devicetable.h>
-#include <asm/of_device.h>
-
#define to_of_device(d) container_of(d, struct of_device, dev)
extern const struct of_device_id *of_match_device(
const struct of_device_id *matches, const struct device *dev);
+extern void of_device_make_bus_id(struct device *dev);
+
+/**
+ * of_driver_match_device - Tell if a driver's of_match_table matches a device.
+ * @drv: the device_driver structure to test
+ * @dev: the device structure to match against
+ */
+static inline int of_driver_match_device(const struct device *dev,
+ const struct device_driver *drv)
+{
+ return of_match_device(drv->of_match_table, dev) != NULL;
+}
-extern struct of_device *of_dev_get(struct of_device *dev);
-extern void of_dev_put(struct of_device *dev);
+extern struct platform_device *of_dev_get(struct platform_device *dev);
+extern void of_dev_put(struct platform_device *dev);
-extern int of_device_register(struct of_device *ofdev);
-extern void of_device_unregister(struct of_device *ofdev);
+extern int of_device_register(struct platform_device *ofdev);
+extern void of_device_unregister(struct platform_device *ofdev);
extern void of_release_dev(struct device *dev);
-static inline void of_device_free(struct of_device *dev)
+static inline void of_device_free(struct platform_device *dev)
{
of_release_dev(&dev->dev);
}
-extern ssize_t of_device_get_modalias(struct of_device *ofdev,
+extern ssize_t of_device_get_modalias(struct device *dev,
char *str, ssize_t len);
+
+extern int of_device_uevent(struct device *dev, struct kobj_uevent_env *env);
+
+
+#else /* CONFIG_OF_DEVICE */
+
+static inline int of_driver_match_device(struct device *dev,
+ struct device_driver *drv)
+{
+ return 0;
+}
+
+static inline int of_device_uevent(struct device *dev,
+ struct kobj_uevent_env *env)
+{
+ return -ENODEV;
+}
+
#endif /* CONFIG_OF_DEVICE */
#endif /* _LINUX_OF_DEVICE_H */
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index fc2472c3c254..6598c04dab01 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -33,34 +33,17 @@ enum of_gpio_flags {
#ifdef CONFIG_OF_GPIO
/*
- * Generic OF GPIO chip
- */
-struct of_gpio_chip {
- struct gpio_chip gc;
- int gpio_cells;
- int (*xlate)(struct of_gpio_chip *of_gc, struct device_node *np,
- const void *gpio_spec, enum of_gpio_flags *flags);
-};
-
-static inline struct of_gpio_chip *to_of_gpio_chip(struct gpio_chip *gc)
-{
- return container_of(gc, struct of_gpio_chip, gc);
-}
-
-/*
* OF GPIO chip for memory mapped banks
*/
struct of_mm_gpio_chip {
- struct of_gpio_chip of_gc;
+ struct gpio_chip gc;
void (*save_regs)(struct of_mm_gpio_chip *mm_gc);
void __iomem *regs;
};
static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc)
{
- struct of_gpio_chip *of_gc = to_of_gpio_chip(gc);
-
- return container_of(of_gc, struct of_mm_gpio_chip, of_gc);
+ return container_of(gc, struct of_mm_gpio_chip, gc);
}
extern int of_get_gpio_flags(struct device_node *np, int index,
@@ -69,11 +52,12 @@ extern unsigned int of_gpio_count(struct device_node *np);
extern int of_mm_gpiochip_add(struct device_node *np,
struct of_mm_gpio_chip *mm_gc);
-extern int of_gpio_simple_xlate(struct of_gpio_chip *of_gc,
- struct device_node *np,
- const void *gpio_spec,
- enum of_gpio_flags *flags);
-#else
+
+extern void of_gpiochip_add(struct gpio_chip *gc);
+extern void of_gpiochip_remove(struct gpio_chip *gc);
+extern struct gpio_chip *of_node_to_gpiochip(struct device_node *np);
+
+#else /* CONFIG_OF_GPIO */
/* Drivers may not strictly depend on the GPIO support, so let them link. */
static inline int of_get_gpio_flags(struct device_node *np, int index,
@@ -87,6 +71,9 @@ static inline unsigned int of_gpio_count(struct device_node *np)
return 0;
}
+static inline void of_gpiochip_add(struct gpio_chip *gc) { }
+static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
+
#endif /* CONFIG_OF_GPIO */
/**
diff --git a/include/linux/of_i2c.h b/include/linux/of_i2c.h
index 34974b5a76f7..0efe8d465f55 100644
--- a/include/linux/of_i2c.h
+++ b/include/linux/of_i2c.h
@@ -12,12 +12,19 @@
#ifndef __LINUX_OF_I2C_H
#define __LINUX_OF_I2C_H
+#if defined(CONFIG_OF_I2C) || defined(CONFIG_OF_I2C_MODULE)
#include <linux/i2c.h>
-void of_register_i2c_devices(struct i2c_adapter *adap,
- struct device_node *adap_node);
+extern void of_i2c_register_devices(struct i2c_adapter *adap);
/* must call put_device() when done with returned i2c_client device */
-struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
+extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
+
+#else
+static inline void of_i2c_register_devices(struct i2c_adapter *adap)
+{
+ return;
+}
+#endif /* CONFIG_OF_I2C */
#endif /* __LINUX_OF_I2C_H */
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
new file mode 100644
index 000000000000..5929781c104d
--- /dev/null
+++ b/include/linux/of_irq.h
@@ -0,0 +1,70 @@
+#ifndef __OF_IRQ_H
+#define __OF_IRQ_H
+
+#if defined(CONFIG_OF)
+struct of_irq;
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+
+/*
+ * irq_of_parse_and_map() is used by all OF enabled platforms, but SPARC
+ * implements it differently. However, the prototype is the same for all,
+ * so declare it here regardless of the CONFIG_OF_IRQ setting.
+ */
+extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
+
+#if defined(CONFIG_OF_IRQ)
+/**
+ * of_irq - container for device_node/irq_specifier pair for an irq controller
+ * @controller: pointer to interrupt controller device tree node
+ * @size: size of interrupt specifier
+ * @specifier: array of cells @size long specifying the specific interrupt
+ *
+ * This structure is returned when an interrupt is mapped. The controller
+ * field needs to be put() after use
+ */
+#define OF_MAX_IRQ_SPEC 4 /* We handle specifiers of at most 4 cells */
+struct of_irq {
+ struct device_node *controller; /* Interrupt controller node */
+ u32 size; /* Specifier size */
+ u32 specifier[OF_MAX_IRQ_SPEC]; /* Specifier copy */
+};
+
+/*
+ * Workarounds only applied to 32bit powermac machines
+ */
+#define OF_IMAP_OLDWORLD_MAC 0x00000001
+#define OF_IMAP_NO_PHANDLE 0x00000002
+
+#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
+extern unsigned int of_irq_workarounds;
+extern struct device_node *of_irq_dflt_pic;
+extern int of_irq_map_oldworld(struct device_node *device, int index,
+ struct of_irq *out_irq);
+#else /* CONFIG_PPC32 && CONFIG_PPC_PMAC */
+#define of_irq_workarounds (0)
+#define of_irq_dflt_pic (NULL)
+static inline int of_irq_map_oldworld(struct device_node *device, int index,
+ struct of_irq *out_irq)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_PPC32 && CONFIG_PPC_PMAC */
+
+
+extern int of_irq_map_raw(struct device_node *parent, const u32 *intspec,
+ u32 ointsize, const u32 *addr,
+ struct of_irq *out_irq);
+extern int of_irq_map_one(struct device_node *device, int index,
+ struct of_irq *out_irq);
+extern unsigned int irq_create_of_mapping(struct device_node *controller,
+ const u32 *intspec,
+ unsigned int intsize);
+extern int of_irq_to_resource(struct device_node *dev, int index,
+ struct resource *r);
+
+#endif /* CONFIG_OF_IRQ */
+#endif /* CONFIG_OF */
+#endif /* __OF_IRQ_H */
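A hypothetical driver would typically map the node's first interrupt specifier to a Linux irq number and request it; my_request_dt_irq and "my-device" are illustrative:

#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/errno.h>

static int my_request_dt_irq(struct device_node *np, irq_handler_t handler,
			     void *priv)
{
	unsigned int virq;

	/* walks the interrupt tree and creates the irq_host mapping */
	virq = irq_of_parse_and_map(np, 0);
	if (!virq)
		return -EINVAL;

	return request_irq(virq, handler, 0, "my-device", priv);
}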
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index 1643d3761eb4..4e6d989c06df 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -17,29 +17,24 @@
#include <linux/mod_devicetable.h>
#include <linux/pm.h>
#include <linux/of_device.h>
-
-/*
- * The of_platform_bus_type is a bus type used by drivers that do not
- * attach to a macio or similar bus but still use OF probing
- * mechanism
- */
-extern struct bus_type of_platform_bus_type;
+#include <linux/platform_device.h>
/*
* An of_platform_driver driver is attached to a basic of_device on
- * the "platform bus" (of_platform_bus_type).
+ * the "platform bus" (platform_bus_type).
*/
struct of_platform_driver
{
- int (*probe)(struct of_device* dev,
+ int (*probe)(struct platform_device* dev,
const struct of_device_id *match);
- int (*remove)(struct of_device* dev);
+ int (*remove)(struct platform_device* dev);
- int (*suspend)(struct of_device* dev, pm_message_t state);
- int (*resume)(struct of_device* dev);
- int (*shutdown)(struct of_device* dev);
+ int (*suspend)(struct platform_device* dev, pm_message_t state);
+ int (*resume)(struct platform_device* dev);
+ int (*shutdown)(struct platform_device* dev);
struct device_driver driver;
+ struct platform_driver platform_driver;
};
#define to_of_platform_driver(drv) \
container_of(drv,struct of_platform_driver, driver)
@@ -49,20 +44,30 @@ extern int of_register_driver(struct of_platform_driver *drv,
extern void of_unregister_driver(struct of_platform_driver *drv);
/* Platform drivers register/unregister */
-static inline int of_register_platform_driver(struct of_platform_driver *drv)
-{
- return of_register_driver(drv, &of_platform_bus_type);
-}
-static inline void of_unregister_platform_driver(struct of_platform_driver *drv)
-{
- of_unregister_driver(drv);
-}
+extern int of_register_platform_driver(struct of_platform_driver *drv);
+extern void of_unregister_platform_driver(struct of_platform_driver *drv);
-#include <asm/of_platform.h>
-
-extern struct of_device *of_find_device_by_node(struct device_node *np);
+extern struct platform_device *of_device_alloc(struct device_node *np,
+ const char *bus_id,
+ struct device *parent);
+extern struct platform_device *of_find_device_by_node(struct device_node *np);
extern int of_bus_type_init(struct bus_type *bus, const char *name);
+
+#if !defined(CONFIG_SPARC) /* SPARC has its own device registration method */
+/* Platform devices and busses creation */
+extern struct platform_device *of_platform_device_create(struct device_node *np,
+ const char *bus_id,
+ struct device *parent);
+
+/* pseudo "matches" value to not do deep probe */
+#define OF_NO_DEEP_PROBE ((struct of_device_id *)-1)
+
+extern int of_platform_bus_probe(struct device_node *root,
+ const struct of_device_id *matches,
+ struct device *parent);
+#endif /* !CONFIG_SPARC */
+
#endif /* CONFIG_OF_DEVICE */
#endif /* _LINUX_OF_PLATFORM_H */
diff --git a/include/linux/of_spi.h b/include/linux/of_spi.h
index 5f71ee8c0868..9e3e70f78ae6 100644
--- a/include/linux/of_spi.h
+++ b/include/linux/of_spi.h
@@ -9,10 +9,15 @@
#ifndef __LINUX_OF_SPI_H
#define __LINUX_OF_SPI_H
-#include <linux/of.h>
#include <linux/spi/spi.h>
-extern void of_register_spi_devices(struct spi_master *master,
- struct device_node *np);
+#if defined(CONFIG_OF_SPI) || defined(CONFIG_OF_SPI_MODULE)
+extern void of_register_spi_devices(struct spi_master *master);
+#else
+static inline void of_register_spi_devices(struct spi_master *master)
+{
+ return;
+}
+#endif /* CONFIG_OF_SPI */
#endif /* __LINUX_OF_SPI */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 5b59f35dcb8f..6fa317801e1c 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -128,7 +128,6 @@ enum pageflags {
/* SLUB */
PG_slub_frozen = PG_active,
- PG_slub_debug = PG_error,
};
#ifndef __GENERATING_BOUNDS_H
@@ -215,7 +214,6 @@ PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
__PAGEFLAG(SlobFree, slob_free)
__PAGEFLAG(SlubFrozen, slub_frozen)
-__PAGEFLAG(SlubDebug, slub_debug)
/*
* Private page markings that may be used by the filesystem that owns the page
diff --git a/include/linux/pci.h b/include/linux/pci.h
index f26fda76b87f..b1d17956a153 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -270,6 +270,8 @@ struct pci_dev {
unsigned int d1_support:1; /* Low power state D1 is supported */
unsigned int d2_support:1; /* Low power state D2 is supported */
unsigned int no_d1d2:1; /* Only allow D0 and D3 */
+ unsigned int mmio_always_on:1; /* disallow turning off io/mem
+ decoding during bar sizing */
unsigned int wakeup_prepared:1;
unsigned int d3_delay; /* D3->D0 transition time in ms */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 33145408f045..c81eec4d3c35 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2325,9 +2325,11 @@
#define PCI_DEVICE_ID_JMICRON_JMB361 0x2361
#define PCI_DEVICE_ID_JMICRON_JMB362 0x2362
#define PCI_DEVICE_ID_JMICRON_JMB363 0x2363
+#define PCI_DEVICE_ID_JMICRON_JMB364 0x2364
#define PCI_DEVICE_ID_JMICRON_JMB365 0x2365
#define PCI_DEVICE_ID_JMICRON_JMB366 0x2366
#define PCI_DEVICE_ID_JMICRON_JMB368 0x2368
+#define PCI_DEVICE_ID_JMICRON_JMB369 0x2369
#define PCI_DEVICE_ID_JMICRON_JMB38X_SD 0x2381
#define PCI_DEVICE_ID_JMICRON_JMB38X_MMC 0x2382
#define PCI_DEVICE_ID_JMICRON_JMB38X_MS 0x2383
@@ -2773,3 +2775,6 @@
#define PCI_DEVICE_ID_RME_DIGI32 0x9896
#define PCI_DEVICE_ID_RME_DIGI32_PRO 0x9897
#define PCI_DEVICE_ID_RME_DIGI32_8 0x9898
+
+#define PCI_VENDOR_ID_XEN 0x5853
+#define PCI_DEVICE_ID_XEN_PLATFORM 0x0001
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 5d0266d94985..716f99b682c1 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -214,8 +214,9 @@ struct perf_event_attr {
* See also PERF_RECORD_MISC_EXACT_IP
*/
precise_ip : 2, /* skid constraint */
+ mmap_data : 1, /* non-exec mmap data */
- __reserved_1 : 47;
+ __reserved_1 : 46;
union {
__u32 wakeup_events; /* wakeup every n events */
@@ -461,6 +462,7 @@ enum perf_callchain_context {
#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
+# include <asm/local64.h>
#endif
struct perf_guest_info_callbacks {
@@ -531,14 +533,16 @@ struct hw_perf_event {
struct hrtimer hrtimer;
};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
- /* breakpoint */
- struct arch_hw_breakpoint info;
+ struct { /* breakpoint */
+ struct arch_hw_breakpoint info;
+ struct list_head bp_list;
+ };
#endif
};
- atomic64_t prev_count;
+ local64_t prev_count;
u64 sample_period;
u64 last_period;
- atomic64_t period_left;
+ local64_t period_left;
u64 interrupts;
u64 freq_time_stamp;
@@ -548,7 +552,10 @@ struct hw_perf_event {
struct perf_event;
-#define PERF_EVENT_TXN_STARTED 1
+/*
+ * Common implementation detail of pmu::{start,commit,cancel}_txn
+ */
+#define PERF_EVENT_TXN 0x1
/**
* struct pmu - generic performance monitoring unit
@@ -562,14 +569,28 @@ struct pmu {
void (*unthrottle) (struct perf_event *event);
/*
- * group events scheduling is treated as a transaction,
- * add group events as a whole and perform one schedulability test.
- * If test fails, roll back the whole group
+ * Group events scheduling is treated as a transaction, add group
+ * events as a whole and perform one schedulability test. If the test
+ * fails, roll back the whole group
*/
+ /*
+ * Start the transaction, after this ->enable() doesn't need
+ * to do schedulability tests.
+ */
void (*start_txn) (const struct pmu *pmu);
- void (*cancel_txn) (const struct pmu *pmu);
+ /*
+ * If ->start_txn() disabled the ->enable() schedulability test
+ * then ->commit_txn() is required to perform one. On success
+ * the transaction is closed. On error the transaction is kept
+ * open until ->cancel_txn() is called.
+ */
int (*commit_txn) (const struct pmu *pmu);
+ /*
+ * Will cancel the transaction, assumes ->disable() is called for
+ * each successful ->enable() during the transaction.
+ */
+ void (*cancel_txn) (const struct pmu *pmu);
};
/**
@@ -584,7 +605,9 @@ enum perf_event_active_state {
struct file;
-struct perf_mmap_data {
+#define PERF_BUFFER_WRITABLE 0x01
+
+struct perf_buffer {
atomic_t refcount;
struct rcu_head rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
@@ -650,7 +673,8 @@ struct perf_event {
enum perf_event_active_state state;
unsigned int attach_state;
- atomic64_t count;
+ local64_t count;
+ atomic64_t child_count;
/*
* These are the total time in nanoseconds that the event
@@ -709,7 +733,7 @@ struct perf_event {
atomic_t mmap_count;
int mmap_locked;
struct user_struct *mmap_user;
- struct perf_mmap_data *data;
+ struct perf_buffer *buffer;
/* poll related */
wait_queue_head_t waitq;
@@ -807,7 +831,7 @@ struct perf_cpu_context {
struct perf_output_handle {
struct perf_event *event;
- struct perf_mmap_data *data;
+ struct perf_buffer *buffer;
unsigned long wakeup;
unsigned long size;
void *addr;
@@ -910,8 +934,10 @@ extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
-extern void
-perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
+#ifndef perf_arch_fetch_caller_regs
+static inline void
+perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
+#endif
/*
* Take a snapshot of the regs. Skip ip and frame pointer to
@@ -921,31 +947,11 @@ perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
* - bp for callchains
* - eflags, for future purposes, just in case
*/
-static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip)
+static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
- unsigned long ip;
-
memset(regs, 0, sizeof(*regs));
- switch (skip) {
- case 1 :
- ip = CALLER_ADDR0;
- break;
- case 2 :
- ip = CALLER_ADDR1;
- break;
- case 3 :
- ip = CALLER_ADDR2;
- break;
- case 4:
- ip = CALLER_ADDR3;
- break;
- /* No need to support further for now */
- default:
- ip = 0;
- }
-
- return perf_arch_fetch_caller_regs(regs, ip, skip);
+ perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}
static inline void
@@ -955,21 +961,14 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
struct pt_regs hot_regs;
if (!regs) {
- perf_fetch_caller_regs(&hot_regs, 1);
+ perf_fetch_caller_regs(&hot_regs);
regs = &hot_regs;
}
__perf_sw_event(event_id, nr, nmi, regs, addr);
}
}
-extern void __perf_event_mmap(struct vm_area_struct *vma);
-
-static inline void perf_event_mmap(struct vm_area_struct *vma)
-{
- if (vma->vm_flags & VM_EXEC)
- __perf_event_mmap(vma);
-}
-
+extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
@@ -1001,7 +1000,7 @@ static inline bool perf_paranoid_kernel(void)
extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
int entry_size, struct pt_regs *regs,
- struct hlist_head *head);
+ struct hlist_head *head, int rctx);
extern void perf_bp_event(struct perf_event *event, void *data);
#ifndef perf_misc_flags
@@ -1068,7 +1067,7 @@ static inline void perf_event_disable(struct perf_event *event) { }
#define perf_cpu_notifier(fn) \
do { \
static struct notifier_block fn##_nb __cpuinitdata = \
- { .notifier_call = fn, .priority = 20 }; \
+ { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
(void *)(unsigned long)smp_processor_id()); \
fn(&fn##_nb, (unsigned long)CPU_STARTING, \
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 5417944d3687..d7ecad0093bb 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -43,10 +43,64 @@ extern struct resource *platform_get_resource_byname(struct platform_device *, u
extern int platform_get_irq_byname(struct platform_device *, const char *);
extern int platform_add_devices(struct platform_device **, int);
-extern struct platform_device *platform_device_register_simple(const char *, int id,
- const struct resource *, unsigned int);
-extern struct platform_device *platform_device_register_data(struct device *,
- const char *, int, const void *, size_t);
+extern struct platform_device *platform_device_register_resndata(
+ struct device *parent, const char *name, int id,
+ const struct resource *res, unsigned int num,
+ const void *data, size_t size);
+
+/**
+ * platform_device_register_simple - add a platform-level device and its resources
+ * @name: base name of the device we're adding
+ * @id: instance id
+ * @res: set of resources that needs to be allocated for the device
+ * @num: number of resources
+ *
+ * This function creates a simple platform device that requires minimal
+ * resource and memory management. A canned release function that frees
+ * the memory allocated for the device allows drivers using such devices
+ * to be unloaded without waiting for the last reference to the device
+ * to be dropped.
+ *
+ * This interface is primarily intended for use with legacy drivers which
+ * probe hardware directly. Because such drivers create sysfs device nodes
+ * themselves, rather than letting system infrastructure handle such device
+ * enumeration tasks, they don't fully conform to the Linux driver model.
+ * In particular, when such drivers are built as modules, they can't be
+ * "hotplugged".
+ *
+ * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
+ */
+static inline struct platform_device *platform_device_register_simple(
+ const char *name, int id,
+ const struct resource *res, unsigned int num)
+{
+ return platform_device_register_resndata(NULL, name, id,
+ res, num, NULL, 0);
+}
+
+/**
+ * platform_device_register_data - add a platform-level device with platform-specific data
+ * @parent: parent device for the device we're adding
+ * @name: base name of the device we're adding
+ * @id: instance id
+ * @data: platform specific data for this platform device
+ * @size: size of platform specific data
+ *
+ * This function creates a simple platform device that requires minimal
+ * resource and memory management. A canned release function that frees
+ * the memory allocated for the device allows drivers using such devices
+ * to be unloaded without waiting for the last reference to the device
+ * to be dropped.
+ *
+ * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
+ */
+static inline struct platform_device *platform_device_register_data(
+ struct device *parent, const char *name, int id,
+ const void *data, size_t size)
+{
+ return platform_device_register_resndata(parent, name, id,
+ NULL, 0, data, size);
+}
extern struct platform_device *platform_device_alloc(const char *name, int id);
extern int platform_device_add_resources(struct platform_device *pdev,
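Hypothetical board code creating a device that only carries platform data would now go through the resndata-based helper via the inline wrapper; struct my_pdata and "my-device" are illustrative:

#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/err.h>

struct my_pdata {			/* hypothetical platform data */
	int gpio;
};

static struct my_pdata my_pdata = {
	.gpio = 42,
};

static int __init my_board_init(void)
{
	struct platform_device *pdev;

	/* my_pdata is copied into the device; no resources attached */
	pdev = platform_device_register_data(NULL, "my-device", -1,
					     &my_pdata, sizeof(my_pdata));
	return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}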
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index b653b4aaa8a6..9fbc54a2585d 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -40,6 +40,7 @@
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>
+#include <linux/debugobjects.h>
#ifdef CONFIG_RCU_TORTURE_TEST
extern int rcutorture_runnable; /* for sysctl */
@@ -79,6 +80,16 @@ extern void rcu_init(void);
(ptr)->next = NULL; (ptr)->func = NULL; \
} while (0)
+/*
+ * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
+ * initialization and destruction of rcu_head structures on the stack. rcu_head
+ * structures allocated dynamically in the heap or defined statically don't
+ * need any initialization.
+ */
+#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
+extern void init_rcu_head_on_stack(struct rcu_head *head);
+extern void destroy_rcu_head_on_stack(struct rcu_head *head);
+#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline void init_rcu_head_on_stack(struct rcu_head *head)
{
}
@@ -86,6 +97,7 @@ static inline void init_rcu_head_on_stack(struct rcu_head *head)
static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
{
}
+#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
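A sketch of the on-stack case this debug support targets (not from this
patch; the wrapper struct and function names are hypothetical):

/* Wait for a grace period using an rcu_head on the stack.  Needs
 * <linux/rcupdate.h>, <linux/completion.h> and <linux/kernel.h>.
 */
struct stack_rcu_wait {
	struct rcu_head head;
	struct completion done;
};

static void stack_rcu_wake(struct rcu_head *head)
{
	struct stack_rcu_wait *w =
		container_of(head, struct stack_rcu_wait, head);

	complete(&w->done);
}

static void stack_rcu_wait_gp(void)
{
	struct stack_rcu_wait w;

	init_completion(&w.done);
	init_rcu_head_on_stack(&w.head);	/* no-op unless debugobjects */
	call_rcu(&w.head, stack_rcu_wake);
	wait_for_completion(&w.done);
	destroy_rcu_head_on_stack(&w.head);
}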
@@ -517,4 +529,74 @@ extern void call_rcu(struct rcu_head *head,
extern void call_rcu_bh(struct rcu_head *head,
void (*func)(struct rcu_head *head));
+/*
+ * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
+ * by call_rcu() and rcu callback execution, and are therefore not part of the
+ * RCU API. They are left in rcupdate.h because all RCU flavors use them.
+ */
+
+#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
+# define STATE_RCU_HEAD_READY 0
+# define STATE_RCU_HEAD_QUEUED 1
+
+extern struct debug_obj_descr rcuhead_debug_descr;
+
+static inline void debug_rcu_head_queue(struct rcu_head *head)
+{
+ debug_object_activate(head, &rcuhead_debug_descr);
+ debug_object_active_state(head, &rcuhead_debug_descr,
+ STATE_RCU_HEAD_READY,
+ STATE_RCU_HEAD_QUEUED);
+}
+
+static inline void debug_rcu_head_unqueue(struct rcu_head *head)
+{
+ debug_object_active_state(head, &rcuhead_debug_descr,
+ STATE_RCU_HEAD_QUEUED,
+ STATE_RCU_HEAD_READY);
+ debug_object_deactivate(head, &rcuhead_debug_descr);
+}
+#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+static inline void debug_rcu_head_queue(struct rcu_head *head)
+{
+}
+
+static inline void debug_rcu_head_unqueue(struct rcu_head *head)
+{
+}
+#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+
+#ifndef CONFIG_PROVE_RCU
+#define __do_rcu_dereference_check(c) do { } while (0)
+#endif /* #ifndef CONFIG_PROVE_RCU */
+
+#define __rcu_dereference_index_check(p, c) \
+ ({ \
+ typeof(p) _________p1 = ACCESS_ONCE(p); \
+ __do_rcu_dereference_check(c); \
+ smp_read_barrier_depends(); \
+ (_________p1); \
+ })
+
+/**
+ * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * Similar to rcu_dereference_check(), but omits the sparse checking.
+ * This allows rcu_dereference_index_check() to be used on integers,
+ * which can then be used as array indices. Attempting to use
+ * rcu_dereference_check() on an integer will give compiler warnings
+ * because the sparse address-space mechanism relies on dereferencing
+ * the RCU-protected pointer. Dereferencing integers is not something
+ * that even gcc will put up with.
+ *
+ * Note that this function does not implicitly check for RCU read-side
+ * critical sections. If this function gains lots of uses, it might
+ * make sense to provide versions for each flavor of RCU, but it does
+ * not make sense as of early 2010.
+ */
+#define rcu_dereference_index_check(p, c) \
+ __rcu_dereference_index_check((p), (c))
+
#endif /* __LINUX_RCUPDATE_H */
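A hedged usage sketch for the new index-check macro; the table, index
variable and foo_lock below are hypothetical:

struct foo;

static DEFINE_SPINLOCK(foo_lock);
static struct foo *foo_table[16];
static int foo_cur;			/* writers update under foo_lock */

static struct foo *foo_current(void)
{
	int idx;

	/* an integer, so rcu_dereference_check() would trip sparse */
	idx = rcu_dereference_index_check(foo_cur,
					  rcu_read_lock_held() ||
					  lockdep_is_held(&foo_lock));
	return foo_table[idx];
}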
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0478888c6899..9591907c4f79 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -272,19 +272,10 @@ extern int runqueue_is_locked(int cpu);
extern cpumask_var_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
-extern int select_nohz_load_balancer(int cpu);
-extern int get_nohz_load_balancer(void);
-extern int nohz_ratelimit(int cpu);
+extern void select_nohz_load_balancer(int stop_tick);
+extern int get_nohz_timer_target(void);
#else
-static inline int select_nohz_load_balancer(int cpu)
-{
- return 0;
-}
-
-static inline int nohz_ratelimit(int cpu)
-{
- return 0;
-}
+static inline void select_nohz_load_balancer(int stop_tick) { }
#endif
/*
@@ -316,20 +307,16 @@ extern void scheduler_tick(void);
extern void sched_show_task(struct task_struct *p);
-#ifdef CONFIG_DETECT_SOFTLOCKUP
-extern void softlockup_tick(void);
+#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
-extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos);
+extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
extern int softlockup_thresh;
#else
-static inline void softlockup_tick(void)
-{
-}
static inline void touch_softlockup_watchdog(void)
{
}
@@ -805,7 +792,7 @@ enum cpu_idle_type {
#define SD_POWERSAVINGS_BALANCE 0x0100 /* Balance for power savings */
#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */
#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
-
+#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
enum powersavings_balance_level {
@@ -840,6 +827,8 @@ static inline int sd_balance_for_package_power(void)
return SD_PREFER_SIBLING;
}
+extern int __weak arch_sd_sibling_asym_packing(void);
+
/*
* Optimise SD flags for power savings:
* SD_BALANCE_NEWIDLE helps agressive task consolidation and power savings.
@@ -861,7 +850,7 @@ struct sched_group {
* CPU power of this group, SCHED_LOAD_SCALE being max power for a
* single CPU.
*/
- unsigned int cpu_power;
+ unsigned int cpu_power, cpu_power_orig;
/*
* The CPUs this group covers.
@@ -1697,6 +1686,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
+#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */
#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
@@ -1791,20 +1781,23 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
#endif
/*
- * Architectures can set this to 1 if they have specified
- * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
- * but then during bootup it turns out that sched_clock()
- * is reliable after all:
+ * Do not use outside of architecture code which knows its limitations.
+ *
+ * sched_clock() makes no promise of monotonicity or of bounded drift
+ * between CPUs; using it (which you should not) requires disabling IRQs.
+ *
+ * Please use one of the three interfaces below.
*/
-#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-extern int sched_clock_stable;
-#endif
-
-/* ftrace calls sched_clock() directly */
extern unsigned long long notrace sched_clock(void);
+/*
+ * See the comment in kernel/sched_clock.c
+ */
+extern u64 cpu_clock(int cpu);
+extern u64 local_clock(void);
+extern u64 sched_clock_cpu(int cpu);
+
extern void sched_clock_init(void);
-extern u64 sched_clock_cpu(int cpu);
#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
@@ -1819,17 +1812,19 @@ static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}
#else
+/*
+ * Architectures can set this to 1 if they have specified
+ * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
+ * but then during bootup it turns out that sched_clock()
+ * is reliable after all:
+ */
+extern int sched_clock_stable;
+
extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
#endif
-/*
- * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
- * clock constructed from sched_clock():
- */
-extern unsigned long long cpu_clock(int cpu);
-
extern unsigned long long
task_sched_runtime(struct task_struct *task);
extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
@@ -2435,18 +2430,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
-#ifdef CONFIG_TRACING
-extern void
-__trace_special(void *__tr, void *__data,
- unsigned long arg1, unsigned long arg2, unsigned long arg3);
-#else
-static inline void
-__trace_special(void *__tr, void *__data,
- unsigned long arg1, unsigned long arg2, unsigned long arg3)
-{
-}
-#endif
-
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
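A sketch of the sanctioned clock usage (the workload function is
hypothetical):

static void foo_time_section(void)
{
	u64 t0, t1;

	t0 = local_clock();	/* this CPU's clock, IRQ-safe */
	do_foo_work();		/* hypothetical workload */
	t1 = local_clock();

	pr_info("foo work took %llu ns\n", (unsigned long long)(t1 - t0));

	/* cpu_clock(cpu) yields the same kind of timestamp for a given
	 * CPU, e.g. when correlating per-CPU trace buffers. */
}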
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 49d1247cd6d9..59260e21bdf5 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -268,7 +268,8 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
* allocator where we care about the real place the memory allocation
* request comes from.
*/
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
+ (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
__kmalloc_track_caller(size, flags, _RET_IP_)
@@ -286,7 +287,8 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
* standard allocator where we care about the real place the memory
* allocation request comes from.
*/
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
+ (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
__kmalloc_node_track_caller(size, flags, node, \
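The kstrdup()-style pattern the track_caller variants enable (sketch
only; my_strdup is hypothetical):

char *my_strdup(const char *s, gfp_t gfp)
{
	size_t len = strlen(s) + 1;
	/* records my_strdup's caller, not my_strdup, as the allocation site */
	char *p = kmalloc_track_caller(len, gfp);

	if (p)
		memcpy(p, s, len);
	return p;
}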
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 1812dac8c496..1acfa73ce2ac 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -14,7 +14,8 @@
#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>
-#include <linux/kmemtrace.h>
+
+#include <trace/events/kmem.h>
#ifndef ARCH_KMALLOC_MINALIGN
/*
diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h
deleted file mode 100644
index 13337bf6c3f5..000000000000
--- a/include/linux/slow-work.h
+++ /dev/null
@@ -1,163 +0,0 @@
-/* Worker thread pool for slow items, such as filesystem lookups or mkdirs
- *
- * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- *
- * See Documentation/slow-work.txt
- */
-
-#ifndef _LINUX_SLOW_WORK_H
-#define _LINUX_SLOW_WORK_H
-
-#ifdef CONFIG_SLOW_WORK
-
-#include <linux/sysctl.h>
-#include <linux/timer.h>
-
-struct slow_work;
-#ifdef CONFIG_SLOW_WORK_DEBUG
-struct seq_file;
-#endif
-
-/*
- * The operations used to support slow work items
- */
-struct slow_work_ops {
- /* owner */
- struct module *owner;
-
- /* get a ref on a work item
- * - return 0 if successful, -ve if not
- */
- int (*get_ref)(struct slow_work *work);
-
- /* discard a ref to a work item */
- void (*put_ref)(struct slow_work *work);
-
- /* execute a work item */
- void (*execute)(struct slow_work *work);
-
-#ifdef CONFIG_SLOW_WORK_DEBUG
- /* describe a work item for debugfs */
- void (*desc)(struct slow_work *work, struct seq_file *m);
-#endif
-};
-
-/*
- * A slow work item
- * - A reference is held on the parent object by the thread pool when it is
- * queued
- */
-struct slow_work {
- struct module *owner; /* the owning module */
- unsigned long flags;
-#define SLOW_WORK_PENDING 0 /* item pending (further) execution */
-#define SLOW_WORK_EXECUTING 1 /* item currently executing */
-#define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */
-#define SLOW_WORK_VERY_SLOW 3 /* item is very slow */
-#define SLOW_WORK_CANCELLING 4 /* item is being cancelled, don't enqueue */
-#define SLOW_WORK_DELAYED 5 /* item is struct delayed_slow_work with active timer */
- const struct slow_work_ops *ops; /* operations table for this item */
- struct list_head link; /* link in queue */
-#ifdef CONFIG_SLOW_WORK_DEBUG
- struct timespec mark; /* jiffies at which queued or exec begun */
-#endif
-};
-
-struct delayed_slow_work {
- struct slow_work work;
- struct timer_list timer;
-};
-
-/**
- * slow_work_init - Initialise a slow work item
- * @work: The work item to initialise
- * @ops: The operations to use to handle the slow work item
- *
- * Initialise a slow work item.
- */
-static inline void slow_work_init(struct slow_work *work,
- const struct slow_work_ops *ops)
-{
- work->flags = 0;
- work->ops = ops;
- INIT_LIST_HEAD(&work->link);
-}
-
-/**
- * slow_work_init - Initialise a delayed slow work item
- * @work: The work item to initialise
- * @ops: The operations to use to handle the slow work item
- *
- * Initialise a delayed slow work item.
- */
-static inline void delayed_slow_work_init(struct delayed_slow_work *dwork,
- const struct slow_work_ops *ops)
-{
- init_timer(&dwork->timer);
- slow_work_init(&dwork->work, ops);
-}
-
-/**
- * vslow_work_init - Initialise a very slow work item
- * @work: The work item to initialise
- * @ops: The operations to use to handle the slow work item
- *
- * Initialise a very slow work item. This item will be restricted such that
- * only a certain number of the pool threads will be able to execute items of
- * this type.
- */
-static inline void vslow_work_init(struct slow_work *work,
- const struct slow_work_ops *ops)
-{
- work->flags = 1 << SLOW_WORK_VERY_SLOW;
- work->ops = ops;
- INIT_LIST_HEAD(&work->link);
-}
-
-/**
- * slow_work_is_queued - Determine if a slow work item is on the work queue
- * work: The work item to test
- *
- * Determine if the specified slow-work item is on the work queue. This
- * returns true if it is actually on the queue.
- *
- * If the item is executing and has been marked for requeue when execution
- * finishes, then false will be returned.
- *
- * Anyone wishing to wait for completion of execution can wait on the
- * SLOW_WORK_EXECUTING bit.
- */
-static inline bool slow_work_is_queued(struct slow_work *work)
-{
- unsigned long flags = work->flags;
- return flags & SLOW_WORK_PENDING && !(flags & SLOW_WORK_EXECUTING);
-}
-
-extern int slow_work_enqueue(struct slow_work *work);
-extern void slow_work_cancel(struct slow_work *work);
-extern int slow_work_register_user(struct module *owner);
-extern void slow_work_unregister_user(struct module *owner);
-
-extern int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
- unsigned long delay);
-
-static inline void delayed_slow_work_cancel(struct delayed_slow_work *dwork)
-{
- slow_work_cancel(&dwork->work);
-}
-
-extern bool slow_work_sleep_till_thread_needed(struct slow_work *work,
- signed long *_timeout);
-
-#ifdef CONFIG_SYSCTL
-extern ctl_table slow_work_sysctls[];
-#endif
-
-#endif /* CONFIG_SLOW_WORK */
-#endif /* _LINUX_SLOW_WORK_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 4ba59cfc1f75..6447a723ecb1 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,9 +10,10 @@
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>
-#include <linux/kmemtrace.h>
#include <linux/kmemleak.h>
+#include <trace/events/kmem.h>
+
enum stat_item {
ALLOC_FASTPATH, /* Allocation from cpu slab */
ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 13ebb5413a79..a6bfd1367d2a 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -167,7 +167,6 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.enter_event = &event_enter_##sname, \
.exit_event = &event_exit_##sname, \
.enter_fields = LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \
- .exit_fields = LIST_HEAD_INIT(__syscall_meta_##sname.exit_fields), \
};
#define SYSCALL_DEFINE0(sname) \
@@ -182,7 +181,6 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.enter_event = &event_enter__##sname, \
.exit_event = &event_exit__##sname, \
.enter_fields = LIST_HEAD_INIT(__syscall_meta__##sname.enter_fields), \
- .exit_fields = LIST_HEAD_INIT(__syscall_meta__##sname.exit_fields), \
}; \
asmlinkage long sys_##sname(void)
#else
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index f2694eb4dd3d..3c92121ba9af 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -22,14 +22,8 @@ struct kobject;
struct module;
enum kobj_ns_type;
-/* FIXME
- * The *owner field is no longer used.
- * x86 tree has been cleaned up. The owner
- * attribute is still left for other arches.
- */
struct attribute {
const char *name;
- struct module *owner;
mode_t mode;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lock_class_key *key;
@@ -136,8 +130,8 @@ int __must_check sysfs_create_file(struct kobject *kobj,
const struct attribute *attr);
int __must_check sysfs_create_files(struct kobject *kobj,
const struct attribute **attr);
-int __must_check sysfs_chmod_file(struct kobject *kobj, struct attribute *attr,
- mode_t mode);
+int __must_check sysfs_chmod_file(struct kobject *kobj,
+ const struct attribute *attr, mode_t mode);
void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr);
void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr);
@@ -225,7 +219,7 @@ static inline int sysfs_create_files(struct kobject *kobj,
}
static inline int sysfs_chmod_file(struct kobject *kobj,
- struct attribute *attr, mode_t mode)
+ const struct attribute *attr, mode_t mode)
{
return 0;
}
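A call-site sketch for the constified sysfs_chmod_file() (names below are
hypothetical):

static ssize_t enable_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf);

static struct kobj_attribute enable_attr =
	__ATTR(enable, 0444, enable_show, NULL);

static int foo_make_writable(struct kobject *kobj)
{
	/* flip the sysfs file to 0644 after, say, a hardware unlock */
	return sysfs_chmod_file(kobj, &enable_attr.attr, 0644);
}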
diff --git a/include/linux/time.h b/include/linux/time.h
index ea3559f0b3f2..cb34e35fabac 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -76,9 +76,25 @@ extern unsigned long mktime(const unsigned int year, const unsigned int mon,
const unsigned int min, const unsigned int sec);
extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec);
+
+/*
+ * timespec_add_safe assumes both values are positive and checks
+ * for overflow. It will return TIME_T_MAX if the result would be
+ * smaller than either of the arguments.
+ */
extern struct timespec timespec_add_safe(const struct timespec lhs,
const struct timespec rhs);
+
+static inline struct timespec timespec_add(struct timespec lhs,
+ struct timespec rhs)
+{
+ struct timespec ts_delta;
+ set_normalized_timespec(&ts_delta, lhs.tv_sec + rhs.tv_sec,
+ lhs.tv_nsec + rhs.tv_nsec);
+ return ts_delta;
+}
+
/*
* sub = lhs - rhs, in normalized form
*/
@@ -97,8 +113,6 @@ static inline struct timespec timespec_sub(struct timespec lhs,
#define timespec_valid(ts) \
(((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC))
-extern struct timespec xtime;
-extern struct timespec wall_to_monotonic;
extern seqlock_t xtime_lock;
extern void read_persistent_clock(struct timespec *ts);
@@ -110,7 +124,8 @@ extern int timekeeping_suspended;
unsigned long get_seconds(void);
struct timespec current_kernel_time(void);
-struct timespec __current_kernel_time(void); /* does not hold xtime_lock */
+struct timespec __current_kernel_time(void); /* does not take xtime_lock */
+struct timespec __get_wall_to_monotonic(void); /* does not take xtime_lock */
struct timespec get_monotonic_coarse(void);
#define CURRENT_TIME (current_kernel_time())
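A sketch contrasting the unchecked and checked additions (the helper is
hypothetical):

static struct timespec foo_expiry(struct timespec timeout)
{
	struct timespec now = current_kernel_time();

	/* timespec_add() is fine when both operands are known-small;
	 * user-supplied timeouts can be huge, so prefer the checked
	 * variant, which clamps to TIME_T_MAX on overflow. */
	return timespec_add_safe(now, timeout);
}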
diff --git a/include/linux/topology.h b/include/linux/topology.h
index c44df50a05ab..b572e432d2f3 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -103,6 +103,7 @@ int arch_update_cpu_topology(void);
| 1*SD_SHARE_PKG_RESOURCES \
| 0*SD_SERIALIZE \
| 0*SD_PREFER_SIBLING \
+ | arch_sd_sibling_asym_packing() \
, \
.last_balance = jiffies, \
.balance_interval = 1, \
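A sketch of an architecture opting in to asymmetric packing (the
predicate is hypothetical; the weak default returns 0, leaving the flag
clear):

int arch_sd_sibling_asym_packing(void)
{
	/* e.g. SMT hardware where lower-numbered threads are faster */
	if (foo_arch_has_asym_smt())		/* hypothetical */
		return SD_ASYM_PACKING;
	return 0;
}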
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 931078b73226..7802a243ee13 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -552,6 +552,9 @@ static inline void tty_audit_push_task(struct task_struct *tsk,
}
#endif
+/* tty_io.c */
+extern int __init tty_init(void);
+
/* tty_ioctl.c */
extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 227c2a585e4f..de05e96e0a70 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -30,7 +30,7 @@ struct vm_struct {
unsigned long flags;
struct page **pages;
unsigned int nr_pages;
- unsigned long phys_addr;
+ phys_addr_t phys_addr;
void *caller;
};
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 9466e860d8c2..4f9d277bcd9a 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -9,6 +9,7 @@
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
+#include <linux/threads.h>
#include <asm/atomic.h>
struct workqueue_struct;
@@ -22,12 +23,59 @@ typedef void (*work_func_t)(struct work_struct *work);
*/
#define work_data_bits(work) ((unsigned long *)(&(work)->data))
+enum {
+ WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */
+ WORK_STRUCT_CWQ_BIT = 1, /* data points to cwq */
+ WORK_STRUCT_LINKED_BIT = 2, /* next work is linked to this one */
+#ifdef CONFIG_DEBUG_OBJECTS_WORK
+ WORK_STRUCT_STATIC_BIT = 3, /* static initializer (debugobjects) */
+ WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */
+#else
+ WORK_STRUCT_COLOR_SHIFT = 3, /* color for workqueue flushing */
+#endif
+
+ WORK_STRUCT_COLOR_BITS = 4,
+
+ WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
+ WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT,
+ WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,
+#ifdef CONFIG_DEBUG_OBJECTS_WORK
+ WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT,
+#else
+ WORK_STRUCT_STATIC = 0,
+#endif
+
+ /*
+ * The last color is reserved as "no color", used for work items
+ * which don't participate in workqueue flushing.
+ */
+ WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1,
+ WORK_NO_COLOR = WORK_NR_COLORS,
+
+ /* special cpu IDs */
+ WORK_CPU_UNBOUND = NR_CPUS,
+ WORK_CPU_NONE = NR_CPUS + 1,
+ WORK_CPU_LAST = WORK_CPU_NONE,
+
+ /*
+ * Reserve the low 7 bits of the cwq pointer with debugobjects
+ * turned off. This makes cwqs 128-byte aligned, which isn't too
+ * excessive while allowing 15 workqueue flush colors.
+ */
+ WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT +
+ WORK_STRUCT_COLOR_BITS,
+
+ WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
+ WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
+ WORK_STRUCT_NO_CPU = WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS,
+
+ /* bit mask for work_busy() return values */
+ WORK_BUSY_PENDING = 1 << 0,
+ WORK_BUSY_RUNNING = 1 << 1,
+};
+
struct work_struct {
atomic_long_t data;
-#define WORK_STRUCT_PENDING 0 /* T if work item pending execution */
-#define WORK_STRUCT_STATIC 1 /* static initializer (debugobjects) */
-#define WORK_STRUCT_FLAG_MASK (3UL)
-#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
struct list_head entry;
work_func_t func;
#ifdef CONFIG_LOCKDEP
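How the new layout is consumed (sketch only; without debugobjects, bit 0
is PENDING, bit 1 marks a cwq pointer, bit 2 LINKED, bits 3-6 the flush
color, and the remaining high bits carry the 128-byte-aligned cwq
pointer):

static bool foo_work_pending(struct work_struct *work)
{
	return test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work));
}

static unsigned int foo_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}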
@@ -35,8 +83,9 @@ struct work_struct {
#endif
};
-#define WORK_DATA_INIT() ATOMIC_LONG_INIT(0)
-#define WORK_DATA_STATIC_INIT() ATOMIC_LONG_INIT(2)
+#define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU)
+#define WORK_DATA_STATIC_INIT() \
+ ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC)
struct delayed_work {
struct work_struct work;
@@ -96,9 +145,14 @@ struct execute_work {
#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
+static inline unsigned int work_static(struct work_struct *work)
+{
+ return *work_data_bits(work) & WORK_STRUCT_STATIC;
+}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
+static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif
/*
@@ -162,7 +216,7 @@ static inline void destroy_work_on_stack(struct work_struct *work) { }
* @work: The work item in question
*/
#define work_pending(work) \
- test_bit(WORK_STRUCT_PENDING, work_data_bits(work))
+ test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
/**
* delayed_work_pending - Find out whether a delayable work item is currently
@@ -177,16 +231,56 @@ static inline void destroy_work_on_stack(struct work_struct *work) { }
* @work: The work item in question
*/
#define work_clear_pending(work) \
- clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))
+ clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
+
+enum {
+ WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */
+ WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
+ WQ_FREEZEABLE = 1 << 2, /* freeze during suspend */
+ WQ_RESCUER = 1 << 3, /* has a rescue worker */
+ WQ_HIGHPRI = 1 << 4, /* high priority */
+ WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */
+
+ WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
+ WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
+ WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
+};
+/* unbound wqs aren't per-cpu; scale max_active by the number of CPUs */
+#define WQ_UNBOUND_MAX_ACTIVE \
+ max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)
+
+/*
+ * System-wide workqueues which are always present.
+ *
+ * system_wq is the one used by schedule[_delayed]_work[_on]().
+ * Multi-CPU and multi-threaded. Some users expect relatively short
+ * queue flush times, so don't queue work items which can run for too
+ * long.
+ *
+ * system_long_wq is similar to system_wq but may host long running
+ * works. Queue flushing might take relatively long.
+ *
+ * system_nrt_wq is non-reentrant and guarantees that any given work
+ * item is never executed in parallel by multiple CPUs. Queue
+ * flushing might take relatively long.
+ *
+ * system_unbound_wq is an unbound workqueue. Workers are not bound
+ * to any specific CPU and are not concurrency managed; all queued
+ * work items are executed immediately as long as the max_active
+ * limit is not reached and resources are available.
+ */
+extern struct workqueue_struct *system_wq;
+extern struct workqueue_struct *system_long_wq;
+extern struct workqueue_struct *system_nrt_wq;
+extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *
-__create_workqueue_key(const char *name, int singlethread,
- int freezeable, int rt, struct lock_class_key *key,
- const char *lock_name);
+__alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
+ struct lock_class_key *key, const char *lock_name);
#ifdef CONFIG_LOCKDEP
-#define __create_workqueue(name, singlethread, freezeable, rt) \
+#define alloc_workqueue(name, flags, max_active) \
({ \
static struct lock_class_key __key; \
const char *__lock_name; \
@@ -196,20 +290,20 @@ __create_workqueue_key(const char *name, int singlethread,
else \
__lock_name = #name; \
\
- __create_workqueue_key((name), (singlethread), \
- (freezeable), (rt), &__key, \
- __lock_name); \
+ __alloc_workqueue_key((name), (flags), (max_active), \
+ &__key, __lock_name); \
})
#else
-#define __create_workqueue(name, singlethread, freezeable, rt) \
- __create_workqueue_key((name), (singlethread), (freezeable), (rt), \
- NULL, NULL)
+#define alloc_workqueue(name, flags, max_active) \
+ __alloc_workqueue_key((name), (flags), (max_active), NULL, NULL)
#endif
-#define create_workqueue(name) __create_workqueue((name), 0, 0, 0)
-#define create_rt_workqueue(name) __create_workqueue((name), 0, 0, 1)
-#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1, 0)
-#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0, 0)
+#define create_workqueue(name) \
+ alloc_workqueue((name), WQ_RESCUER, 1)
+#define create_freezeable_workqueue(name) \
+ alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_RESCUER, 1)
+#define create_singlethread_workqueue(name) \
+ alloc_workqueue((name), WQ_UNBOUND | WQ_RESCUER, 1)
extern void destroy_workqueue(struct workqueue_struct *wq);
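A call-site sketch for the new-style allocator (names hypothetical):

static struct workqueue_struct *foo_wq;

static int __init foo_init(void)
{
	/* single-threaded semantics plus a rescuer: equivalent in
	 * spirit to create_singlethread_workqueue("foo") */
	foo_wq = alloc_workqueue("foo", WQ_UNBOUND | WQ_RESCUER, 1);
	if (!foo_wq)
		return -ENOMEM;
	return 0;
}

static void __exit foo_exit(void)
{
	destroy_workqueue(foo_wq);
}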
@@ -231,16 +325,19 @@ extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay)
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
-extern int current_is_keventd(void);
extern int keventd_up(void);
-extern void init_workqueues(void);
int execute_in_process_context(work_func_t fn, struct execute_work *);
extern int flush_work(struct work_struct *work);
-
extern int cancel_work_sync(struct work_struct *work);
+extern void workqueue_set_max_active(struct workqueue_struct *wq,
+ int max_active);
+extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
+extern unsigned int work_cpu(struct work_struct *work);
+extern unsigned int work_busy(struct work_struct *work);
+
/*
* Kill off a pending schedule_delayed_work(). Note that the work callback
* function may still be running on return from cancel_delayed_work(), unless
@@ -297,4 +394,15 @@ static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
#else
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_FREEZER
+extern void freeze_workqueues_begin(void);
+extern bool freeze_workqueues_busy(void);
+extern void thaw_workqueues(void);
+#endif /* CONFIG_FREEZER */
+
+#ifdef CONFIG_LOCKDEP
+int in_workqueue_context(struct workqueue_struct *wq);
+#endif
+
#endif
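Finally, a sketch of the new inspection helpers; the back-off policy is
hypothetical, and the caller is assumed to run with preemption disabled
so smp_processor_id() is safe:

static void foo_try_queue(struct workqueue_struct *wq,
			  struct work_struct *work)
{
	/* shed load while this CPU's queue is congested */
	if (workqueue_congested(smp_processor_id(), wq))
		return;

	queue_work(wq, work);
}

static bool foo_work_active(struct work_struct *work)
{
	return work_busy(work) & (WORK_BUSY_PENDING | WORK_BUSY_RUNNING);
}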