Diffstat (limited to 'include/linux')
 include/linux/acpi.h | 12
 include/linux/atmel_tc.h | 1
 include/linux/bitops.h | 13
 include/linux/blk-mq.h | 10
 include/linux/blk_types.h | 24
 include/linux/blkdev.h | 9
 include/linux/buffer_head.h | 2
 include/linux/ceph/libceph.h | 4
 include/linux/ceph/messenger.h | 16
 include/linux/compiler-gcc.h | 5
 include/linux/configfs.h | 107
 include/linux/context_tracking.h | 12
 include/linux/cpuset.h | 6
 include/linux/devfreq_cooling.h | 81
 include/linux/device.h | 16
 include/linux/dma-mapping.h | 18
 include/linux/fb.h | 2
 include/linux/fence.h | 25
 include/linux/fs.h | 4
 include/linux/gfp.h | 286
 include/linux/hid.h | 8
 include/linux/highmem.h | 1
 include/linux/hugetlb_cgroup.h | 4
 include/linux/i2c-ocores.h | 1
 include/linux/i2c/i2c-rcar.h | 10
 include/linux/irqchip/mips-gic.h | 17
 include/linux/kdev_t.h | 9
 include/linux/kernel.h | 44
 include/linux/kref.h | 33
 include/linux/kvm_host.h | 12
 include/linux/lightnvm.h | 178
 include/linux/lockd/lockd.h | 10
 include/linux/marvell_phy.h | 1
 include/linux/math64.h | 80
 include/linux/mfd/cros_ec.h | 1
 include/linux/mfd/syscon/imx7-iomuxc-gpr.h | 47
 include/linux/mlx4/device.h | 2
 include/linux/mlx4/qp.h | 24
 include/linux/mlx5/mlx5_ifc.h | 24
 include/linux/mm.h | 82
 include/linux/mm_types.h | 40
 include/linux/mmzone.h | 101
 include/linux/moduleparam.h | 1
 include/linux/netdevice.h | 33
 include/linux/netfilter/ipset/ip_set.h | 2
 include/linux/netfilter_ingress.h | 13
 include/linux/nfs4.h | 3
 include/linux/nfs_fs_sb.h | 2
 include/linux/nfs_xdr.h | 29
 include/linux/of.h | 4
 include/linux/of_address.h | 7
 include/linux/of_dma.h | 4
 include/linux/of_pci.h | 3
 include/linux/page-flags.h | 80
 include/linux/pageblock-flags.h | 2
 include/linux/pagemap.h | 7
 include/linux/pci.h | 9
 include/linux/platform_data/atmel.h | 8
 include/linux/platform_data/dma-dw.h | 2
 include/linux/platform_data/edma.h | 104
 include/linux/pmem.h | 26
 include/linux/property.h | 10
 include/linux/psci.h | 2
 include/linux/pwm.h | 3
 include/linux/qcom_scm.h | 2
 include/linux/rbtree.h | 12
 include/linux/sched.h | 32
 include/linux/scpi_protocol.h | 78
 include/linux/signal.h | 1
 include/linux/skbuff.h | 6
 include/linux/slab.h | 45
 include/linux/soc/brcmstb/brcmstb.h | 10
 include/linux/soc/qcom/smd.h | 11
 include/linux/soc/qcom/smem.h | 2
 include/linux/spi/pxa2xx_spi.h | 1
 include/linux/sunrpc/bc_xprt.h | 5
 include/linux/sunrpc/cache.h | 16
 include/linux/sunrpc/svc_rdma.h | 12
 include/linux/sunrpc/xprt.h | 9
 include/linux/sunrpc/xprtsock.h | 2
 include/linux/sunxi-rsb.h | 105
 include/linux/syscalls.h | 2
 include/linux/sysfs.h | 18
 include/linux/tcp.h | 7
 include/linux/thermal.h | 11
 include/linux/tty.h | 6
 include/linux/types.h | 2
 include/linux/usb/gadget_configfs.h | 19
 include/linux/vfio.h | 3
 include/linux/vga_switcheroo.h | 100
 include/linux/watchdog.h | 23
 include/linux/xattr.h | 18
 include/linux/zpool.h | 8
 include/linux/zsmalloc.h | 2
 include/linux/zutil.h | 4
95 files changed, 1387 insertions(+), 921 deletions(-)
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index d6f95bb481d4..054833939995 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -514,6 +514,11 @@ static inline bool has_acpi_companion(struct device *dev)
return false;
}
+static inline void acpi_preset_companion(struct device *dev,
+ struct acpi_device *parent, u64 addr)
+{
+}
+
static inline const char *acpi_dev_name(struct acpi_device *adev)
{
return NULL;
@@ -596,11 +601,16 @@ static inline int acpi_device_modalias(struct device *dev,
return -ENODEV;
}
-static inline bool acpi_check_dma(struct acpi_device *adev, bool *coherent)
+static inline bool acpi_dma_supported(struct acpi_device *adev)
{
return false;
}
+static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
+{
+ return DEV_DMA_NOT_SUPPORTED;
+}
+
#define ACPI_PTR(_ptr) (NULL)
#endif /* !CONFIG_ACPI */
diff --git a/include/linux/atmel_tc.h b/include/linux/atmel_tc.h
index b87c1c7c242a..468fdfa643f0 100644
--- a/include/linux/atmel_tc.h
+++ b/include/linux/atmel_tc.h
@@ -67,6 +67,7 @@ struct atmel_tc {
const struct atmel_tcb_config *tcb_config;
int irq[3];
struct clk *clk[3];
+ struct clk *slow_clk;
struct list_head node;
bool allocated;
};
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index e63553386ae7..2b8ed123ad36 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -164,6 +164,8 @@ static inline __u8 ror8(__u8 word, unsigned int shift)
* sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
* @value: value to sign extend
* @index: 0 based bit index (0<=index<32) to sign bit
+ *
+ * This is safe to use for 16- and 8-bit types as well.
*/
static inline __s32 sign_extend32(__u32 value, int index)
{
@@ -171,6 +173,17 @@ static inline __s32 sign_extend32(__u32 value, int index)
return (__s32)(value << shift) >> shift;
}
+/**
+ * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
+ * @value: value to sign extend
+ * @index: 0 based bit index (0<=index<64) to sign bit
+ */
+static inline __s64 sign_extend64(__u64 value, int index)
+{
+ __u8 shift = 63 - index;
+ return (__s64)(value << shift) >> shift;
+}
+
static inline unsigned fls_long(unsigned long l)
{
if (sizeof(l) == 4)
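
For reference, a small userspace check of the new sign_extend64() helper, restated with stdint types (this example is not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Restatement of sign_extend64(): treat bit 'index' of 'value' as the
 * sign bit and propagate it through all higher bits. Relies on
 * arithmetic right shift of signed values, as the kernel does. */
static inline int64_t sign_extend64(uint64_t value, int index)
{
	uint8_t shift = 63 - index;
	return (int64_t)(value << shift) >> shift;
}

int main(void)
{
	/* With bit 7 as the sign bit, 0xFF reads as -1; with bit 8 it stays 255. */
	printf("%lld\n", (long long)sign_extend64(0xFF, 7)); /* -1 */
	printf("%lld\n", (long long)sign_extend64(0xFF, 8)); /* 255 */
	return 0;
}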
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 83cc9d4e5455..daf17d70aeca 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -59,6 +59,9 @@ struct blk_mq_hw_ctx {
struct blk_mq_cpu_notifier cpu_notifier;
struct kobject kobj;
+
+ unsigned long poll_invoked;
+ unsigned long poll_success;
};
struct blk_mq_tag_set {
@@ -97,6 +100,8 @@ typedef void (exit_request_fn)(void *, struct request *, unsigned int,
typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
bool);
typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
+typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
+
struct blk_mq_ops {
/*
@@ -114,6 +119,11 @@ struct blk_mq_ops {
*/
timeout_fn *timeout;
+ /*
+ * Called to poll for completion of a specific tag.
+ */
+ poll_fn *poll;
+
softirq_done_fn *complete;
/*
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index e8130138f29d..0fb65843ec1e 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -244,4 +244,28 @@ enum rq_flag_bits {
#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
#define REQ_NO_TIMEOUT (1ULL << __REQ_NO_TIMEOUT)
+typedef unsigned int blk_qc_t;
+#define BLK_QC_T_NONE -1U
+#define BLK_QC_T_SHIFT 16
+
+static inline bool blk_qc_t_valid(blk_qc_t cookie)
+{
+ return cookie != BLK_QC_T_NONE;
+}
+
+static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
+{
+ return tag | (queue_num << BLK_QC_T_SHIFT);
+}
+
+static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
+{
+ return cookie >> BLK_QC_T_SHIFT;
+}
+
+static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
+{
+ return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
+}
+
#endif /* __LINUX_BLK_TYPES_H */
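
The cookie is just a tag and a hardware queue number packed into one 32-bit value. A standalone sketch of the round-trip (helpers restated from the hunk above; the main() harness is illustrative):

#include <assert.h>
#include <stdio.h>

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE	(-1U)
#define BLK_QC_T_SHIFT	16

/* Helpers restated from blk_types.h above. */
static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
{
	return tag | (queue_num << BLK_QC_T_SHIFT);
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return cookie >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

int main(void)
{
	/* Queue 3, tag 42 pack into one cookie and unpack losslessly. */
	blk_qc_t cookie = blk_tag_to_qc_t(42, 3);

	assert(blk_qc_t_to_queue_num(cookie) == 3);
	assert(blk_qc_t_to_tag(cookie) == 42);
	printf("cookie=0x%x\n", cookie); /* 0x3002a */
	return 0;
}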
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d045ca8487af..c0d2b7927c1f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -209,7 +209,7 @@ static inline unsigned short req_get_ioprio(struct request *req)
struct blk_queue_ctx;
typedef void (request_fn_proc) (struct request_queue *q);
-typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
+typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
@@ -487,6 +487,7 @@ struct request_queue {
#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/
+#define QUEUE_FLAG_POLL 22 /* IO polling enabled if set */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -761,7 +762,7 @@ static inline void rq_flush_dcache_pages(struct request *rq)
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
-extern void generic_make_request(struct bio *bio);
+extern blk_qc_t generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
@@ -793,6 +794,8 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
struct scsi_ioctl_command __user *);
+extern int blk_queue_enter(struct request_queue *q, gfp_t gfp);
+extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
@@ -814,6 +817,8 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
struct request *, int, rq_end_io_fn *);
+bool blk_poll(struct request_queue *q, blk_qc_t cookie);
+
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
return bdev->bd_disk->queue; /* this is never NULL */
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index e6797ded700e..89d9aa9e79bf 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -227,8 +227,6 @@ int cont_write_begin(struct file *, struct address_space *, loff_t,
get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
-int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
- get_block_t get_block);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 397c5cd09794..3e3799cdc6e6 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -29,8 +29,9 @@
#define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */
#define CEPH_OPT_MYIP (1<<2) /* specified my ip */
#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */
-#define CEPH_OPT_NOMSGAUTH (1<<4) /* not require cephx message signature */
+#define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require msg signing feat */
#define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */
+#define CEPH_OPT_NOMSGSIGN (1<<6) /* don't sign msgs */
#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY)
@@ -137,6 +138,7 @@ struct ceph_client {
#endif
};
+#define from_msgr(ms) container_of(ms, struct ceph_client, msgr)
/*
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index b2371d9b51fa..71b1d6cdcb5d 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -43,10 +43,9 @@ struct ceph_connection_operations {
struct ceph_msg * (*alloc_msg) (struct ceph_connection *con,
struct ceph_msg_header *hdr,
int *skip);
- int (*sign_message) (struct ceph_connection *con, struct ceph_msg *msg);
- int (*check_message_signature) (struct ceph_connection *con,
- struct ceph_msg *msg);
+ int (*sign_message) (struct ceph_msg *msg);
+ int (*check_message_signature) (struct ceph_msg *msg);
};
/* use format string %s%d */
@@ -58,8 +57,6 @@ struct ceph_messenger {
atomic_t stopping;
possible_net_t net;
- bool nocrc;
- bool tcp_nodelay;
/*
* the global_seq counts connections i (attempt to) initiate
@@ -67,9 +64,6 @@ struct ceph_messenger {
*/
u32 global_seq;
spinlock_t global_seq_lock;
-
- u64 supported_features;
- u64 required_features;
};
enum ceph_msg_data_type {
@@ -268,11 +262,7 @@ extern void ceph_msgr_exit(void);
extern void ceph_msgr_flush(void);
extern void ceph_messenger_init(struct ceph_messenger *msgr,
- struct ceph_entity_addr *myaddr,
- u64 supported_features,
- u64 required_features,
- bool nocrc,
- bool tcp_nodelay);
+ struct ceph_entity_addr *myaddr);
extern void ceph_messenger_fini(struct ceph_messenger *msgr);
extern void ceph_con_init(struct ceph_connection *con, void *private,
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 0e3110a0b771..22ab246feed3 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -205,7 +205,10 @@
#if GCC_VERSION >= 40600
/*
- * Tell the optimizer that something else uses this function or variable.
+ * When used with Link Time Optimization, gcc can optimize away C functions or
+ * variables which are referenced only from assembly code. __visible tells the
+ * optimizer that something else uses this function or variable, thus preventing
+ * this.
*/
#define __visible __attribute__((externally_visible))
#endif
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 63a36e89d0eb..758a029011b1 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -125,86 +125,33 @@ struct configfs_attribute {
const char *ca_name;
struct module *ca_owner;
umode_t ca_mode;
+ ssize_t (*show)(struct config_item *, char *);
+ ssize_t (*store)(struct config_item *, const char *, size_t);
};
-/*
- * Users often need to create attribute structures for their configurable
- * attributes, containing a configfs_attribute member and function pointers
- * for the show() and store() operations on that attribute. If they don't
- * need anything else on the extended attribute structure, they can use
- * this macro to define it The argument _item is the name of the
- * config_item structure.
- */
-#define CONFIGFS_ATTR_STRUCT(_item) \
-struct _item##_attribute { \
- struct configfs_attribute attr; \
- ssize_t (*show)(struct _item *, char *); \
- ssize_t (*store)(struct _item *, const char *, size_t); \
+#define CONFIGFS_ATTR(_pfx, _name) \
+static struct configfs_attribute _pfx##attr_##_name = { \
+ .ca_name = __stringify(_name), \
+ .ca_mode = S_IRUGO | S_IWUSR, \
+ .ca_owner = THIS_MODULE, \
+ .show = _pfx##_name##_show, \
+ .store = _pfx##_name##_store, \
}
-/*
- * With the extended attribute structure, users can use this macro
- * (similar to sysfs' __ATTR) to make defining attributes easier.
- * An example:
- * #define MYITEM_ATTR(_name, _mode, _show, _store) \
- * struct myitem_attribute childless_attr_##_name = \
- * __CONFIGFS_ATTR(_name, _mode, _show, _store)
- */
-#define __CONFIGFS_ATTR(_name, _mode, _show, _store) \
-{ \
- .attr = { \
- .ca_name = __stringify(_name), \
- .ca_mode = _mode, \
- .ca_owner = THIS_MODULE, \
- }, \
- .show = _show, \
- .store = _store, \
-}
-/* Here is a readonly version, only requiring a show() operation */
-#define __CONFIGFS_ATTR_RO(_name, _show) \
-{ \
- .attr = { \
- .ca_name = __stringify(_name), \
- .ca_mode = 0444, \
- .ca_owner = THIS_MODULE, \
- }, \
- .show = _show, \
+#define CONFIGFS_ATTR_RO(_pfx, _name) \
+static struct configfs_attribute _pfx##attr_##_name = { \
+ .ca_name = __stringify(_name), \
+ .ca_mode = S_IRUGO, \
+ .ca_owner = THIS_MODULE, \
+ .show = _pfx##_name##_show, \
}
-/*
- * With these extended attributes, the simple show_attribute() and
- * store_attribute() operations need to call the show() and store() of the
- * attributes. This is a common pattern, so we provide a macro to define
- * them. The argument _item is the name of the config_item structure.
- * This macro expects the attributes to be named "struct <name>_attribute"
- * and the function to_<name>() to exist;
- */
-#define CONFIGFS_ATTR_OPS(_item) \
-static ssize_t _item##_attr_show(struct config_item *item, \
- struct configfs_attribute *attr, \
- char *page) \
-{ \
- struct _item *_item = to_##_item(item); \
- struct _item##_attribute *_item##_attr = \
- container_of(attr, struct _item##_attribute, attr); \
- ssize_t ret = 0; \
- \
- if (_item##_attr->show) \
- ret = _item##_attr->show(_item, page); \
- return ret; \
-} \
-static ssize_t _item##_attr_store(struct config_item *item, \
- struct configfs_attribute *attr, \
- const char *page, size_t count) \
-{ \
- struct _item *_item = to_##_item(item); \
- struct _item##_attribute *_item##_attr = \
- container_of(attr, struct _item##_attribute, attr); \
- ssize_t ret = -EINVAL; \
- \
- if (_item##_attr->store) \
- ret = _item##_attr->store(_item, page, count); \
- return ret; \
+#define CONFIGFS_ATTR_WO(_pfx, _name) \
+static struct configfs_attribute _pfx##attr_##_name = { \
+ .ca_name = __stringify(_name), \
+ .ca_mode = S_IWUSR, \
+ .ca_owner = THIS_MODULE, \
+ .store = _pfx##_name##_store, \
}
/*
@@ -223,8 +170,6 @@ static ssize_t _item##_attr_store(struct config_item *item, \
*/
struct configfs_item_operations {
void (*release)(struct config_item *);
- ssize_t (*show_attribute)(struct config_item *, struct configfs_attribute *,char *);
- ssize_t (*store_attribute)(struct config_item *,struct configfs_attribute *,const char *, size_t);
int (*allow_link)(struct config_item *src, struct config_item *target);
int (*drop_link)(struct config_item *src, struct config_item *target);
};
@@ -252,6 +197,16 @@ static inline struct configfs_subsystem *to_configfs_subsystem(struct config_gro
int configfs_register_subsystem(struct configfs_subsystem *subsys);
void configfs_unregister_subsystem(struct configfs_subsystem *subsys);
+int configfs_register_group(struct config_group *parent_group,
+ struct config_group *group);
+void configfs_unregister_group(struct config_group *group);
+
+struct config_group *
+configfs_register_default_group(struct config_group *parent_group,
+ const char *name,
+ struct config_item_type *item_type);
+void configfs_unregister_default_group(struct config_group *group);
+
/* These functions can sleep and can alloc with GFP_KERNEL */
/* WARNING: These cannot be called underneath configfs callbacks!! */
int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target);
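
With show/store moved into struct configfs_attribute itself, a subsystem no longer needs the per-item wrapper boilerplate the removed macros generated. A hypothetical kernel-context sketch of the new pattern ("myitem" and its "speed" field are invented for illustration):

struct myitem {
	struct config_item item;
	int speed;
};

static inline struct myitem *to_myitem(struct config_item *item)
{
	return container_of(item, struct myitem, item);
}

static ssize_t myitem_speed_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", to_myitem(item)->speed);
}

static ssize_t myitem_speed_store(struct config_item *item,
				  const char *page, size_t count)
{
	int ret = kstrtoint(page, 10, &to_myitem(item)->speed);

	return ret ? ret : count;
}

/* Expands to "static struct configfs_attribute myitem_attr_speed" with
 * .show/.store wired to the functions above and mode S_IRUGO | S_IWUSR. */
CONFIGFS_ATTR(myitem_, speed);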
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 008fc67d0d96..68b575afe5f5 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -10,6 +10,10 @@
#ifdef CONFIG_CONTEXT_TRACKING
extern void context_tracking_cpu_set(int cpu);
+/* Called with interrupts disabled. */
+extern void __context_tracking_enter(enum ctx_state state);
+extern void __context_tracking_exit(enum ctx_state state);
+
extern void context_tracking_enter(enum ctx_state state);
extern void context_tracking_exit(enum ctx_state state);
extern void context_tracking_user_enter(void);
@@ -18,13 +22,13 @@ extern void context_tracking_user_exit(void);
static inline void user_enter(void)
{
if (context_tracking_is_enabled())
- context_tracking_user_enter();
+ context_tracking_enter(CONTEXT_USER);
}
static inline void user_exit(void)
{
if (context_tracking_is_enabled())
- context_tracking_user_exit();
+ context_tracking_exit(CONTEXT_USER);
}
static inline enum ctx_state exception_enter(void)
@@ -88,13 +92,13 @@ static inline void guest_enter(void)
current->flags |= PF_VCPU;
if (context_tracking_is_enabled())
- context_tracking_enter(CONTEXT_GUEST);
+ __context_tracking_enter(CONTEXT_GUEST);
}
static inline void guest_exit(void)
{
if (context_tracking_is_enabled())
- context_tracking_exit(CONTEXT_GUEST);
+ __context_tracking_exit(CONTEXT_GUEST);
if (vtime_accounting_enabled())
vtime_guest_exit(current);
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 5a1311942358..85a868ccb493 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -104,6 +104,9 @@ extern void cpuset_print_current_mems_allowed(void);
*/
static inline unsigned int read_mems_allowed_begin(void)
{
+ if (!cpusets_enabled())
+ return 0;
+
return read_seqcount_begin(&current->mems_allowed_seq);
}
@@ -115,6 +118,9 @@ static inline unsigned int read_mems_allowed_begin(void)
*/
static inline bool read_mems_allowed_retry(unsigned int seq)
{
+ if (!cpusets_enabled())
+ return false;
+
return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
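
The two helpers implement the usual seqcount read-retry pattern, now short-circuited when no cpusets are in use. A userspace analog of the read side (all names invented for illustration):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint seq;		/* stand-in for mems_allowed_seq */
static int mems_allowed;	/* stand-in for the protected data */

static unsigned int read_begin(void)
{
	unsigned int s;

	while ((s = atomic_load(&seq)) & 1)
		;	/* odd count: writer in progress, spin */
	return s;
}

static bool read_retry(unsigned int s)
{
	return atomic_load(&seq) != s;	/* changed: redo the read section */
}

int main(void)
{
	unsigned int s;
	int snapshot;

	do {
		s = read_begin();
		snapshot = mems_allowed;	/* read section */
	} while (read_retry(s));

	printf("stable snapshot: %d\n", snapshot);
	return 0;
}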
diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h
new file mode 100644
index 000000000000..7adf6cc4b305
--- /dev/null
+++ b/include/linux/devfreq_cooling.h
@@ -0,0 +1,81 @@
+/*
+ * devfreq_cooling: Thermal cooling device implementation for devices using
+ * devfreq
+ *
+ * Copyright (C) 2014-2015 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DEVFREQ_COOLING_H__
+#define __DEVFREQ_COOLING_H__
+
+#include <linux/devfreq.h>
+#include <linux/thermal.h>
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+
+/**
+ * struct devfreq_cooling_power - Devfreq cooling power ops
+ * @get_static_power: Take voltage, in mV, and return the static power
+ * in mW. If NULL, the static power is assumed
+ * to be 0.
+ * @get_dynamic_power: Take voltage, in mV, and frequency, in Hz, and
+ * return the dynamic power draw in mW. If NULL,
+ * a simple power model is used.
+ * @dyn_power_coeff: Coefficient for the simple dynamic power model in
+ * mW/(MHz mV mV).
+ * If get_dynamic_power() is NULL, then the
+ * dynamic power is calculated as
+ * @dyn_power_coeff * frequency * voltage^2
+ */
+struct devfreq_cooling_power {
+ unsigned long (*get_static_power)(unsigned long voltage);
+ unsigned long (*get_dynamic_power)(unsigned long freq,
+ unsigned long voltage);
+ unsigned long dyn_power_coeff;
+};
+
+struct thermal_cooling_device *
+of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
+ struct devfreq_cooling_power *dfc_power);
+struct thermal_cooling_device *
+of_devfreq_cooling_register(struct device_node *np, struct devfreq *df);
+struct thermal_cooling_device *devfreq_cooling_register(struct devfreq *df);
+void devfreq_cooling_unregister(struct thermal_cooling_device *dfc);
+
+#else /* !CONFIG_DEVFREQ_THERMAL */
+
+static inline struct thermal_cooling_device *
+of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
+ struct devfreq_cooling_power *dfc_power)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline struct thermal_cooling_device *
+of_devfreq_cooling_register(struct device_node *np, struct devfreq *df)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline struct thermal_cooling_device *
+devfreq_cooling_register(struct devfreq *df)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void
+devfreq_cooling_unregister(struct thermal_cooling_device *dfc)
+{
+}
+
+#endif /* CONFIG_DEVFREQ_THERMAL */
+#endif /* __DEVFREQ_COOLING_H__ */
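
Worked example of the simple dynamic power model described in the header comment, P = dyn_power_coeff * f * V^2 with the coefficient in mW/(MHz mV mV). All numbers below are made up, and the kernel does this in integer arithmetic; floating point is used here only for clarity:

#include <stdio.h>

int main(void)
{
	double coeff = 0.00001;		/* hypothetical dyn_power_coeff */
	double freq_mhz = 500.0;	/* operating point: 500 MHz */
	double volt_mv = 900.0;		/* operating point: 900 mV */

	double p_dyn_mw = coeff * freq_mhz * volt_mv * volt_mv;

	printf("dynamic power: %.1f mW\n", p_dyn_mw); /* 4050.0 mW */
	return 0;
}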
diff --git a/include/linux/device.h b/include/linux/device.h
index 5d7bc6349930..b8f411b57dcb 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -604,13 +604,21 @@ typedef void (*dr_release_t)(struct device *dev, void *res);
typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
#ifdef CONFIG_DEBUG_DEVRES
-extern void *__devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
- const char *name);
+extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
+ int nid, const char *name);
#define devres_alloc(release, size, gfp) \
- __devres_alloc(release, size, gfp, #release)
+ __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
+#define devres_alloc_node(release, size, gfp, nid) \
+ __devres_alloc_node(release, size, gfp, nid, #release)
#else
-extern void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp);
+extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
+ int nid);
+static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
+{
+ return devres_alloc_node(release, size, gfp, NUMA_NO_NODE);
+}
#endif
+
extern void devres_for_each_res(struct device *dev, dr_release_t release,
dr_match_t match, void *match_data,
void (*fn)(struct device *, void *, void *),
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index ac07ff090919..2e551e2d2d03 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -1,6 +1,7 @@
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H
+#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -145,7 +146,9 @@ static inline void arch_teardown_dma_ops(struct device *dev) { }
static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
- return dev->dma_parms ? dev->dma_parms->max_segment_size : 65536;
+ if (dev->dma_parms && dev->dma_parms->max_segment_size)
+ return dev->dma_parms->max_segment_size;
+ return SZ_64K;
}
static inline unsigned int dma_set_max_seg_size(struct device *dev,
@@ -154,14 +157,15 @@ static inline unsigned int dma_set_max_seg_size(struct device *dev,
if (dev->dma_parms) {
dev->dma_parms->max_segment_size = size;
return 0;
- } else
- return -EIO;
+ }
+ return -EIO;
}
static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
- return dev->dma_parms ?
- dev->dma_parms->segment_boundary_mask : 0xffffffff;
+ if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
+ return dev->dma_parms->segment_boundary_mask;
+ return DMA_BIT_MASK(32);
}
static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
@@ -169,8 +173,8 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
if (dev->dma_parms) {
dev->dma_parms->segment_boundary_mask = mask;
return 0;
- } else
- return -EIO;
+ }
+ return -EIO;
}
#ifndef dma_max_pfn
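
Note the behavioral detail in the rewritten fallbacks: a device that has dma_parms allocated but never set max_segment_size (still 0) now also gets the 64K default, where the old ternary returned 0. A userspace restatement:

#include <stdio.h>

#define SZ_64K 0x10000

struct device_dma_parameters { unsigned int max_segment_size; };
struct device { struct device_dma_parameters *dma_parms; };

/* Mirrors the new fallback logic from dma-mapping.h above. */
static unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

int main(void)
{
	struct device_dma_parameters parms = { .max_segment_size = 0 };
	struct device dev = { .dma_parms = &parms };

	printf("%u\n", dma_get_max_seg_size(&dev)); /* 65536, not 0 */
	return 0;
}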
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 41a3b11f7796..3d003805aac3 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -156,7 +156,7 @@ struct fb_cursor_user {
#define FB_EVENT_GET_REQ 0x0D
/* Unbind from the console if possible */
#define FB_EVENT_FB_UNBIND 0x0E
-/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga switcheroo */
+/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga_switcheroo */
#define FB_EVENT_REMAP_ALL_CONSOLE 0x0F
/* A hardware display blank early change occurred */
#define FB_EARLY_EVENT_BLANK 0x10
diff --git a/include/linux/fence.h b/include/linux/fence.h
index 39efee130d2b..bb522011383b 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -280,6 +280,22 @@ fence_is_signaled(struct fence *fence)
}
/**
+ * fence_is_later - return whether f1 is chronologically later than f2
+ * @f1: [in] the first fence from the same context
+ * @f2: [in] the second fence from the same context
+ *
+ * Returns true if f1 is chronologically later than f2. Both fences must be
+ * from the same context, since a seqno is not re-used across contexts.
+ */
+static inline bool fence_is_later(struct fence *f1, struct fence *f2)
+{
+ if (WARN_ON(f1->context != f2->context))
+ return false;
+
+ return f1->seqno - f2->seqno < INT_MAX;
+}
+
+/**
* fence_later - return the chronologically later fence
* @f1: [in] the first fence from the same context
* @f2: [in] the second fence from the same context
@@ -298,14 +314,15 @@ static inline struct fence *fence_later(struct fence *f1, struct fence *f2)
* set if enable_signaling wasn't called, and enabling that here is
* overkill.
*/
- if (f2->seqno - f1->seqno <= INT_MAX)
- return fence_is_signaled(f2) ? NULL : f2;
- else
+ if (fence_is_later(f1, f2))
return fence_is_signaled(f1) ? NULL : f1;
+ else
+ return fence_is_signaled(f2) ? NULL : f2;
}
signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout);
-
+signed long fence_wait_any_timeout(struct fence **fences, uint32_t count,
+ bool intr, signed long timeout);
/**
* fence_wait - sleep until the fence gets signaled
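
The comparison in fence_is_later() is modular: unsigned subtraction keeps the ordering correct across seqno wraparound as long as the two fences are less than INT_MAX apart. A standalone illustration of that idiom (not the kernel code itself):

#include <assert.h>
#include <limits.h>
#include <stdbool.h>
#include <stdint.h>

static bool seqno_later(uint32_t a, uint32_t b)
{
	return a - b < INT_MAX;	/* wraparound-safe for |a - b| < INT_MAX */
}

int main(void)
{
	assert(seqno_later(5, 3));		/* plainly later */
	assert(!seqno_later(3, 5));		/* plainly earlier */
	assert(seqno_later(1, 0xffffffffu));	/* 1 came after the wrap */
	return 0;
}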
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 9a1cb8c605e0..3aa514254161 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1665,8 +1665,6 @@ struct inode_operations {
umode_t create_mode, int *opened);
int (*tmpfile) (struct inode *, struct dentry *, umode_t);
int (*set_acl)(struct inode *, struct posix_acl *, int);
-
- /* WARNING: probably going away soon, do not use! */
} ____cacheline_aligned;
ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
@@ -2613,7 +2611,7 @@ static inline void remove_inode_hash(struct inode *inode)
extern void inode_sb_list_add(struct inode *inode);
#ifdef CONFIG_BLOCK
-extern void submit_bio(int, struct bio *);
+extern blk_qc_t submit_bio(int, struct bio *);
extern int bdev_read_only(struct block_device *);
#endif
extern int set_blocksize(struct block_device *, int);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index f92cbd2f4450..8942af0813e3 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -14,7 +14,7 @@ struct vm_area_struct;
#define ___GFP_HIGHMEM 0x02u
#define ___GFP_DMA32 0x04u
#define ___GFP_MOVABLE 0x08u
-#define ___GFP_WAIT 0x10u
+#define ___GFP_RECLAIMABLE 0x10u
#define ___GFP_HIGH 0x20u
#define ___GFP_IO 0x40u
#define ___GFP_FS 0x80u
@@ -29,18 +29,17 @@ struct vm_area_struct;
#define ___GFP_NOMEMALLOC 0x10000u
#define ___GFP_HARDWALL 0x20000u
#define ___GFP_THISNODE 0x40000u
-#define ___GFP_RECLAIMABLE 0x80000u
+#define ___GFP_ATOMIC 0x80000u
#define ___GFP_NOACCOUNT 0x100000u
#define ___GFP_NOTRACK 0x200000u
-#define ___GFP_NO_KSWAPD 0x400000u
+#define ___GFP_DIRECT_RECLAIM 0x400000u
#define ___GFP_OTHER_NODE 0x800000u
#define ___GFP_WRITE 0x1000000u
+#define ___GFP_KSWAPD_RECLAIM 0x2000000u
/* If the above are modified, __GFP_BITS_SHIFT may need updating */
/*
- * GFP bitmasks..
- *
- * Zone modifiers (see linux/mmzone.h - low three bits)
+ * Physical address zone modifiers (see linux/mmzone.h - low four bits)
*
* Do not put any conditional on these. If necessary modify the definitions
* without the underscores and use them consistently. The definitions here may
@@ -50,116 +49,229 @@ struct vm_area_struct;
#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
-#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */
+#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
+
+/*
+ * Page mobility and placement hints
+ *
+ * These flags provide hints about how mobile the page is. Pages with similar
+ * mobility are placed within the same pageblocks to minimise problems due
+ * to external fragmentation.
+ *
+ * __GFP_MOVABLE (also a zone modifier) indicates that the page can be
+ * moved by page migration during memory compaction or can be reclaimed.
+ *
+ * __GFP_RECLAIMABLE is used for slab allocations that specify
+ * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
+ *
+ * __GFP_WRITE indicates the caller intends to dirty the page. Where possible,
+ * these pages will be spread between local zones to avoid all the dirty
+ * pages being in one zone (fair zone allocation policy).
+ *
+ * __GFP_HARDWALL enforces the cpuset memory allocation policy.
+ *
+ * __GFP_THISNODE forces the allocation to be satisfied from the requested
+ * node with no fallbacks or placement policy enforcements.
+ */
+#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
+#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
+#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
+#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
+
/*
- * Action modifiers - doesn't change the zoning
+ * Watermark modifiers -- controls access to emergency reserves
+ *
+ * __GFP_HIGH indicates that the caller is high-priority and that granting
+ * the request is necessary before the system can make forward progress.
+ * For example, creating an IO context to clean pages.
+ *
+ * __GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
+ * high priority. Users are typically interrupt handlers. This may be
+ * used in conjunction with __GFP_HIGH.
+ *
+ * __GFP_MEMALLOC allows access to all memory. This should only be used when
+ * the caller guarantees the allocation will allow more memory to be freed
+ * very shortly, e.g. process exiting or swapping. Users should either
+ * be the MM or co-ordinate closely with the VM (e.g. swap over NFS).
+ *
+ * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
+ * This takes precedence over the __GFP_MEMALLOC flag if both are set.
+ *
+ * __GFP_NOACCOUNT ignores the accounting for kmemcg limit enforcement.
+ */
+#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
+#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
+#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
+#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
+#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT)
+
+/*
+ * Reclaim modifiers
+ *
+ * __GFP_IO can start physical IO.
+ *
+ * __GFP_FS can call down to the low-level FS. Clearing the flag avoids the
+ * allocator recursing into the filesystem which might already be holding
+ * locks.
+ *
+ * __GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
+ * This flag can be cleared to avoid unnecessary delays when a fallback
+ * option is available.
+ *
+ * __GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
+ * the low watermark is reached and have it reclaim pages until the high
+ * watermark is reached. A caller may wish to clear this flag when fallback
+ * options are available and the reclaim is likely to disrupt the system. The
+ * canonical example is THP allocation where a fallback is cheap but
+ * reclaim/compaction may cause indirect stalls.
+ *
+ * __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
*
* __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
- * _might_ fail. This depends upon the particular VM implementation.
+ * _might_ fail. This depends upon the particular VM implementation.
*
* __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
- * cannot handle allocation failures. New users should be evaluated carefully
- * (and the flag should be used only when there is no reasonable failure policy)
- * but it is definitely preferable to use the flag rather than opencode endless
- * loop around allocator.
+ * cannot handle allocation failures. New users should be evaluated carefully
+ * (and the flag should be used only when there is no reasonable failure
+ * policy) but it is definitely preferable to use the flag rather than
+ * opencode endless loop around allocator.
*
* __GFP_NORETRY: The VM implementation must not retry indefinitely and will
- * return NULL when direct reclaim and memory compaction have failed to allow
- * the allocation to succeed. The OOM killer is not called with the current
- * implementation.
- *
- * __GFP_MOVABLE: Flag that this page will be movable by the page migration
- * mechanism or reclaimed
+ * return NULL when direct reclaim and memory compaction have failed to allow
+ * the allocation to succeed. The OOM killer is not called with the current
+ * implementation.
*/
-#define __GFP_WAIT ((__force gfp_t)___GFP_WAIT) /* Can wait and reschedule? */
-#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) /* Should access emergency pools? */
-#define __GFP_IO ((__force gfp_t)___GFP_IO) /* Can start physical IO? */
-#define __GFP_FS ((__force gfp_t)___GFP_FS) /* Can call down to low-level FS? */
-#define __GFP_COLD ((__force gfp_t)___GFP_COLD) /* Cache-cold page required */
-#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) /* Suppress page allocation failure warning */
-#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) /* See above */
-#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) /* See above */
-#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* See above */
-#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)/* Allow access to emergency reserves */
-#define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */
-#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Return zeroed page on success */
-#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves.
- * This takes precedence over the
- * __GFP_MEMALLOC flag if both are
- * set
- */
-#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
-#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
-#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
-#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT) /* Don't account to kmemcg */
-#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */
-
-#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
-#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
-#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
+#define __GFP_IO ((__force gfp_t)___GFP_IO)
+#define __GFP_FS ((__force gfp_t)___GFP_FS)
+#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
+#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
+#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
+#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT)
+#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
+#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
/*
- * This may seem redundant, but it's a way of annotating false positives vs.
- * allocations that simply cannot be supported (e.g. page tables).
+ * Action modifiers
+ *
+ * __GFP_COLD indicates that the caller does not expect the page to be used
+ * in the near future. Where possible, a cache-cold page will be returned.
+ *
+ * __GFP_NOWARN suppresses allocation failure reports.
+ *
+ * __GFP_COMP adds compound page metadata.
+ *
+ * __GFP_ZERO returns a zeroed page on success.
+ *
+ * __GFP_NOTRACK avoids tracking with kmemcheck.
+ *
+ * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
+ * distinguishing in the source between false positives and allocations that
+ * cannot be supported (e.g. page tables).
+ *
+ * __GFP_OTHER_NODE is for allocations that are on a remote node but that
+ * should not be accounted for as a remote allocation in vmstat. A
+ * typical user would be khugepaged collapsing a huge page on a remote
+ * node.
*/
+#define __GFP_COLD ((__force gfp_t)___GFP_COLD)
+#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
+#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
+#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
+#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK)
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
+#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
+/* Room for N __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 26
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
-/* This equals 0, but use constants in case they ever change */
-#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH)
-/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
-#define GFP_ATOMIC (__GFP_HIGH)
-#define GFP_NOIO (__GFP_WAIT)
-#define GFP_NOFS (__GFP_WAIT | __GFP_IO)
-#define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS)
-#define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \
+/*
+ * Useful GFP flag combinations that are commonly used. It is recommended
+ * that subsystems start with one of these combinations and then set/clear
+ * __GFP_FOO flags as necessary.
+ *
+ * GFP_ATOMIC users cannot sleep and need the allocation to succeed. A lower
+ * watermark is applied to allow access to "atomic reserves".
+ *
+ * GFP_KERNEL is typical for kernel-internal allocations. The caller requires
+ * ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
+ *
+ * GFP_NOWAIT is for kernel allocations that should not stall for direct
+ * reclaim, start physical IO or use any filesystem callback.
+ *
+ * GFP_NOIO will use direct reclaim to discard clean pages or slab pages
+ * that do not require the starting of any physical IO.
+ *
+ * GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
+ *
+ * GFP_USER is for userspace allocations that also need to be directly
+ * accessible by the kernel or hardware. It is typically used by hardware
+ * for buffers that are mapped to userspace (e.g. graphics) that hardware
+ * still must DMA to. cpuset limits are enforced for these allocations.
+ *
+ * GFP_DMA exists for historical reasons and should be avoided where possible.
+ * The flag indicates that the caller requires that the lowest zone be
+ * used (ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
+ * it would require careful auditing as some users really require it and
+ * others use the flag to avoid lowmem reserves in ZONE_DMA and treat the
+ * lowest zone as a type of emergency reserve.
+ *
+ * GFP_DMA32 is similar to GFP_DMA except that the caller requires a 32-bit
+ * address.
+ *
+ * GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
+ * do not need to be directly accessible by the kernel but that cannot
+ * move once in use. An example may be a hardware allocation that maps
+ * data directly into userspace but has no addressing limitations.
+ *
+ * GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
+ * need direct access to but can use kmap() when access is required. They
+ * are expected to be movable via page reclaim or page migration. Typically,
+ * pages on the LRU would also be allocated with GFP_HIGHUSER_MOVABLE.
+ *
+ * GFP_TRANSHUGE is used for THP allocations. They are compound allocations
+ * that will fail quickly if memory is not available and will not wake
+ * kswapd on failure.
+ */
+#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
+#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
+#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
+#define GFP_NOIO (__GFP_RECLAIM)
+#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
+#define GFP_TEMPORARY (__GFP_RECLAIM | __GFP_IO | __GFP_FS | \
__GFP_RECLAIMABLE)
-#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
+#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
+#define GFP_DMA __GFP_DMA
+#define GFP_DMA32 __GFP_DMA32
#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
-#define GFP_IOFS (__GFP_IO | __GFP_FS)
-#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
- __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
- __GFP_NO_KSWAPD)
+#define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
+ __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \
+ ~__GFP_KSWAPD_RECLAIM)
-/* This mask makes up all the page movable related flags */
+/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
+#define GFP_MOVABLE_SHIFT 3
-/* Control page allocator reclaim behavior */
-#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
- __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
- __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
-
-/* Control slab gfp mask during early boot */
-#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))
-
-/* Control allocation constraints */
-#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
-
-/* Do not use these with a slab allocator */
-#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
-
-/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
- platforms, used as appropriate on others */
-
-#define GFP_DMA __GFP_DMA
-
-/* 4GB DMA on some platforms */
-#define GFP_DMA32 __GFP_DMA32
-
-/* Convert GFP flags to their corresponding migrate type */
static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
{
- WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
+ VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
+ BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
+ BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);
if (unlikely(page_group_by_mobility_disabled))
return MIGRATE_UNMOVABLE;
/* Group based on mobility */
- return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
- ((gfp_flags & __GFP_RECLAIMABLE) != 0);
+ return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+}
+#undef GFP_MOVABLE_MASK
+#undef GFP_MOVABLE_SHIFT
+
+static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
+{
+ return (bool __force)(gfp_flags & __GFP_DIRECT_RECLAIM);
}
#ifdef CONFIG_HIGHMEM
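
Because the migratetype now falls directly out of the flag bits (the BUILD_BUG_ONs above pin the correspondence), the mapping can be checked in isolation. A userspace sketch, assuming the migratetype ordering UNMOVABLE = 0, MOVABLE = 1, RECLAIMABLE = 2 that those assertions encode:

#include <assert.h>

#define ___GFP_MOVABLE		0x08u
#define ___GFP_RECLAIMABLE	0x10u
#define GFP_MOVABLE_MASK	(___GFP_RECLAIMABLE | ___GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT	3

enum { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RECLAIMABLE };

static int gfpflags_to_migratetype(unsigned int gfp_flags)
{
	/* Bits 3-4 shift down to the migratetype value directly. */
	return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}

int main(void)
{
	assert(gfpflags_to_migratetype(0) == MIGRATE_UNMOVABLE);
	assert(gfpflags_to_migratetype(___GFP_MOVABLE) == MIGRATE_MOVABLE);
	assert(gfpflags_to_migratetype(___GFP_RECLAIMABLE) == MIGRATE_RECLAIMABLE);
	return 0;
}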
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 251a1d382e23..75b66eccc692 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -168,6 +168,8 @@ struct hid_item {
#define HID_UP_MSVENDOR 0xff000000
#define HID_UP_CUSTOM 0x00ff0000
#define HID_UP_LOGIVENDOR 0xffbc0000
+#define HID_UP_LOGIVENDOR2 0xff090000
+#define HID_UP_LOGIVENDOR3 0xff430000
#define HID_UP_LNVENDOR 0xffa00000
#define HID_UP_SENSOR 0x00200000
@@ -563,6 +565,9 @@ struct hid_device { /* device report descriptor */
wait_queue_head_t debug_wait;
};
+#define to_hid_device(pdev) \
+ container_of(pdev, struct hid_device, dev)
+
static inline void *hid_get_drvdata(struct hid_device *hdev)
{
return dev_get_drvdata(&hdev->dev);
@@ -712,6 +717,9 @@ struct hid_driver {
struct device_driver driver;
};
+#define to_hid_driver(pdrv) \
+ container_of(pdrv, struct hid_driver, driver)
+
/**
* hid_ll_driver - low level driver callbacks
* @start: called on probe to start the device
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 6aefcd0031a6..bb3f3297062a 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -78,7 +78,6 @@ static inline void __kunmap_atomic(void *addr)
}
#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
-#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
#define kmap_flush_unused() do {} while(0)
#endif
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index 7edd30515298..24154c26d469 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -32,7 +32,7 @@ static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
return NULL;
- return (struct hugetlb_cgroup *)page[2].lru.next;
+ return (struct hugetlb_cgroup *)page[2].private;
}
static inline
@@ -42,7 +42,7 @@ int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
return -1;
- page[2].lru.next = (void *)h_cg;
+ page[2].private = (unsigned long)h_cg;
return 0;
}
diff --git a/include/linux/i2c-ocores.h b/include/linux/i2c-ocores.h
index 1c06b5c7c308..01edd96fe1f7 100644
--- a/include/linux/i2c-ocores.h
+++ b/include/linux/i2c-ocores.h
@@ -15,6 +15,7 @@ struct ocores_i2c_platform_data {
u32 reg_shift; /* register offset shift value */
u32 reg_io_width; /* register io read/write width */
u32 clock_khz; /* input clock in kHz */
+ bool big_endian; /* registers are big endian */
u8 num_devices; /* number of devices in the devices list */
struct i2c_board_info const *devices; /* devices connected to the bus */
};
diff --git a/include/linux/i2c/i2c-rcar.h b/include/linux/i2c/i2c-rcar.h
deleted file mode 100644
index 496f5c2b23c9..000000000000
--- a/include/linux/i2c/i2c-rcar.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __I2C_R_CAR_H__
-#define __I2C_R_CAR_H__
-
-#include <linux/platform_device.h>
-
-struct i2c_rcar_platform_data {
- u32 bus_speed;
-};
-
-#endif /* __I2C_R_CAR_H__ */
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index 4e6861605050..ce824db48d64 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -9,6 +9,7 @@
#define __LINUX_IRQCHIP_MIPS_GIC_H
#include <linux/clocksource.h>
+#include <linux/ioport.h>
#define GIC_MAX_INTRS 256
@@ -245,6 +246,8 @@
#define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE)
+#ifdef CONFIG_MIPS_GIC
+
extern unsigned int gic_present;
extern void gic_init(unsigned long gic_base_addr,
@@ -264,4 +267,18 @@ extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
extern int gic_get_c0_compare_int(void);
extern int gic_get_c0_perfcount_int(void);
extern int gic_get_c0_fdc_int(void);
+extern int gic_get_usm_range(struct resource *gic_usm_res);
+
+#else /* CONFIG_MIPS_GIC */
+
+#define gic_present 0
+
+static inline int gic_get_usm_range(struct resource *gic_usm_res)
+{
+ /* Shouldn't be called. */
+ return -1;
+}
+
+#endif /* CONFIG_MIPS_GIC */
+
#endif /* __LINUX_IRQCHIP_MIPS_GIC_H */
diff --git a/include/linux/kdev_t.h b/include/linux/kdev_t.h
index c838abe3ee0a..052c7b32cc91 100644
--- a/include/linux/kdev_t.h
+++ b/include/linux/kdev_t.h
@@ -20,7 +20,7 @@
})
/* acceptable for old filesystems */
-static inline int old_valid_dev(dev_t dev)
+static inline bool old_valid_dev(dev_t dev)
{
return MAJOR(dev) < 256 && MINOR(dev) < 256;
}
@@ -35,7 +35,7 @@ static inline dev_t old_decode_dev(u16 val)
return MKDEV((val >> 8) & 255, val & 255);
}
-static inline int new_valid_dev(dev_t dev)
+static inline bool new_valid_dev(dev_t dev)
{
return 1;
}
@@ -54,11 +54,6 @@ static inline dev_t new_decode_dev(u32 dev)
return MKDEV(major, minor);
}
-static inline int huge_valid_dev(dev_t dev)
-{
- return 1;
-}
-
static inline u64 huge_encode_dev(dev_t dev)
{
return new_encode_dev(dev);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 5582410727cb..350dfb08aee3 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -200,28 +200,28 @@ extern int _cond_resched(void);
#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
-/*
- * abs() handles unsigned and signed longs, ints, shorts and chars. For all
- * input types abs() returns a signed long.
- * abs() should not be used for 64-bit types (s64, u64, long long) - use abs64()
- * for those.
+/**
+ * abs - return absolute value of an argument
+ * @x: the value. If @x is of an unsigned type, it is converted to a signed
+ * type first (s64, long or int depending on its size).
+ *
+ * Return: the absolute value of @x. If @x is 64-bit, the macro's return type
+ * is s64, otherwise it is signed long.
*/
-#define abs(x) ({ \
- long ret; \
- if (sizeof(x) == sizeof(long)) { \
- long __x = (x); \
- ret = (__x < 0) ? -__x : __x; \
- } else { \
- int __x = (x); \
- ret = (__x < 0) ? -__x : __x; \
- } \
- ret; \
- })
-
-#define abs64(x) ({ \
- s64 __x = (x); \
- (__x < 0) ? -__x : __x; \
- })
+#define abs(x) __builtin_choose_expr(sizeof(x) == sizeof(s64), ({ \
+ s64 __x = (x); \
+ (__x < 0) ? -__x : __x; \
+ }), ({ \
+ long ret; \
+ if (sizeof(x) == sizeof(long)) { \
+ long __x = (x); \
+ ret = (__x < 0) ? -__x : __x; \
+ } else { \
+ int __x = (x); \
+ ret = (__x < 0) ? -__x : __x; \
+ } \
+ ret; \
+ }))
/**
* reciprocal_scale - "scale" a value into range [0, ep_ro)
@@ -413,6 +413,8 @@ extern __printf(2, 3)
char *kasprintf(gfp_t gfp, const char *fmt, ...);
extern __printf(2, 0)
char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
+extern __printf(2, 0)
+const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
extern __scanf(2, 3)
int sscanf(const char *, const char *, ...);
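
The unified abs() can be exercised in userspace as-is, since __builtin_choose_expr is plain GCC. A short demo (macro copied from the hunk above; s64 mapped to int64_t):

#include <stdint.h>
#include <stdio.h>

typedef int64_t s64;

#define abs(x) __builtin_choose_expr(sizeof(x) == sizeof(s64), ({	\
		s64 __x = (x);						\
		(__x < 0) ? -__x : __x;					\
	}), ({								\
		long ret;						\
		if (sizeof(x) == sizeof(long)) {			\
			long __x = (x);					\
			ret = (__x < 0) ? -__x : __x;			\
		} else {						\
			int __x = (x);					\
			ret = (__x < 0) ? -__x : __x;			\
		}							\
		ret;							\
	}))

int main(void)
{
	s64 big = -5000000000LL;	/* would have needed abs64() before */
	int small = -7;

	printf("%lld\n", (long long)abs(big));	/* 5000000000, s64 branch */
	printf("%ld\n", (long)abs(small));	/* 7, int branch returns long */
	return 0;
}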
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 484604d184be..e15828fd71f1 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -19,7 +19,6 @@
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
-#include <linux/spinlock.h>
struct kref {
atomic_t refcount;
@@ -99,38 +98,6 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
return kref_sub(kref, 1, release);
}
-/**
- * kref_put_spinlock_irqsave - decrement refcount for object.
- * @kref: object.
- * @release: pointer to the function that will clean up the object when the
- * last reference to the object is released.
- * This pointer is required, and it is not acceptable to pass kfree
- * in as this function.
- * @lock: lock to take in release case
- *
- * Behaves identical to kref_put with one exception. If the reference count
- * drops to zero, the lock will be taken atomically wrt dropping the reference
- * count. The release function has to call spin_unlock() without _irqrestore.
- */
-static inline int kref_put_spinlock_irqsave(struct kref *kref,
- void (*release)(struct kref *kref),
- spinlock_t *lock)
-{
- unsigned long flags;
-
- WARN_ON(release == NULL);
- if (atomic_add_unless(&kref->refcount, -1, 1))
- return 0;
- spin_lock_irqsave(lock, flags);
- if (atomic_dec_and_test(&kref->refcount)) {
- release(kref);
- local_irq_restore(flags);
- return 1;
- }
- spin_unlock_irqrestore(lock, flags);
- return 0;
-}
-
static inline int kref_put_mutex(struct kref *kref,
void (*release)(struct kref *kref),
struct mutex *lock)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 242a6d2b53ff..c923350ca20a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -460,6 +460,17 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
(vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
idx++)
+static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
+{
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ if (vcpu->vcpu_id == id)
+ return vcpu;
+ return NULL;
+}
+
#define kvm_for_each_memslot(memslot, slots) \
for (memslot = &slots->memslots[0]; \
memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
@@ -1183,4 +1194,5 @@ void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
+
#endif
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 5ebd70d12f35..3db5552b17d5 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -58,7 +58,6 @@ enum {
struct nvm_id_group {
u8 mtype;
u8 fmtype;
- u16 res16;
u8 num_ch;
u8 num_lun;
u8 num_pln;
@@ -74,9 +73,9 @@ struct nvm_id_group {
u32 tbet;
u32 tbem;
u32 mpos;
+ u32 mccap;
u16 cpar;
- u8 res[913];
-} __packed;
+};
struct nvm_addr_format {
u8 ch_offset;
@@ -91,19 +90,15 @@ struct nvm_addr_format {
u8 pg_len;
u8 sect_offset;
u8 sect_len;
- u8 res[4];
};
struct nvm_id {
u8 ver_id;
u8 vmnt;
u8 cgrps;
- u8 res[5];
u32 cap;
u32 dom;
struct nvm_addr_format ppaf;
- u8 ppat;
- u8 resv[224];
struct nvm_id_group groups[4];
} __packed;
@@ -123,39 +118,28 @@ struct nvm_tgt_instance {
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0
-#define NVM_SEC_BITS (8)
-#define NVM_PL_BITS (6)
-#define NVM_PG_BITS (16)
#define NVM_BLK_BITS (16)
-#define NVM_LUN_BITS (10)
+#define NVM_PG_BITS (16)
+#define NVM_SEC_BITS (8)
+#define NVM_PL_BITS (8)
+#define NVM_LUN_BITS (8)
#define NVM_CH_BITS (8)
struct ppa_addr {
+ /* Generic structure for all addresses */
union {
- /* Channel-based PPA format in nand 4x2x2x2x8x10 */
- struct {
- u64 ch : 4;
- u64 sec : 2; /* 4 sectors per page */
- u64 pl : 2; /* 4 planes per LUN */
- u64 lun : 2; /* 4 LUNs per channel */
- u64 pg : 8; /* 256 pages per block */
- u64 blk : 10;/* 1024 blocks per plane */
- u64 resved : 36;
- } chnl;
-
- /* Generic structure for all addresses */
struct {
+ u64 blk : NVM_BLK_BITS;
+ u64 pg : NVM_PG_BITS;
u64 sec : NVM_SEC_BITS;
u64 pl : NVM_PL_BITS;
- u64 pg : NVM_PG_BITS;
- u64 blk : NVM_BLK_BITS;
u64 lun : NVM_LUN_BITS;
u64 ch : NVM_CH_BITS;
} g;
u64 ppa;
};
-} __packed;
+};
struct nvm_rq {
struct nvm_tgt_instance *ins;
@@ -191,11 +175,11 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
struct nvm_block;
typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
-typedef int (nvm_bb_update_fn)(u32, void *, unsigned int, void *);
+typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *);
typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32,
nvm_l2p_update_fn *, void *);
-typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, int, unsigned int,
+typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, struct ppa_addr, int,
nvm_bb_update_fn *, void *);
typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int);
typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *);
@@ -210,7 +194,7 @@ struct nvm_dev_ops {
nvm_id_fn *identity;
nvm_get_l2p_tbl_fn *get_l2p_tbl;
nvm_op_bb_tbl_fn *get_bb_tbl;
- nvm_op_set_bb_fn *set_bb;
+ nvm_op_set_bb_fn *set_bb_tbl;
nvm_submit_io_fn *submit_io;
nvm_erase_blk_fn *erase_block;
@@ -220,7 +204,7 @@ struct nvm_dev_ops {
nvm_dev_dma_alloc_fn *dev_dma_alloc;
nvm_dev_dma_free_fn *dev_dma_free;
- uint8_t max_phys_sect;
+ unsigned int max_phys_sect;
};
struct nvm_lun {
@@ -229,7 +213,9 @@ struct nvm_lun {
int lun_id;
int chnl_id;
+ unsigned int nr_inuse_blocks; /* Number of used blocks */
unsigned int nr_free_blocks; /* Number of unused blocks */
+ unsigned int nr_bad_blocks; /* Number of bad blocks */
struct nvm_block *blocks;
spinlock_t lock;
@@ -263,8 +249,7 @@ struct nvm_dev {
int blks_per_lun;
int sec_size;
int oob_size;
- int addr_mode;
- struct nvm_addr_format addr_format;
+ struct nvm_addr_format ppaf;
/* Calculated/Cached values. These do not reflect the actual usable
* blocks at run-time.
@@ -290,118 +275,45 @@ struct nvm_dev {
char name[DISK_NAME_LEN];
};
-/* fallback conversion */
-static struct ppa_addr __generic_to_linear_addr(struct nvm_dev *dev,
- struct ppa_addr r)
-{
- struct ppa_addr l;
-
- l.ppa = r.g.sec +
- r.g.pg * dev->sec_per_pg +
- r.g.blk * (dev->pgs_per_blk *
- dev->sec_per_pg) +
- r.g.lun * (dev->blks_per_lun *
- dev->pgs_per_blk *
- dev->sec_per_pg) +
- r.g.ch * (dev->blks_per_lun *
- dev->pgs_per_blk *
- dev->luns_per_chnl *
- dev->sec_per_pg);
-
- return l;
-}
-
-/* fallback conversion */
-static struct ppa_addr __linear_to_generic_addr(struct nvm_dev *dev,
- struct ppa_addr r)
+static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
+ struct ppa_addr r)
{
struct ppa_addr l;
- int secs, pgs, blks, luns;
- sector_t ppa = r.ppa;
- l.ppa = 0;
-
- div_u64_rem(ppa, dev->sec_per_pg, &secs);
- l.g.sec = secs;
-
- sector_div(ppa, dev->sec_per_pg);
- div_u64_rem(ppa, dev->sec_per_blk, &pgs);
- l.g.pg = pgs;
-
- sector_div(ppa, dev->pgs_per_blk);
- div_u64_rem(ppa, dev->blks_per_lun, &blks);
- l.g.blk = blks;
-
- sector_div(ppa, dev->blks_per_lun);
- div_u64_rem(ppa, dev->luns_per_chnl, &luns);
- l.g.lun = luns;
-
- sector_div(ppa, dev->luns_per_chnl);
- l.g.ch = ppa;
+ l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset;
+ l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset;
+ l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset;
+ l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset;
+ l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset;
+ l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset;
return l;
}
-static struct ppa_addr __generic_to_chnl_addr(struct ppa_addr r)
+static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
+ struct ppa_addr r)
{
struct ppa_addr l;
- l.ppa = 0;
-
- l.chnl.sec = r.g.sec;
- l.chnl.pl = r.g.pl;
- l.chnl.pg = r.g.pg;
- l.chnl.blk = r.g.blk;
- l.chnl.lun = r.g.lun;
- l.chnl.ch = r.g.ch;
-
- return l;
-}
-
-static struct ppa_addr __chnl_to_generic_addr(struct ppa_addr r)
-{
- struct ppa_addr l;
-
- l.ppa = 0;
-
- l.g.sec = r.chnl.sec;
- l.g.pl = r.chnl.pl;
- l.g.pg = r.chnl.pg;
- l.g.blk = r.chnl.blk;
- l.g.lun = r.chnl.lun;
- l.g.ch = r.chnl.ch;
+ /*
+ * (r.ppa >> X offset) & X len bitmask. X eq. blk, pg, etc.
+ */
+ l.g.blk = (r.ppa >> dev->ppaf.blk_offset) &
+ (((1 << dev->ppaf.blk_len) - 1));
+ l.g.pg = (r.ppa >> dev->ppaf.pg_offset) &
+ (((1 << dev->ppaf.pg_len) - 1));
+ l.g.sec = (r.ppa >> dev->ppaf.sect_offset) &
+ (((1 << dev->ppaf.sect_len) - 1));
+ l.g.pl = (r.ppa >> dev->ppaf.pln_offset) &
+ (((1 << dev->ppaf.pln_len) - 1));
+ l.g.lun = (r.ppa >> dev->ppaf.lun_offset) &
+ (((1 << dev->ppaf.lun_len) - 1));
+ l.g.ch = (r.ppa >> dev->ppaf.ch_offset) &
+ (((1 << dev->ppaf.ch_len) - 1));
return l;
}
-static inline struct ppa_addr addr_to_generic_mode(struct nvm_dev *dev,
- struct ppa_addr gppa)
-{
- switch (dev->addr_mode) {
- case NVM_ADDRMODE_LINEAR:
- return __linear_to_generic_addr(dev, gppa);
- case NVM_ADDRMODE_CHANNEL:
- return __chnl_to_generic_addr(gppa);
- default:
- BUG();
- }
- return gppa;
-}
-
-static inline struct ppa_addr generic_to_addr_mode(struct nvm_dev *dev,
- struct ppa_addr gppa)
-{
- switch (dev->addr_mode) {
- case NVM_ADDRMODE_LINEAR:
- return __generic_to_linear_addr(dev, gppa);
- case NVM_ADDRMODE_CHANNEL:
- return __generic_to_chnl_addr(gppa);
- default:
- BUG();
- }
- return gppa;
-}
-
static inline int ppa_empty(struct ppa_addr ppa_addr)
{
return (ppa_addr.ppa == ADDR_EMPTY);
@@ -426,7 +338,7 @@ static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev,
return ppa;
}
-typedef void (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
+typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef int (nvm_tgt_end_io_fn)(struct nvm_rq *, int);
typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);
@@ -468,7 +380,7 @@ typedef int (nvmm_end_io_fn)(struct nvm_rq *, int);
typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
unsigned long);
typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
-typedef void (nvmm_free_blocks_print_fn)(struct nvm_dev *);
+typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
struct nvmm_type {
const char *name;
@@ -492,7 +404,7 @@ struct nvmm_type {
nvmm_get_lun_fn *get_lun;
/* Statistics */
- nvmm_free_blocks_print_fn *free_blocks_print;
+ nvmm_lun_info_print_fn *lun_info_print;
struct list_head list;
};
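
With the identify-reported ppaf now driving conversions, generic_to_dev_addr() shifts each field into the device's layout and dev_to_generic_addr() masks it back out (the first assignment in each chain must be "=", not "|=", since l is uninitialized). A hedged round-trip sketch (dev is an initialized struct nvm_dev *; the field values are arbitrary):

struct ppa_addr gen, dev_fmt, back;

gen.ppa = 0;
gen.g.ch = 1;
gen.g.lun = 2;
gen.g.blk = 100;
gen.g.pg = 7;
gen.g.pl = 0;
gen.g.sec = 3;

dev_fmt = generic_to_dev_addr(dev, gen);	/* pack via ppaf offsets */
back = dev_to_generic_addr(dev, dev_fmt);	/* unpack via ppaf lengths */
/* back.g.* equals gen.g.* as long as each value fits its ppaf length */
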
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index ff82a32871b5..c15373894a42 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -68,6 +68,7 @@ struct nlm_host {
struct nsm_handle *h_nsmhandle; /* NSM status handle */
char *h_addrbuf; /* address eyecatcher */
struct net *net; /* host net */
+ char nodename[UNX_MAXNODENAME + 1];
};
/*
@@ -235,7 +236,8 @@ void nlm_rebind_host(struct nlm_host *);
struct nlm_host * nlm_get_host(struct nlm_host *);
void nlm_shutdown_hosts(void);
void nlm_shutdown_hosts_net(struct net *net);
-void nlm_host_rebooted(const struct nlm_reboot *);
+void nlm_host_rebooted(const struct net *net,
+ const struct nlm_reboot *);
/*
* Host monitoring
@@ -243,11 +245,13 @@ void nlm_host_rebooted(const struct nlm_reboot *);
int nsm_monitor(const struct nlm_host *host);
void nsm_unmonitor(const struct nlm_host *host);
-struct nsm_handle *nsm_get_handle(const struct sockaddr *sap,
+struct nsm_handle *nsm_get_handle(const struct net *net,
+ const struct sockaddr *sap,
const size_t salen,
const char *hostname,
const size_t hostname_len);
-struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info);
+struct nsm_handle *nsm_reboot_lookup(const struct net *net,
+ const struct nlm_reboot *info);
void nsm_release(struct nsm_handle *nsm);
/*
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index e6982ac3200d..a57f0dfb6db7 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -16,6 +16,7 @@
#define MARVELL_PHY_ID_88E1318S 0x01410e90
#define MARVELL_PHY_ID_88E1116R 0x01410e40
#define MARVELL_PHY_ID_88E1510 0x01410dd0
+#define MARVELL_PHY_ID_88E1540 0x01410eb0
#define MARVELL_PHY_ID_88E3016 0x01410e60
/* struct phy_device dev_flags definitions */
diff --git a/include/linux/math64.h b/include/linux/math64.h
index c45c089bfdac..6e8b5b270ffe 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -142,6 +142,13 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
}
#endif /* mul_u64_u32_shr */
+#ifndef mul_u64_u64_shr
+static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
+{
+ return (u64)(((unsigned __int128)a * mul) >> shift);
+}
+#endif /* mul_u64_u64_shr */
+
#else
#ifndef mul_u64_u32_shr
@@ -161,6 +168,79 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
}
#endif /* mul_u64_u32_shr */
+#ifndef mul_u64_u64_shr
+static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
+{
+ union {
+ u64 ll;
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 high, low;
+#else
+ u32 low, high;
+#endif
+ } l;
+ } rl, rm, rn, rh, a0, b0;
+ u64 c;
+
+ a0.ll = a;
+ b0.ll = b;
+
+ rl.ll = (u64)a0.l.low * b0.l.low;
+ rm.ll = (u64)a0.l.low * b0.l.high;
+ rn.ll = (u64)a0.l.high * b0.l.low;
+ rh.ll = (u64)a0.l.high * b0.l.high;
+
+ /*
+ * Each of these lines computes a 64-bit intermediate result into "c",
+ * starting at bits 32-95. The low 32-bits go into the result of the
+ * multiplication, the high 32-bits are carried into the next step.
+ */
+ rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
+ rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
+ rh.l.high = (c >> 32) + rh.l.high;
+
+ /*
+ * The 128-bit result of the multiplication is in rl.ll and rh.ll,
+ * shift it right and throw away the high part of the result.
+ */
+ if (shift == 0)
+ return rl.ll;
+ if (shift < 64)
+ return (rl.ll >> shift) | (rh.ll << (64 - shift));
+ return rh.ll >> (shift & 63);
+}
+#endif /* mul_u64_u64_shr */
+
#endif
+#ifndef mul_u64_u32_div
+static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
+{
+ union {
+ u64 ll;
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 high, low;
+#else
+ u32 low, high;
+#endif
+ } l;
+ } u, rl, rh;
+
+ u.ll = a;
+ rl.ll = (u64)u.l.low * mul;
+ rh.ll = (u64)u.l.high * mul + rl.l.high;
+
+ /* Bits 32-63 of the result will be in rh.l.low. */
+ rl.l.high = do_div(rh.ll, divisor);
+
+ /* Bits 0-31 of the result will be in rl.l.low. */
+ do_div(rl.ll, divisor);
+
+ rl.l.high = rh.l.low;
+ return rl.ll;
+}
+#endif /* mul_u64_u32_div */
+
#endif /* _LINUX_MATH64_H */
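
The fallback splits both operands into 32-bit halves; with a = 2^32*ah + al and b = 2^32*bh + bl it evaluates a*b = 2^64*(ah*bh) + 2^32*(ah*bl + al*bh) + al*bl, accumulating the carries through "c" before shifting. A hedged usage sketch (the fixed-point scaling is an assumed example, not from this header):

u64 cycles = 123456789ULL;
u64 ratio = (3ULL << 32) | 0x80000000ULL;	/* 3.5 in 32.32 fixed point */

/* (cycles * 3.5) computed with a full 128-bit intermediate product */
u64 scaled = mul_u64_u64_shr(cycles, ratio, 32);
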
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index da72671a42fa..494682ce4bf3 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -255,5 +255,6 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev);
/* sysfs stuff */
extern struct attribute_group cros_ec_attr_group;
extern struct attribute_group cros_ec_lightbar_attr_group;
+extern struct attribute_group cros_ec_vbc_attr_group;
#endif /* __LINUX_MFD_CROS_EC_H */
diff --git a/include/linux/mfd/syscon/imx7-iomuxc-gpr.h b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h
new file mode 100644
index 000000000000..4585d6105d68
--- /dev/null
+++ b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_IMX7_IOMUXC_GPR_H
+#define __LINUX_IMX7_IOMUXC_GPR_H
+
+#define IOMUXC_GPR0 0x00
+#define IOMUXC_GPR1 0x04
+#define IOMUXC_GPR2 0x08
+#define IOMUXC_GPR3 0x0c
+#define IOMUXC_GPR4 0x10
+#define IOMUXC_GPR5 0x14
+#define IOMUXC_GPR6 0x18
+#define IOMUXC_GPR7 0x1c
+#define IOMUXC_GPR8 0x20
+#define IOMUXC_GPR9 0x24
+#define IOMUXC_GPR10 0x28
+#define IOMUXC_GPR11 0x2c
+#define IOMUXC_GPR12 0x30
+#define IOMUXC_GPR13 0x34
+#define IOMUXC_GPR14 0x38
+#define IOMUXC_GPR15 0x3c
+#define IOMUXC_GPR16 0x40
+#define IOMUXC_GPR17 0x44
+#define IOMUXC_GPR18 0x48
+#define IOMUXC_GPR19 0x4c
+#define IOMUXC_GPR20 0x50
+#define IOMUXC_GPR21 0x54
+#define IOMUXC_GPR22 0x58
+
+/* For imx7d iomux gpr register field define */
+#define IMX7D_GPR1_IRQ_MASK (0x1 << 12)
+#define IMX7D_GPR1_ENET1_TX_CLK_SEL_MASK (0x1 << 13)
+#define IMX7D_GPR1_ENET2_TX_CLK_SEL_MASK (0x1 << 14)
+#define IMX7D_GPR1_ENET_TX_CLK_SEL_MASK (0x3 << 13)
+#define IMX7D_GPR1_ENET1_CLK_DIR_MASK (0x1 << 17)
+#define IMX7D_GPR1_ENET2_CLK_DIR_MASK (0x1 << 18)
+#define IMX7D_GPR1_ENET_CLK_DIR_MASK (0x3 << 17)
+
+#define IMX7D_GPR5_CSI_MUX_CONTROL_MIPI (0x1 << 4)
+
+#endif /* __LINUX_IMX7_IOMUXC_GPR_H */
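
These offsets are meant to be consumed through the syscon regmap. A hedged sketch of a driver selecting the ENET1 TX clock source (the compatible string, the helper name and the set-to-mask write are assumptions, not mandated by this header):

#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/regmap.h>

static int imx7d_enet1_select_tx_clk(void)
{
	struct regmap *gpr;

	gpr = syscon_regmap_lookup_by_compatible("fsl,imx7d-iomuxc-gpr");
	if (IS_ERR(gpr))
		return PTR_ERR(gpr);

	return regmap_update_bits(gpr, IOMUXC_GPR1,
				  IMX7D_GPR1_ENET1_TX_CLK_SEL_MASK,
				  IMX7D_GPR1_ENET1_TX_CLK_SEL_MASK);
}
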
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 5a8677bafe04..7501626ab529 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -214,6 +214,8 @@ enum {
MLX4_DEV_CAP_FLAG2_IGNORE_FCS = 1LL << 28,
MLX4_DEV_CAP_FLAG2_PHV_EN = 1LL << 29,
MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN = 1LL << 30,
+ MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB = 1ULL << 31,
+ MLX4_DEV_CAP_FLAG2_LB_SRC_CHK = 1ULL << 32,
};
enum {
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index de45a51b3f04..fe052e234906 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -135,7 +135,10 @@ struct mlx4_rss_context {
struct mlx4_qp_path {
u8 fl;
- u8 vlan_control;
+ union {
+ u8 vlan_control;
+ u8 control;
+ };
u8 disable_pkey_check;
u8 pkey_index;
u8 counter_index;
@@ -156,9 +159,16 @@ struct mlx4_qp_path {
};
enum { /* fl */
- MLX4_FL_CV = 1 << 6,
- MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2
+ MLX4_FL_CV = 1 << 6,
+ MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2,
+ MLX4_FL_ETH_SRC_CHECK_MC_LB = 1 << 1,
+ MLX4_FL_ETH_SRC_CHECK_UC_LB = 1 << 0,
};
+
+enum { /* control */
+ MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER = 1 << 7,
+};
+
enum { /* vlan_control */
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED = 1 << 6,
MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED = 1 << 5, /* 802.1p priority tag */
@@ -254,6 +264,8 @@ enum {
MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE = 14 + 32,
MLX4_UPD_QP_PATH_MASK_IF_COUNTER_INDEX = 15 + 32,
MLX4_UPD_QP_PATH_MASK_FVL_RX = 16 + 32,
+ MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_UC_LB = 18 + 32,
+ MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB = 19 + 32,
};
enum { /* param3 */
@@ -436,11 +448,13 @@ enum mlx4_update_qp_attr {
MLX4_UPDATE_QP_VSD = 1 << 1,
MLX4_UPDATE_QP_RATE_LIMIT = 1 << 2,
MLX4_UPDATE_QP_QOS_VPORT = 1 << 3,
- MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 4) - 1
+ MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB = 1 << 4,
+ MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 5) - 1
};
enum mlx4_update_qp_params_flags {
- MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE = 1 << 0,
+ MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB = 1 << 0,
+ MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE = 1 << 1,
};
struct mlx4_update_qp_params {
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index dd2097455a2e..1565324eb620 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -453,26 +453,28 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 lro_cap[0x1];
u8 lro_psh_flag[0x1];
u8 lro_time_stamp[0x1];
- u8 reserved_0[0x6];
+ u8 reserved_0[0x3];
+ u8 self_lb_en_modifiable[0x1];
+ u8 reserved_1[0x2];
u8 max_lso_cap[0x5];
- u8 reserved_1[0x4];
+ u8 reserved_2[0x4];
u8 rss_ind_tbl_cap[0x4];
- u8 reserved_2[0x3];
+ u8 reserved_3[0x3];
u8 tunnel_lso_const_out_ip_id[0x1];
- u8 reserved_3[0x2];
+ u8 reserved_4[0x2];
u8 tunnel_statless_gre[0x1];
u8 tunnel_stateless_vxlan[0x1];
- u8 reserved_4[0x20];
+ u8 reserved_5[0x20];
- u8 reserved_5[0x10];
+ u8 reserved_6[0x10];
u8 lro_min_mss_size[0x10];
- u8 reserved_6[0x120];
+ u8 reserved_7[0x120];
u8 lro_timer_supported_periods[4][0x20];
- u8 reserved_7[0x600];
+ u8 reserved_8[0x600];
};
struct mlx5_ifc_roce_cap_bits {
@@ -4051,9 +4053,11 @@ struct mlx5_ifc_modify_tis_in_bits {
};
struct mlx5_ifc_modify_tir_bitmask_bits {
- u8 reserved[0x20];
+ u8 reserved_0[0x20];
- u8 reserved1[0x1f];
+ u8 reserved_1[0x1b];
+ u8 self_lb_en[0x1];
+ u8 reserved_2[0x3];
u8 lro[0x1];
};
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 906c46a05707..00bad7793788 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -430,46 +430,6 @@ static inline void compound_unlock_irqrestore(struct page *page,
#endif
}
-static inline struct page *compound_head_by_tail(struct page *tail)
-{
- struct page *head = tail->first_page;
-
- /*
- * page->first_page may be a dangling pointer to an old
- * compound page, so recheck that it is still a tail
- * page before returning.
- */
- smp_rmb();
- if (likely(PageTail(tail)))
- return head;
- return tail;
-}
-
-/*
- * Since either compound page could be dismantled asynchronously in THP
- * or we access asynchronously arbitrary positioned struct page, there
- * would be tail flag race. To handle this race, we should call
- * smp_rmb() before checking tail flag. compound_head_by_tail() did it.
- */
-static inline struct page *compound_head(struct page *page)
-{
- if (unlikely(PageTail(page)))
- return compound_head_by_tail(page);
- return page;
-}
-
-/*
- * If we access compound page synchronously such as access to
- * allocated page, there is no need to handle tail flag race, so we can
- * check tail flag directly without any synchronization primitive.
- */
-static inline struct page *compound_head_fast(struct page *page)
-{
- if (unlikely(PageTail(page)))
- return page->first_page;
- return page;
-}
-
/*
* The atomic page->_mapcount, starts from -1: so that transitions
* both from it and to it can be tracked, using atomic_inc_and_test
@@ -518,7 +478,7 @@ static inline void get_huge_page_tail(struct page *page)
VM_BUG_ON_PAGE(!PageTail(page), page);
VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
- if (compound_tail_refcounted(page->first_page))
+ if (compound_tail_refcounted(compound_head(page)))
atomic_inc(&page->_mapcount);
}
@@ -541,13 +501,7 @@ static inline struct page *virt_to_head_page(const void *x)
{
struct page *page = virt_to_page(x);
- /*
- * We don't need to worry about synchronization of tail flag
- * when we call virt_to_head_page() since it is only called for
- * already allocated page and this page won't be freed until
- * this virt_to_head_page() is finished. So use _fast variant.
- */
- return compound_head_fast(page);
+ return compound_head(page);
}
/*
@@ -568,28 +522,42 @@ int split_free_page(struct page *page);
/*
* Compound pages have a destructor function. Provide a
* prototype for that function and accessor functions.
- * These are _only_ valid on the head of a PG_compound page.
+ * These are _only_ valid on the head of a compound page.
*/
+typedef void compound_page_dtor(struct page *);
+
+/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
+enum compound_dtor_id {
+ NULL_COMPOUND_DTOR,
+ COMPOUND_PAGE_DTOR,
+#ifdef CONFIG_HUGETLB_PAGE
+ HUGETLB_PAGE_DTOR,
+#endif
+ NR_COMPOUND_DTORS,
+};
+extern compound_page_dtor * const compound_page_dtors[];
static inline void set_compound_page_dtor(struct page *page,
- compound_page_dtor *dtor)
+ enum compound_dtor_id compound_dtor)
{
- page[1].compound_dtor = dtor;
+ VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
+ page[1].compound_dtor = compound_dtor;
}
static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
- return page[1].compound_dtor;
+ VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
+ return compound_page_dtors[page[1].compound_dtor];
}
-static inline int compound_order(struct page *page)
+static inline unsigned int compound_order(struct page *page)
{
if (!PageHead(page))
return 0;
return page[1].compound_order;
}
-static inline void set_compound_order(struct page *page, unsigned long order)
+static inline void set_compound_order(struct page *page, unsigned int order)
{
page[1].compound_order = order;
}
@@ -1572,8 +1540,7 @@ static inline bool ptlock_init(struct page *page)
* with 0. Make sure nobody took it in use in between.
*
* It can happen if arch try to use slab for page table allocation:
- * slab code uses page->slab_cache and page->first_page (for tail
- * pages), which share storage with page->ptl.
+ * slab code uses page->slab_cache, which shares storage with page->ptl.
*/
VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
if (!ptlock_alloc(page))
@@ -1843,7 +1810,8 @@ extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
extern __printf(3, 4)
-void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
+void warn_alloc_failed(gfp_t gfp_mask, unsigned int order,
+ const char *fmt, ...);
extern void setup_per_cpu_pageset(void);
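
The destructor pointer in struct page becomes a small enum indexing a fixed table, which both shrinks the field and lets the getter validate it. Roughly how the backing array in mm/page_alloc.c is expected to look (a sketch; the order must mirror enum compound_dtor_id):

compound_page_dtor * const compound_page_dtors[] = {
	NULL,			/* NULL_COMPOUND_DTOR */
	free_compound_page,	/* COMPOUND_PAGE_DTOR */
#ifdef CONFIG_HUGETLB_PAGE
	free_huge_page,		/* HUGETLB_PAGE_DTOR */
#endif
};
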
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 0a85da25a822..f8d1492a114f 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -28,8 +28,6 @@ struct mem_cgroup;
IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
-typedef void compound_page_dtor(struct page *);
-
/*
* Each physical page in the system has a struct page associated with
* it to keep track of whatever it is we are using the page for at the
@@ -113,7 +111,13 @@ struct page {
};
};
- /* Third double word block */
+ /*
+ * Third double word block
+ *
+ * WARNING: bit 0 of the first word encodes PageTail(). That means
+ * the other users of the storage space MUST NOT use the bit to
+ * avoid collision and false-positive PageTail().
+ */
union {
struct list_head lru; /* Pageout list, eg. active_list
* protected by zone->lru_lock !
@@ -131,18 +135,37 @@ struct page {
#endif
};
- struct slab *slab_page; /* slab fields */
struct rcu_head rcu_head; /* Used by SLAB
* when destroying via RCU
*/
- /* First tail page of compound page */
+ /* Tail pages of compound page */
struct {
- compound_page_dtor *compound_dtor;
- unsigned long compound_order;
+ unsigned long compound_head; /* If bit zero is set */
+
+ /* First tail page only */
+#ifdef CONFIG_64BIT
+ /*
+ * On 64 bit systems we have enough space in struct page
+ * to encode compound_dtor and compound_order with
+ * unsigned int. This can help the compiler generate better or
+ * smaller code on some architectures.
+ */
+ unsigned int compound_dtor;
+ unsigned int compound_order;
+#else
+ unsigned short int compound_dtor;
+ unsigned short int compound_order;
+#endif
};
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
- pgtable_t pmd_huge_pte; /* protected by page->ptl */
+ struct {
+ unsigned long __pad; /* do not overlay pmd_huge_pte
+ * with compound_head to avoid
+ * possible bit 0 collision.
+ */
+ pgtable_t pmd_huge_pte; /* protected by page->ptl */
+ };
#endif
};
@@ -163,7 +186,6 @@ struct page {
#endif
#endif
struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */
- struct page *first_page; /* Compound tail pages */
};
#ifdef CONFIG_MEMCG
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 2d7e660cdefe..e23a9e704536 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -37,10 +37,10 @@
enum {
MIGRATE_UNMOVABLE,
- MIGRATE_RECLAIMABLE,
MIGRATE_MOVABLE,
+ MIGRATE_RECLAIMABLE,
MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
- MIGRATE_RESERVE = MIGRATE_PCPTYPES,
+ MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
/*
* MIGRATE_CMA migration type is designed to mimic the way
@@ -334,13 +334,16 @@ struct zone {
/* zone watermarks, access with *_wmark_pages(zone) macros */
unsigned long watermark[NR_WMARK];
+ unsigned long nr_reserved_highatomic;
+
/*
- * We don't know if the memory that we're going to allocate will be freeable
- * or/and it will be released eventually, so to avoid totally wasting several
- * GB of ram we must reserve some of the lower zone memory (otherwise we risk
- * to run OOM on the lower zones despite there's tons of freeable ram
- * on the higher zones). This array is recalculated at runtime if the
- * sysctl_lowmem_reserve_ratio sysctl changes.
+ * We don't know if the memory that we're going to allocate will be
+ * freeable or/and it will be released eventually, so to avoid totally
+ * wasting several GB of ram we must reserve some of the lower zone
+ * memory (otherwise we risk to run OOM on the lower zones despite
+ * there being tons of freeable ram on the higher zones). This array is
+ * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl
+ * changes.
*/
long lowmem_reserve[MAX_NR_ZONES];
@@ -429,12 +432,6 @@ struct zone {
const char *name;
- /*
- * Number of MIGRATE_RESERVE page block. To maintain for just
- * optimization. Protected by zone->lock.
- */
- int nr_migrate_reserve_block;
-
#ifdef CONFIG_MEMORY_ISOLATION
/*
* Number of isolated pageblock. It is used to solve incorrect
@@ -589,75 +586,8 @@ static inline bool zone_is_empty(struct zone *zone)
* [1] : No fallback (__GFP_THISNODE)
*/
#define MAX_ZONELISTS 2
-
-
-/*
- * We cache key information from each zonelist for smaller cache
- * footprint when scanning for free pages in get_page_from_freelist().
- *
- * 1) The BITMAP fullzones tracks which zones in a zonelist have come
- * up short of free memory since the last time (last_fullzone_zap)
- * we zero'd fullzones.
- * 2) The array z_to_n[] maps each zone in the zonelist to its node
- * id, so that we can efficiently evaluate whether that node is
- * set in the current tasks mems_allowed.
- *
- * Both fullzones and z_to_n[] are one-to-one with the zonelist,
- * indexed by a zones offset in the zonelist zones[] array.
- *
- * The get_page_from_freelist() routine does two scans. During the
- * first scan, we skip zones whose corresponding bit in 'fullzones'
- * is set or whose corresponding node in current->mems_allowed (which
- * comes from cpusets) is not set. During the second scan, we bypass
- * this zonelist_cache, to ensure we look methodically at each zone.
- *
- * Once per second, we zero out (zap) fullzones, forcing us to
- * reconsider nodes that might have regained more free memory.
- * The field last_full_zap is the time we last zapped fullzones.
- *
- * This mechanism reduces the amount of time we waste repeatedly
- * reexaming zones for free memory when they just came up low on
- * memory momentarilly ago.
- *
- * The zonelist_cache struct members logically belong in struct
- * zonelist. However, the mempolicy zonelists constructed for
- * MPOL_BIND are intentionally variable length (and usually much
- * shorter). A general purpose mechanism for handling structs with
- * multiple variable length members is more mechanism than we want
- * here. We resort to some special case hackery instead.
- *
- * The MPOL_BIND zonelists don't need this zonelist_cache (in good
- * part because they are shorter), so we put the fixed length stuff
- * at the front of the zonelist struct, ending in a variable length
- * zones[], as is needed by MPOL_BIND.
- *
- * Then we put the optional zonelist cache on the end of the zonelist
- * struct. This optional stuff is found by a 'zlcache_ptr' pointer in
- * the fixed length portion at the front of the struct. This pointer
- * both enables us to find the zonelist cache, and in the case of
- * MPOL_BIND zonelists, (which will just set the zlcache_ptr to NULL)
- * to know that the zonelist cache is not there.
- *
- * The end result is that struct zonelists come in two flavors:
- * 1) The full, fixed length version, shown below, and
- * 2) The custom zonelists for MPOL_BIND.
- * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
- *
- * Even though there may be multiple CPU cores on a node modifying
- * fullzones or last_full_zap in the same zonelist_cache at the same
- * time, we don't lock it. This is just hint data - if it is wrong now
- * and then, the allocator will still function, perhaps a bit slower.
- */
-
-
-struct zonelist_cache {
- unsigned short z_to_n[MAX_ZONES_PER_ZONELIST]; /* zone->nid */
- DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST); /* zone full? */
- unsigned long last_full_zap; /* when last zap'd (jiffies) */
-};
#else
#define MAX_ZONELISTS 1
-struct zonelist_cache;
#endif
/*
@@ -675,9 +605,6 @@ struct zoneref {
* allocation, the other zones are fallback zones, in decreasing
* priority.
*
- * If zlcache_ptr is not NULL, then it is just the address of zlcache,
- * as explained above. If zlcache_ptr is NULL, there is no zlcache.
- * *
* To speed the reading of the zonelist, the zonerefs contain the zone index
* of the entry being read. Helper functions to access information given
* a struct zoneref are
@@ -687,11 +614,7 @@ struct zoneref {
* zonelist_node_idx() - Return the index of the node for an entry
*/
struct zonelist {
- struct zonelist_cache *zlcache_ptr; // NULL or &zlcache
struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
-#ifdef CONFIG_NUMA
- struct zonelist_cache zlcache; // optional ...
-#endif
};
#ifndef CONFIG_DISCONTIGMEM
@@ -817,7 +740,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool zone_watermark_ok(struct zone *z, unsigned int order,
unsigned long mark, int classzone_idx, int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
- unsigned long mark, int classzone_idx, int alloc_flags);
+ unsigned long mark, int classzone_idx);
enum memmap_context {
MEMMAP_EARLY,
MEMMAP_HOTPLUG,
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index c12f2147c350..52666d90ca94 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -386,6 +386,7 @@ extern int param_get_ullong(char *buffer, const struct kernel_param *kp);
extern const struct kernel_param_ops param_ops_charp;
extern int param_set_charp(const char *val, const struct kernel_param *kp);
extern int param_get_charp(char *buffer, const struct kernel_param *kp);
+extern void param_free_charp(void *arg);
#define param_check_charp(name, p) __param_check(name, p, char *)
/* We used to allow int as well as bool. We're taking that away! */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a9e3bf42d287..67bfac1abfc1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1322,6 +1322,7 @@ enum netdev_priv_flags {
#define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER
#define IFF_NO_QUEUE IFF_NO_QUEUE
#define IFF_OPENVSWITCH IFF_OPENVSWITCH
+#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
/**
* struct net_device - The DEVICE structure.
@@ -2067,20 +2068,23 @@ struct pcpu_sw_netstats {
struct u64_stats_sync syncp;
};
-#define netdev_alloc_pcpu_stats(type) \
-({ \
- typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
- if (pcpu_stats) { \
- int __cpu; \
- for_each_possible_cpu(__cpu) { \
- typeof(type) *stat; \
- stat = per_cpu_ptr(pcpu_stats, __cpu); \
- u64_stats_init(&stat->syncp); \
- } \
- } \
- pcpu_stats; \
+#define __netdev_alloc_pcpu_stats(type, gfp) \
+({ \
+ typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
+ if (pcpu_stats) { \
+ int __cpu; \
+ for_each_possible_cpu(__cpu) { \
+ typeof(type) *stat; \
+ stat = per_cpu_ptr(pcpu_stats, __cpu); \
+ u64_stats_init(&stat->syncp); \
+ } \
+ } \
+ pcpu_stats; \
})
+#define netdev_alloc_pcpu_stats(type) \
+ __netdev_alloc_pcpu_stats(type, GFP_KERNEL)
+
#include <linux/notifier.h>
/* netdevice notifier chain. Please remember to update the rtnetlink
@@ -3853,6 +3857,11 @@ static inline bool netif_is_bridge_master(const struct net_device *dev)
return dev->priv_flags & IFF_EBRIDGE;
}
+static inline bool netif_is_bridge_port(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_BRIDGE_PORT;
+}
+
static inline bool netif_is_ovs_master(const struct net_device *dev)
{
return dev->priv_flags & IFF_OPENVSWITCH;
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 48bb01edcf30..0e1f433cc4b7 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -421,7 +421,7 @@ extern void ip_set_free(void *members);
extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr);
extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[],
- size_t len);
+ size_t len, size_t align);
extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
struct ip_set_ext *ext);
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
index 187feabe557c..5fcd375ef175 100644
--- a/include/linux/netfilter_ingress.h
+++ b/include/linux/netfilter_ingress.h
@@ -5,10 +5,13 @@
#include <linux/netdevice.h>
#ifdef CONFIG_NETFILTER_INGRESS
-static inline int nf_hook_ingress_active(struct sk_buff *skb)
+static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
{
- return nf_hook_list_active(&skb->dev->nf_hooks_ingress,
- NFPROTO_NETDEV, NF_NETDEV_INGRESS);
+#ifdef HAVE_JUMP_LABEL
+ if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
+ return false;
+#endif
+ return !list_empty(&skb->dev->nf_hooks_ingress);
}
static inline int nf_hook_ingress(struct sk_buff *skb)
@@ -16,8 +19,8 @@ static inline int nf_hook_ingress(struct sk_buff *skb)
struct nf_hook_state state;
nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress,
- NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, NULL,
- skb->dev, NULL, dev_net(skb->dev), NULL);
+ NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV,
+ skb->dev, NULL, NULL, dev_net(skb->dev), NULL);
return nf_hook_slow(skb, &state);
}
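
The activity check is now a static-key test plus a list_empty() peek, so the receive hot path pays one patched branch when no ingress hooks are registered. The intended caller pattern, roughly (the return-code handling is an assumption):

if (nf_hook_ingress_active(skb)) {
	int ret = nf_hook_ingress(skb);

	if (ret < 0)
		return ret;	/* a hook consumed or dropped the skb */
}
/* fall through to normal receive processing */
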
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 00121f298269..e7e78537aea2 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -130,6 +130,7 @@ enum nfs_opnum4 {
OP_READ_PLUS = 68,
OP_SEEK = 69,
OP_WRITE_SAME = 70,
+ OP_CLONE = 71,
OP_ILLEGAL = 10044,
};
@@ -421,6 +422,7 @@ enum lock_type4 {
#define FATTR4_WORD2_LAYOUT_TYPES (1UL << 0)
#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1)
#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4)
+#define FATTR4_WORD2_CLONE_BLKSIZE (1UL << 13)
#define FATTR4_WORD2_SECURITY_LABEL (1UL << 16)
/* MDS threshold bitmap bits */
@@ -501,6 +503,7 @@ enum {
NFSPROC4_CLNT_ALLOCATE,
NFSPROC4_CLNT_DEALLOCATE,
NFSPROC4_CLNT_LAYOUTSTATS,
+ NFSPROC4_CLNT_CLONE,
};
/* nfs41 types */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 570a7df2775b..2469ab0bb3a1 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -147,6 +147,7 @@ struct nfs_server {
unsigned int acdirmax;
unsigned int namelen;
unsigned int options; /* extra options enabled by mount */
+ unsigned int clone_blksize; /* granularity of a CLONE operation */
#define NFS_OPTION_FSCACHE 0x00000001 /* - local caching enabled */
#define NFS_OPTION_MIGRATION 0x00000002 /* - NFSv4 migration enabled */
@@ -243,5 +244,6 @@ struct nfs_server {
#define NFS_CAP_ALLOCATE (1U << 20)
#define NFS_CAP_DEALLOCATE (1U << 21)
#define NFS_CAP_LAYOUTSTATS (1U << 22)
+#define NFS_CAP_CLONE (1U << 23)
#endif
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 52faf7e96c65..11bbae44f4cb 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -141,6 +141,7 @@ struct nfs_fsinfo {
__u32 lease_time; /* in seconds */
__u32 layouttype; /* supported pnfs layout driver */
__u32 blksize; /* preferred pnfs io block size */
+ __u32 clone_blksize; /* granularity of a CLONE operation */
};
struct nfs_fsstat {
@@ -250,6 +251,7 @@ struct nfs4_layoutget {
struct nfs4_layoutget_res res;
struct rpc_cred *cred;
gfp_t gfp_flags;
+ long timeout;
};
struct nfs4_getdeviceinfo_args {
@@ -359,6 +361,25 @@ struct nfs42_layoutstat_data {
struct nfs42_layoutstat_res res;
};
+struct nfs42_clone_args {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh *src_fh;
+ struct nfs_fh *dst_fh;
+ nfs4_stateid src_stateid;
+ nfs4_stateid dst_stateid;
+ __u64 src_offset;
+ __u64 dst_offset;
+ __u64 count;
+ const u32 *dst_bitmask;
+};
+
+struct nfs42_clone_res {
+ struct nfs4_sequence_res seq_res;
+ unsigned int rpc_status;
+ struct nfs_fattr *dst_fattr;
+ const struct nfs_server *server;
+};
+
struct stateowner_id {
__u64 create_time;
__u32 uniquifier;
@@ -528,7 +549,7 @@ struct nfs4_delegreturnargs {
struct nfs4_delegreturnres {
struct nfs4_sequence_res seq_res;
struct nfs_fattr * fattr;
- const struct nfs_server *server;
+ struct nfs_server *server;
};
/*
@@ -601,7 +622,7 @@ struct nfs_removeargs {
struct nfs_removeres {
struct nfs4_sequence_res seq_res;
- const struct nfs_server *server;
+ struct nfs_server *server;
struct nfs_fattr *dir_attr;
struct nfs4_change_info cinfo;
};
@@ -619,7 +640,7 @@ struct nfs_renameargs {
struct nfs_renameres {
struct nfs4_sequence_res seq_res;
- const struct nfs_server *server;
+ struct nfs_server *server;
struct nfs4_change_info old_cinfo;
struct nfs_fattr *old_fattr;
struct nfs4_change_info new_cinfo;
@@ -685,7 +706,6 @@ struct nfs_setaclargs {
struct nfs4_sequence_args seq_args;
struct nfs_fh * fh;
size_t acl_len;
- unsigned int acl_pgbase;
struct page ** acl_pages;
};
@@ -697,7 +717,6 @@ struct nfs_getaclargs {
struct nfs4_sequence_args seq_args;
struct nfs_fh * fh;
size_t acl_len;
- unsigned int acl_pgbase;
struct page ** acl_pages;
};
diff --git a/include/linux/of.h b/include/linux/of.h
index 2194b8ca41f9..dd10626a615f 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -126,6 +126,8 @@ extern raw_spinlock_t devtree_lock;
#define OF_POPULATED 3 /* device already created for the node */
#define OF_POPULATED_BUS 4 /* of_platform_populate recursed to children of this node */
+#define OF_BAD_ADDR ((u64)-1)
+
#ifdef CONFIG_OF
void of_core_init(void);
@@ -229,8 +231,6 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
-#define OF_BAD_ADDR ((u64)-1)
-
static inline const char *of_node_full_name(const struct device_node *np)
{
return np ? np->full_name : "<no-node>";
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index d88e81be6368..507daad0bc8d 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -57,6 +57,13 @@ extern int of_dma_get_range(struct device_node *np, u64 *dma_addr,
u64 *paddr, u64 *size);
extern bool of_dma_is_coherent(struct device_node *np);
#else /* CONFIG_OF_ADDRESS */
+
+static inline u64 of_translate_address(struct device_node *np,
+ const __be32 *addr)
+{
+ return OF_BAD_ADDR;
+}
+
static inline struct device_node *of_find_matching_node_by_address(
struct device_node *from,
const struct of_device_id *matches,
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h
index 98ba7525929e..b90d8ec57c1f 100644
--- a/include/linux/of_dma.h
+++ b/include/linux/of_dma.h
@@ -34,7 +34,7 @@ struct of_dma_filter_info {
dma_filter_fn filter_fn;
};
-#ifdef CONFIG_OF
+#ifdef CONFIG_DMA_OF
extern int of_dma_controller_register(struct device_node *np,
struct dma_chan *(*of_dma_xlate)
(struct of_phandle_args *, struct of_dma *),
@@ -80,7 +80,7 @@ static inline int of_dma_router_register(struct device_node *np,
static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
const char *name)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index 38c0533a3359..2c51ee78b1c0 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -16,7 +16,6 @@ int of_pci_get_devfn(struct device_node *np);
int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
int of_get_pci_domain_nr(struct device_node *node);
-void of_pci_dma_configure(struct pci_dev *pci_dev);
void of_pci_check_probe_only(void);
#else
static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
@@ -53,8 +52,6 @@ of_get_pci_domain_nr(struct device_node *node)
return -1;
}
-static inline void of_pci_dma_configure(struct pci_dev *pci_dev) { }
-
static inline void of_pci_check_probe_only(void) { }
#endif
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index a525e5067484..bb53c7b86315 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -86,12 +86,7 @@ enum pageflags {
PG_private, /* If pagecache, has fs-private data */
PG_private_2, /* If pagecache, has fs aux data */
PG_writeback, /* Page is under writeback */
-#ifdef CONFIG_PAGEFLAGS_EXTENDED
PG_head, /* A head page */
- PG_tail, /* A tail page */
-#else
- PG_compound, /* A compound page */
-#endif
PG_swapcache, /* Swap page: swp_entry_t in private */
PG_mappedtodisk, /* Has blocks allocated on-disk */
PG_reclaim, /* To be reclaimed asap */
@@ -398,85 +393,46 @@ static inline void set_page_writeback_keepwrite(struct page *page)
test_set_page_writeback_keepwrite(page);
}
-#ifdef CONFIG_PAGEFLAGS_EXTENDED
-/*
- * System with lots of page flags available. This allows separate
- * flags for PageHead() and PageTail() checks of compound pages so that bit
- * tests can be used in performance sensitive paths. PageCompound is
- * generally not used in hot code paths except arch/powerpc/mm/init_64.c
- * and arch/powerpc/kvm/book3s_64_vio_hv.c which use it to detect huge pages
- * and avoid handling those in real mode.
- */
__PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head)
-__PAGEFLAG(Tail, tail)
-static inline int PageCompound(struct page *page)
-{
- return page->flags & ((1L << PG_head) | (1L << PG_tail));
-
-}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline void ClearPageCompound(struct page *page)
+static inline int PageTail(struct page *page)
{
- BUG_ON(!PageHead(page));
- ClearPageHead(page);
+ return READ_ONCE(page->compound_head) & 1;
}
-#endif
-
-#define PG_head_mask ((1L << PG_head))
-#else
-/*
- * Reduce page flag use as much as possible by overlapping
- * compound page flags with the flags used for page cache pages. Possible
- * because PageCompound is always set for compound pages and not for
- * pages on the LRU and/or pagecache.
- */
-TESTPAGEFLAG(Compound, compound)
-__SETPAGEFLAG(Head, compound) __CLEARPAGEFLAG(Head, compound)
-
-/*
- * PG_reclaim is used in combination with PG_compound to mark the
- * head and tail of a compound page. This saves one page flag
- * but makes it impossible to use compound pages for the page cache.
- * The PG_reclaim bit would have to be used for reclaim or readahead
- * if compound pages enter the page cache.
- *
- * PG_compound & PG_reclaim => Tail page
- * PG_compound & ~PG_reclaim => Head page
- */
-#define PG_head_mask ((1L << PG_compound))
-#define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))
-
-static inline int PageHead(struct page *page)
+static inline void set_compound_head(struct page *page, struct page *head)
{
- return ((page->flags & PG_head_tail_mask) == PG_head_mask);
+ WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}
-static inline int PageTail(struct page *page)
+static inline void clear_compound_head(struct page *page)
{
- return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask);
+ WRITE_ONCE(page->compound_head, 0);
}
-static inline void __SetPageTail(struct page *page)
+static inline struct page *compound_head(struct page *page)
{
- page->flags |= PG_head_tail_mask;
+ unsigned long head = READ_ONCE(page->compound_head);
+
+ if (unlikely(head & 1))
+ return (struct page *) (head - 1);
+ return page;
}
-static inline void __ClearPageTail(struct page *page)
+static inline int PageCompound(struct page *page)
{
- page->flags &= ~PG_head_tail_mask;
-}
+ return PageHead(page) || PageTail(page);
+}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
- BUG_ON((page->flags & PG_head_tail_mask) != (1 << PG_compound));
- clear_bit(PG_compound, &page->flags);
+ BUG_ON(!PageHead(page));
+ ClearPageHead(page);
}
#endif
-#endif /* !PAGEFLAGS_EXTENDED */
+#define PG_head_mask ((1L << PG_head))
#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
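
A worked example of the new encoding: a tail page stores its head pointer with bit 0 set in page->compound_head, which can never collide with a real word-aligned pointer sharing that union slot. Hedged sketch:

set_compound_head(tail, head);		/* compound_head = (ulong)head + 1 */
VM_BUG_ON_PAGE(!PageTail(tail), tail);	/* bit 0 is now set */
VM_BUG_ON_PAGE(compound_head(tail) != head, tail); /* bit 0 masked off */
clear_compound_head(tail);		/* back to an ordinary page */
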
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index 2baeee12f48e..e942558b3585 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -44,7 +44,7 @@ enum pageblock_bits {
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
/* Huge page sizes are variable */
-extern int pageblock_order;
+extern unsigned int pageblock_order;
#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index a6c78e00ea96..26eabf5ec718 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -69,6 +69,13 @@ static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}
+/* Restricts the given gfp_mask to what the mapping allows. */
+static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
+ gfp_t gfp_mask)
+{
+ return mapping_gfp_mask(mapping) & gfp_mask;
+}
+
/*
* This is non-atomic. Only to be used before the mapping is activated.
* Probably needs a barrier...
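
mapping_gfp_constraint() simply ANDs the caller's flags with the mapping's mask. A hedged example of the open-coded pattern it replaces (the __GFP_FS restriction is an assumed caller policy):

struct page *page;

page = __page_cache_alloc(mapping_gfp_constraint(mapping,
						 GFP_KERNEL & ~__GFP_FS));
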
diff --git a/include/linux/pci.h b/include/linux/pci.h
index e828e7b4afec..6ae25aae88fd 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -412,9 +412,18 @@ struct pci_host_bridge {
void (*release_fn)(struct pci_host_bridge *);
void *release_data;
unsigned int ignore_reset_delay:1; /* for entire hierarchy */
+ /* Resource alignment requirements */
+ resource_size_t (*align_resource)(struct pci_dev *dev,
+ const struct resource *res,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t align);
};
#define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
+
+struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
+
void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
void (*release_fn)(struct pci_host_bridge *),
void *release_data);
diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h
index 91b16adab0cd..3c8825b67298 100644
--- a/include/linux/platform_data/atmel.h
+++ b/include/linux/platform_data/atmel.h
@@ -9,15 +9,7 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
-#include <linux/device.h>
-#include <linux/i2c.h>
-#include <linux/leds.h>
-#include <linux/spi/spi.h>
-#include <linux/usb/atmel_usba_udc.h>
-#include <linux/atmel-mci.h>
-#include <sound/atmel-ac97c.h>
#include <linux/serial.h>
-#include <linux/platform_data/macb.h>
/* Compact Flash */
struct at91_cf_data {
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
index 87ac14c584f2..03b6095d3b18 100644
--- a/include/linux/platform_data/dma-dw.h
+++ b/include/linux/platform_data/dma-dw.h
@@ -37,6 +37,7 @@ struct dw_dma_slave {
* @nr_channels: Number of channels supported by hardware (max 8)
* @is_private: The device channels should be marked as private and not for
* use by the general purpose DMA channel allocator.
+ * @is_memcpy: The device channels do support memory-to-memory transfers.
* @chan_allocation_order: Allocate channels starting from 0 or 7
* @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
* @block_size: Maximum block size supported by the controller
@@ -47,6 +48,7 @@ struct dw_dma_slave {
struct dw_dma_platform_data {
unsigned int nr_channels;
bool is_private;
+ bool is_memcpy;
#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
unsigned char chan_allocation_order;
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h
index bdb2710e2aab..e2878baeb90e 100644
--- a/include/linux/platform_data/edma.h
+++ b/include/linux/platform_data/edma.h
@@ -41,51 +41,6 @@
#ifndef EDMA_H_
#define EDMA_H_
-/* PaRAM slots are laid out like this */
-struct edmacc_param {
- u32 opt;
- u32 src;
- u32 a_b_cnt;
- u32 dst;
- u32 src_dst_bidx;
- u32 link_bcntrld;
- u32 src_dst_cidx;
- u32 ccnt;
-} __packed;
-
-/* fields in edmacc_param.opt */
-#define SAM BIT(0)
-#define DAM BIT(1)
-#define SYNCDIM BIT(2)
-#define STATIC BIT(3)
-#define EDMA_FWID (0x07 << 8)
-#define TCCMODE BIT(11)
-#define EDMA_TCC(t) ((t) << 12)
-#define TCINTEN BIT(20)
-#define ITCINTEN BIT(21)
-#define TCCHEN BIT(22)
-#define ITCCHEN BIT(23)
-
-/*ch_status paramater of callback function possible values*/
-#define EDMA_DMA_COMPLETE 1
-#define EDMA_DMA_CC_ERROR 2
-#define EDMA_DMA_TC1_ERROR 3
-#define EDMA_DMA_TC2_ERROR 4
-
-enum address_mode {
- INCR = 0,
- FIFO = 1
-};
-
-enum fifo_width {
- W8BIT = 0,
- W16BIT = 1,
- W32BIT = 2,
- W64BIT = 3,
- W128BIT = 4,
- W256BIT = 5
-};
-
enum dma_event_q {
EVENTQ_0 = 0,
EVENTQ_1 = 1,
@@ -94,64 +49,10 @@ enum dma_event_q {
EVENTQ_DEFAULT = -1
};
-enum sync_dimension {
- ASYNC = 0,
- ABSYNC = 1
-};
-
#define EDMA_CTLR_CHAN(ctlr, chan) (((ctlr) << 16) | (chan))
#define EDMA_CTLR(i) ((i) >> 16)
#define EDMA_CHAN_SLOT(i) ((i) & 0xffff)
-#define EDMA_CHANNEL_ANY -1 /* for edma_alloc_channel() */
-#define EDMA_SLOT_ANY -1 /* for edma_alloc_slot() */
-#define EDMA_CONT_PARAMS_ANY 1001
-#define EDMA_CONT_PARAMS_FIXED_EXACT 1002
-#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
-
-#define EDMA_MAX_CC 2
-
-/* alloc/free DMA channels and their dedicated parameter RAM slots */
-int edma_alloc_channel(int channel,
- void (*callback)(unsigned channel, u16 ch_status, void *data),
- void *data, enum dma_event_q);
-void edma_free_channel(unsigned channel);
-
-/* alloc/free parameter RAM slots */
-int edma_alloc_slot(unsigned ctlr, int slot);
-void edma_free_slot(unsigned slot);
-
-/* alloc/free a set of contiguous parameter RAM slots */
-int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count);
-int edma_free_cont_slots(unsigned slot, int count);
-
-/* calls that operate on part of a parameter RAM slot */
-void edma_set_src(unsigned slot, dma_addr_t src_port,
- enum address_mode mode, enum fifo_width);
-void edma_set_dest(unsigned slot, dma_addr_t dest_port,
- enum address_mode mode, enum fifo_width);
-dma_addr_t edma_get_position(unsigned slot, bool dst);
-void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx);
-void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx);
-void edma_set_transfer_params(unsigned slot, u16 acnt, u16 bcnt, u16 ccnt,
- u16 bcnt_rld, enum sync_dimension sync_mode);
-void edma_link(unsigned from, unsigned to);
-void edma_unlink(unsigned from);
-
-/* calls that operate on an entire parameter RAM slot */
-void edma_write_slot(unsigned slot, const struct edmacc_param *params);
-void edma_read_slot(unsigned slot, struct edmacc_param *params);
-
-/* channel control operations */
-int edma_start(unsigned channel);
-void edma_stop(unsigned channel);
-void edma_clean_channel(unsigned channel);
-void edma_clear_event(unsigned channel);
-void edma_pause(unsigned channel);
-void edma_resume(unsigned channel);
-
-void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no);
-
struct edma_rsv_info {
const s16 (*rsv_chans)[2];
@@ -170,10 +71,11 @@ struct edma_soc_info {
/* Resource reservation for other cores */
struct edma_rsv_info *rsv;
+ /* List of channels allocated for memcpy, terminated with -1 */
+ s16 *memcpy_channels;
+
s8 (*queue_priority_mapping)[2];
const s16 (*xbar_chans)[2];
};
-int edma_trigger_channel(unsigned);
-
#endif
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index 85f810b33917..acfea8ce4a07 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -65,11 +65,6 @@ static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t si
memcpy(dst, (void __force const *) src, size);
}
-static inline void memunmap_pmem(struct device *dev, void __pmem *addr)
-{
- devm_memunmap(dev, (void __force *) addr);
-}
-
static inline bool arch_has_pmem_api(void)
{
return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
@@ -93,7 +88,7 @@ static inline bool arch_has_wmb_pmem(void)
* These defaults seek to offer decent performance and minimize the
* window between i/o completion and writes being durable on media.
* However, it is undefined / architecture specific whether
- * default_memremap_pmem + default_memcpy_to_pmem is sufficient for
+ * ARCH_MEMREMAP_PMEM + default_memcpy_to_pmem is sufficient for
* making data durable relative to i/o completion.
*/
static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src,
@@ -117,25 +112,6 @@ static inline void default_clear_pmem(void __pmem *addr, size_t size)
}
/**
- * memremap_pmem - map physical persistent memory for pmem api
- * @offset: physical address of persistent memory
- * @size: size of the mapping
- *
- * Establish a mapping of the architecture specific memory type expected
- * by memcpy_to_pmem() and wmb_pmem(). For example, it may be
- * the case that an uncacheable or writethrough mapping is sufficient,
- * or a writeback mapping provided memcpy_to_pmem() and
- * wmb_pmem() arrange for the data to be written through the
- * cache to persistent media.
- */
-static inline void __pmem *memremap_pmem(struct device *dev,
- resource_size_t offset, unsigned long size)
-{
- return (void __pmem *) devm_memremap(dev, offset, size,
- ARCH_MEMREMAP_PMEM);
-}
-
-/**
* memcpy_to_pmem - copy data to persistent memory
* @dst: destination buffer for the copy
* @src: source buffer for the copy
diff --git a/include/linux/property.h b/include/linux/property.h
index 463de52fe891..0a3705a7c9f2 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -27,6 +27,12 @@ enum dev_prop_type {
DEV_PROP_MAX,
};
+enum dev_dma_attr {
+ DEV_DMA_NOT_SUPPORTED,
+ DEV_DMA_NON_COHERENT,
+ DEV_DMA_COHERENT,
+};
+
bool device_property_present(struct device *dev, const char *propname);
int device_property_read_u8_array(struct device *dev, const char *propname,
u8 *val, size_t nval);
@@ -168,7 +174,9 @@ struct property_set {
void device_add_property_set(struct device *dev, struct property_set *pset);
-bool device_dma_is_coherent(struct device *dev);
+bool device_dma_supported(struct device *dev);
+
+enum dev_dma_attr device_get_dma_attr(struct device *dev);
int device_get_phy_mode(struct device *dev);
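
device_get_dma_attr() turns the old boolean into a tristate, so firmware that says nothing about DMA can be told apart from firmware that declares it non-coherent. A hedged probe-time sketch (setup_coherent_dma() is a hypothetical helper, and the error policy is an assumption):

enum dev_dma_attr attr = device_get_dma_attr(dev);

if (attr == DEV_DMA_NOT_SUPPORTED)
	return -ENODEV;			/* firmware forbids DMA entirely */

setup_coherent_dma(dev, attr == DEV_DMA_COHERENT);	/* hypothetical */
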
diff --git a/include/linux/psci.h b/include/linux/psci.h
index a682fcc91c33..12c4865457ad 100644
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -21,6 +21,8 @@
#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
bool psci_tos_resident_on(int cpu);
+bool psci_power_state_loses_context(u32 state);
+bool psci_power_state_is_valid(u32 state);
struct psci_operations {
int (*cpu_suspend)(u32 state, unsigned long entry_point);
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index d681f6875aef..cfc3ed46cad2 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -2,6 +2,7 @@
#define __LINUX_PWM_H
#include <linux/err.h>
+#include <linux/mutex.h>
#include <linux/of.h>
struct pwm_device;
@@ -87,6 +88,7 @@ enum {
* @pwm: global index of the PWM device
* @chip: PWM chip providing this PWM device
* @chip_data: chip-private data associated with the PWM device
+ * @lock: used to serialize accesses to the PWM device where necessary
* @period: period of the PWM signal (in nanoseconds)
* @duty_cycle: duty cycle of the PWM signal (in nanoseconds)
* @polarity: polarity of the PWM signal
@@ -98,6 +100,7 @@ struct pwm_device {
unsigned int pwm;
struct pwm_chip *chip;
void *chip_data;
+ struct mutex lock;
unsigned int period;
unsigned int duty_cycle;
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index 6e7d5ec65838..9e12000914b3 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -23,6 +23,8 @@ struct qcom_scm_hdcp_req {
u32 val;
};
+extern bool qcom_scm_is_available(void);
+
extern bool qcom_scm_hdcp_available(void);
extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
u32 *resp);
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index 830c4992088d..a5aa7ae671f4 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -101,13 +101,21 @@ static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent
})
/**
- * rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of
- * given type safe against removal of rb_node entry
+ * rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of
+ * given type allowing the backing memory of @pos to be invalidated
*
* @pos: the 'type *' to use as a loop cursor.
* @n: another 'type *' to use as temporary storage
* @root: 'rb_root *' of the rbtree.
* @field: the name of the rb_node field within 'type'.
+ *
+ * rbtree_postorder_for_each_entry_safe() provides a similar guarantee as
+ * list_for_each_entry_safe() and allows the iteration to continue independent
+ * of changes to @pos by the body of the loop.
+ *
+ * Note, however, that it cannot handle other modifications that re-order the
+ * rbtree it is iterating over. This includes calling rb_erase() on @pos, as
+ * rb_erase() may rebalance the tree, causing us to miss some nodes.
*/
#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
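
The canonical use is whole-tree teardown: post-order visits children before their parent, so freeing @pos is safe and no rb_erase() is needed (or allowed). A minimal sketch (struct item is hypothetical; root is an existing struct rb_root):

struct item {
	struct rb_node node;
	/* payload ... */
};

struct item *pos, *n;

rbtree_postorder_for_each_entry_safe(pos, n, &root, node)
	kfree(pos);
root = RB_ROOT;	/* every node has been freed; reset the root */
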
diff --git a/include/linux/sched.h b/include/linux/sched.h
index eeb5066a44fb..edad7a43edea 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -484,9 +484,11 @@ static inline int get_dumpable(struct mm_struct *mm)
#define MMF_DUMP_ELF_HEADERS 6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED 8
+#define MMF_DUMP_DAX_PRIVATE 9
+#define MMF_DUMP_DAX_SHARED 10
#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
-#define MMF_DUMP_FILTER_BITS 7
+#define MMF_DUMP_FILTER_BITS 9
#define MMF_DUMP_FILTER_MASK \
(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
@@ -1570,9 +1572,7 @@ struct task_struct {
unsigned long sas_ss_sp;
size_t sas_ss_size;
- int (*notifier)(void *priv);
- void *notifier_data;
- sigset_t *notifier_mask;
+
struct callback_head *task_works;
struct audit_context *audit_context;
@@ -2464,21 +2464,29 @@ extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
-static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
+static inline int kernel_dequeue_signal(siginfo_t *info)
{
- unsigned long flags;
+ struct task_struct *tsk = current;
+ siginfo_t __info;
int ret;
- spin_lock_irqsave(&tsk->sighand->siglock, flags);
- ret = dequeue_signal(tsk, mask, info);
- spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
+ spin_lock_irq(&tsk->sighand->siglock);
+ ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info);
+ spin_unlock_irq(&tsk->sighand->siglock);
return ret;
}
-extern void block_all_signals(int (*notifier)(void *priv), void *priv,
- sigset_t *mask);
-extern void unblock_all_signals(void);
+static inline void kernel_signal_stop(void)
+{
+ spin_lock_irq(&current->sighand->siglock);
+ if (current->jobctl & JOBCTL_STOP_DEQUEUED)
+ __set_current_state(TASK_STOPPED);
+ spin_unlock_irq(&current->sighand->siglock);
+
+ schedule();
+}
+
extern void release_task(struct task_struct * p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
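
A hedged sketch of how a kernel thread might use the new helper in place of the removed dequeue_signal_lock() (the loop body and the choice of SIGTERM are illustrative, not from this patch):

static int foo_kthread(void *data)
{
	allow_signal(SIGTERM);	/* kthreads must opt in to signals */

	while (!kthread_should_stop()) {
		if (signal_pending(current)) {
			/* Dequeues from current->blocked under siglock;
			 * passing NULL discards the siginfo. */
			if (kernel_dequeue_signal(NULL) == SIGTERM)
				break;
		}
		/* ... do one unit of work ... */
	}
	return 0;
}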
diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h
new file mode 100644
index 000000000000..72ce932c69b2
--- /dev/null
+++ b/include/linux/scpi_protocol.h
@@ -0,0 +1,78 @@
+/*
+ * SCPI Message Protocol driver header
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/types.h>
+
+struct scpi_opp {
+ u32 freq;
+ u32 m_volt;
+} __packed;
+
+struct scpi_dvfs_info {
+ unsigned int count;
+ unsigned int latency; /* in nanoseconds */
+ struct scpi_opp *opps;
+};
+
+enum scpi_sensor_class {
+ TEMPERATURE,
+ VOLTAGE,
+ CURRENT,
+ POWER,
+};
+
+struct scpi_sensor_info {
+ u16 sensor_id;
+ u8 class;
+ u8 trigger_type;
+ char name[20];
+} __packed;
+
+/**
+ * struct scpi_ops - represents the various operations provided
+ * by the SCP through the SCPI message protocol
+ * @get_version: returns the major and minor revision of the SCPI
+ * message protocol
+ * @clk_get_range: gets the clock range limits (min - max in Hz)
+ * @clk_get_val: gets the clock value (in Hz)
+ * @clk_set_val: sets the clock value, setting to 0 will disable the
+ * clock (if supported)
+ * @dvfs_get_idx: gets the Operating Point of the given power domain.
+ * OPP is an index into the list returned by @dvfs_get_info
+ * @dvfs_set_idx: sets the Operating Point of the given power domain.
+ * OPP is an index into the list returned by @dvfs_get_info
+ * @dvfs_get_info: returns the DVFS capabilities of the given power
+ * domain. It includes the OPP list and the latency information
+ */
+struct scpi_ops {
+ u32 (*get_version)(void);
+ int (*clk_get_range)(u16, unsigned long *, unsigned long *);
+ unsigned long (*clk_get_val)(u16);
+ int (*clk_set_val)(u16, unsigned long);
+ int (*dvfs_get_idx)(u8);
+ int (*dvfs_set_idx)(u8, u8);
+ struct scpi_dvfs_info *(*dvfs_get_info)(u8);
+ int (*sensor_get_capability)(u16 *sensors);
+ int (*sensor_get_info)(u16 sensor_id, struct scpi_sensor_info *);
+ int (*sensor_get_value)(u16, u32 *);
+};
+
+#if IS_REACHABLE(CONFIG_ARM_SCPI_PROTOCOL)
+struct scpi_ops *get_scpi_ops(void);
+#else
+static inline struct scpi_ops *get_scpi_ops(void) { return NULL; }
+#endif
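
A hedged consumer sketch, assuming a platform driver and a sensor id 0 (none of this is in the patch): get_scpi_ops() returns NULL until the SCPI driver has probed, so deferring is the natural fallback.

static int foo_probe(struct platform_device *pdev)
{
	struct scpi_ops *ops = get_scpi_ops();
	struct scpi_sensor_info info;
	u32 val;
	int ret;

	if (!ops)
		return -EPROBE_DEFER;	/* SCPI not ready (or disabled) */

	ret = ops->sensor_get_info(0, &info);
	if (ret)
		return ret;

	ret = ops->sensor_get_value(info.sensor_id, &val);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "sensor %s reads %u\n", info.name, val);
	return 0;
}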
diff --git a/include/linux/signal.h b/include/linux/signal.h
index ab1e0392b5ac..92557bbce7e7 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -239,7 +239,6 @@ extern int sigprocmask(int, sigset_t *, sigset_t *);
extern void set_current_blocked(sigset_t *);
extern void __set_current_blocked(const sigset_t *);
extern int show_unhandled_signals;
-extern int sigsuspend(sigset_t *);
struct sigaction {
#ifndef __ARCH_HAS_IRIX_SIGACTION
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 24f4dfd94c51..4355129fff91 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1224,7 +1224,7 @@ static inline int skb_cloned(const struct sk_buff *skb)
static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
- might_sleep_if(pri & __GFP_WAIT);
+ might_sleep_if(gfpflags_allow_blocking(pri));
if (skb_cloned(skb))
return pskb_expand_head(skb, 0, 0, pri);
@@ -1308,7 +1308,7 @@ static inline int skb_shared(const struct sk_buff *skb)
*/
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
- might_sleep_if(pri & __GFP_WAIT);
+ might_sleep_if(gfpflags_allow_blocking(pri));
if (skb_shared(skb)) {
struct sk_buff *nskb = skb_clone(skb, pri);
@@ -1344,7 +1344,7 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
gfp_t pri)
{
- might_sleep_if(pri & __GFP_WAIT);
+ might_sleep_if(gfpflags_allow_blocking(pri));
if (skb_cloned(skb)) {
struct sk_buff *nskb = skb_copy(skb, pri);
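
The same pattern as a standalone sketch (foo_alloc() is hypothetical): gfpflags_allow_blocking() replaces the open-coded __GFP_WAIT test now that the flag has been reworked.

static void *foo_alloc(gfp_t gfp)
{
	/* Warns (with CONFIG_DEBUG_ATOMIC_SLEEP) if a blocking
	 * allocation is attempted from atomic context. */
	might_sleep_if(gfpflags_allow_blocking(gfp));

	return kmalloc(128, gfp);
}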
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 7c82e3b307a3..2037a861e367 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -158,6 +158,24 @@ size_t ksize(const void *);
#endif
/*
+ * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
+ * Intended for arches that get misalignment faults even for 64 bit integer
+ * aligned buffers.
+ */
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
+/*
+ * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
+ * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
+ * aligned pointers.
+ */
+#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
+#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
+#define __assume_page_alignment __assume_aligned(PAGE_SIZE)
+
+/*
* Kmalloc array related definitions
*/
@@ -286,8 +304,8 @@ static __always_inline int kmalloc_index(size_t size)
}
#endif /* !CONFIG_SLOB */
-void *__kmalloc(size_t size, gfp_t flags);
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment;
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment;
void kmem_cache_free(struct kmem_cache *, void *);
/*
@@ -298,11 +316,11 @@ void kmem_cache_free(struct kmem_cache *, void *);
* Note that interrupts must be enabled when calling these functions.
*/
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
#ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
@@ -316,12 +334,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f
#endif
#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment;
#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
gfp_t gfpflags,
- int node, size_t size);
+ int node, size_t size) __assume_slab_alignment;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
@@ -354,10 +372,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
}
#endif /* CONFIG_TRACING */
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);
+extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
#ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
@@ -482,15 +500,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
return __kmalloc_node(size, flags, node);
}
-/*
- * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
- * Intended for arches that get misalignment faults even for 64 bit integer
- * aligned buffers.
- */
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-#endif
-
struct memcg_cache_array {
struct rcu_head rcu;
struct kmem_cache *entries[0];
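
A hedged sketch of the adjusted bulk API (foo_fill() is illustrative): the return value is now the number of objects allocated rather than a bool, and implementations may return either the full count or 0 on failure.

static int foo_fill(struct kmem_cache *cachep, void **objs, int nr)
{
	int allocated = kmem_cache_alloc_bulk(cachep, GFP_KERNEL, nr, objs);

	if (allocated < nr) {
		/* Release any partial allocation and report failure. */
		kmem_cache_free_bulk(cachep, allocated, objs);
		return -ENOMEM;
	}
	return 0;
}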
diff --git a/include/linux/soc/brcmstb/brcmstb.h b/include/linux/soc/brcmstb/brcmstb.h
new file mode 100644
index 000000000000..337ce414e898
--- /dev/null
+++ b/include/linux/soc/brcmstb/brcmstb.h
@@ -0,0 +1,10 @@
+#ifndef __BRCMSTB_SOC_H
+#define __BRCMSTB_SOC_H
+
+/*
+ * Bus Interface Unit control register setup. This must happen early during
+ * boot, before SMP is brought up, and is called by the machine entry point.
+ */
+void brcmstb_biuctrl_init(void);
+
+#endif /* __BRCMSTB_SOC_H */
diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h
index d7e50aa6a4ac..d0cb6d189a0a 100644
--- a/include/linux/soc/qcom/smd.h
+++ b/include/linux/soc/qcom/smd.h
@@ -9,6 +9,14 @@ struct qcom_smd_channel;
struct qcom_smd_lookup;
/**
+ * struct qcom_smd_id - struct used for matching a smd device
+ * @name: name of the channel
+ */
+struct qcom_smd_id {
+ char name[20];
+};
+
+/**
* struct qcom_smd_device - smd device struct
* @dev: the device struct
* @channel: handle to the smd channel for this device
@@ -21,6 +29,7 @@ struct qcom_smd_device {
/**
* struct qcom_smd_driver - smd driver struct
* @driver: underlying device driver
+ * @smd_match_table: static channel match table
* @probe: invoked when the smd channel is found
* @remove: invoked when the smd channel is closed
* @callback: invoked when an inbound message is received on the channel,
@@ -29,6 +38,8 @@ struct qcom_smd_device {
*/
struct qcom_smd_driver {
struct device_driver driver;
+ const struct qcom_smd_id *smd_match_table;
+
int (*probe)(struct qcom_smd_device *dev);
void (*remove)(struct qcom_smd_device *dev);
int (*callback)(struct qcom_smd_device *, const void *, size_t);
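
A hedged sketch of a driver using the new match table (the channel name "rpm_requests" and the foo_* callbacks are illustrative):

static const struct qcom_smd_id foo_smd_match[] = {
	{ .name = "rpm_requests" },
	{ /* sentinel */ }
};

static struct qcom_smd_driver foo_smd_driver = {
	.probe = foo_probe,
	.remove = foo_remove,
	.callback = foo_callback,
	.smd_match_table = foo_smd_match,
	.driver = {
		.name = "foo_smd",
	},
};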
diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h
index bc9630d3aced..785e196ee2ca 100644
--- a/include/linux/soc/qcom/smem.h
+++ b/include/linux/soc/qcom/smem.h
@@ -4,7 +4,7 @@
#define QCOM_SMEM_HOST_ANY -1
int qcom_smem_alloc(unsigned host, unsigned item, size_t size);
-int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size);
+void *qcom_smem_get(unsigned host, unsigned item, size_t *size);
int qcom_smem_get_free_space(unsigned host);
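
A hedged sketch of the new calling convention (SMEM_ITEM_FOO is hypothetical): the item pointer now comes back as the return value, with ERR_PTR() encoding errors, instead of through an out-parameter.

static int foo_read_item(void)
{
	size_t size;
	void *ptr;

	ptr = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_ITEM_FOO, &size);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	/* ... parse the size-byte item at ptr ... */
	return 0;
}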
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
index 6d36dacec4ba..9ec4c147abbc 100644
--- a/include/linux/spi/pxa2xx_spi.h
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -23,7 +23,6 @@ struct dma_chan;
/* device.platform_data for SSP controller devices */
struct pxa2xx_spi_master {
- u32 clock_enable;
u16 num_chipselect;
u8 enable_dma;
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
index 8df43c9f11dc..4397a4824c81 100644
--- a/include/linux/sunrpc/bc_xprt.h
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -38,6 +38,11 @@ void xprt_free_bc_request(struct rpc_rqst *req);
int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs);
void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs);
+/* Socket backchannel transport methods */
+int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs);
+void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs);
+void xprt_free_bc_rqst(struct rpc_rqst *req);
+
/*
* Determine if a shared backchannel is in use
*/
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 03d3b4c92d9f..ed03c9f7f908 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -48,8 +48,10 @@
struct cache_head {
struct hlist_node cache_list;
time_t expiry_time; /* After this time, don't use the data */
- time_t last_refresh; /* If CACHE_PENDING, this is when upcall
- * was sent, else this is when update was received
+ time_t last_refresh; /* If CACHE_PENDING, this is when upcall was
+ * sent, else this is when update was
+ * received, though it is always set to
+ * be *after* ->flush_time.
*/
struct kref ref;
unsigned long flags;
@@ -105,8 +107,12 @@ struct cache_detail {
/* fields below this comment are for internal use
* and should not be touched by cache owners
*/
- time_t flush_time; /* flush all cache items with last_refresh
- * earlier than this */
+ time_t flush_time; /* flush all cache items with
+ * last_refresh at or earlier
+ * than this. last_refresh
+ * is never set at or earlier
+ * than this.
+ */
struct list_head others;
time_t nextcheck;
int entries;
@@ -203,7 +209,7 @@ static inline void cache_put(struct cache_head *h, struct cache_detail *cd)
static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h)
{
return (h->expiry_time < seconds_since_boot()) ||
- (detail->flush_time > h->last_refresh);
+ (detail->flush_time >= h->last_refresh);
}
extern int cache_check(struct cache_detail *detail,
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 7ccc961f33e9..f869807a0d0e 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -105,11 +105,9 @@ struct svc_rdma_chunk_sge {
};
struct svc_rdma_fastreg_mr {
struct ib_mr *mr;
- void *kva;
- struct ib_fast_reg_page_list *page_list;
- int page_list_len;
+ struct scatterlist *sg;
+ int sg_nents;
unsigned long access_flags;
- unsigned long map_len;
enum dma_data_direction direction;
struct list_head frmr_list;
};
@@ -228,9 +226,13 @@ extern void svc_rdma_put_frmr(struct svcxprt_rdma *,
struct svc_rdma_fastreg_mr *);
extern void svc_sq_reap(struct svcxprt_rdma *);
extern void svc_rq_reap(struct svcxprt_rdma *);
-extern struct svc_xprt_class svc_rdma_class;
extern void svc_rdma_prep_reply_hdr(struct svc_rqst *);
+extern struct svc_xprt_class svc_rdma_class;
+#ifdef CONFIG_SUNRPC_BACKCHANNEL
+extern struct svc_xprt_class svc_rdma_bc_class;
+#endif
+
/* svc_rdma.c */
extern int svc_rdma_init(void);
extern void svc_rdma_cleanup(void);
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 0fb9acbb4780..69ef5b3ab038 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -54,6 +54,8 @@ enum rpc_display_format_t {
struct rpc_task;
struct rpc_xprt;
struct seq_file;
+struct svc_serv;
+struct net;
/*
* This describes a complete RPC request
@@ -136,6 +138,12 @@ struct rpc_xprt_ops {
int (*enable_swap)(struct rpc_xprt *xprt);
void (*disable_swap)(struct rpc_xprt *xprt);
void (*inject_disconnect)(struct rpc_xprt *xprt);
+ int (*bc_setup)(struct rpc_xprt *xprt,
+ unsigned int min_reqs);
+ int (*bc_up)(struct svc_serv *serv, struct net *net);
+ void (*bc_free_rqst)(struct rpc_rqst *rqst);
+ void (*bc_destroy)(struct rpc_xprt *xprt,
+ unsigned int max_reqs);
};
/*
@@ -153,6 +161,7 @@ enum xprt_transports {
XPRT_TRANSPORT_TCP = IPPROTO_TCP,
XPRT_TRANSPORT_BC_TCP = IPPROTO_TCP | XPRT_TRANSPORT_BC,
XPRT_TRANSPORT_RDMA = 256,
+ XPRT_TRANSPORT_BC_RDMA = XPRT_TRANSPORT_RDMA | XPRT_TRANSPORT_BC,
XPRT_TRANSPORT_LOCAL = 257,
};
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
index 357e44c1a46b..0ece4ba06f06 100644
--- a/include/linux/sunrpc/xprtsock.h
+++ b/include/linux/sunrpc/xprtsock.h
@@ -44,6 +44,8 @@ struct sock_xprt {
*/
unsigned long sock_state;
struct delayed_work connect_worker;
+ struct work_struct recv_worker;
+ struct mutex recv_mutex;
struct sockaddr_storage srcaddr;
unsigned short srcport;
diff --git a/include/linux/sunxi-rsb.h b/include/linux/sunxi-rsb.h
new file mode 100644
index 000000000000..7e75bb0346d0
--- /dev/null
+++ b/include/linux/sunxi-rsb.h
@@ -0,0 +1,105 @@
+/*
+ * Allwinner Reduced Serial Bus Driver
+ *
+ * Copyright (c) 2015 Chen-Yu Tsai
+ *
+ * Author: Chen-Yu Tsai <wens@csie.org>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#ifndef _SUNXI_RSB_H
+#define _SUNXI_RSB_H
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+struct sunxi_rsb;
+
+/**
+ * struct sunxi_rsb_device - Basic representation of an RSB device
+ * @dev: Driver model representation of the device.
+ * @rsb: RSB controller managing the bus hosting this device.
+ * @rtaddr: This device's runtime address
+ * @hwaddr: This device's hardware address
+ */
+struct sunxi_rsb_device {
+ struct device dev;
+ struct sunxi_rsb *rsb;
+ int irq;
+ u8 rtaddr;
+ u16 hwaddr;
+};
+
+static inline struct sunxi_rsb_device *to_sunxi_rsb_device(struct device *d)
+{
+ return container_of(d, struct sunxi_rsb_device, dev);
+}
+
+static inline void *sunxi_rsb_device_get_drvdata(const struct sunxi_rsb_device *rdev)
+{
+ return dev_get_drvdata(&rdev->dev);
+}
+
+static inline void sunxi_rsb_device_set_drvdata(struct sunxi_rsb_device *rdev,
+ void *data)
+{
+ dev_set_drvdata(&rdev->dev, data);
+}
+
+/**
+ * struct sunxi_rsb_driver - RSB slave device driver
+ * @driver: RSB device drivers should initialize the name and owner fields
+ * of this structure.
+ * @probe: binds this driver to an RSB device.
+ * @remove: unbinds this driver from its RSB device.
+ */
+struct sunxi_rsb_driver {
+ struct device_driver driver;
+ int (*probe)(struct sunxi_rsb_device *rdev);
+ int (*remove)(struct sunxi_rsb_device *rdev);
+};
+
+static inline struct sunxi_rsb_driver *to_sunxi_rsb_driver(struct device_driver *d)
+{
+ return container_of(d, struct sunxi_rsb_driver, driver);
+}
+
+int sunxi_rsb_driver_register(struct sunxi_rsb_driver *rdrv);
+
+/**
+ * sunxi_rsb_driver_unregister() - unregister an RSB client driver
+ * @rdrv: the driver to unregister
+ */
+static inline void sunxi_rsb_driver_unregister(struct sunxi_rsb_driver *rdrv)
+{
+ if (rdrv)
+ driver_unregister(&rdrv->driver);
+}
+
+#define module_sunxi_rsb_driver(__sunxi_rsb_driver) \
+ module_driver(__sunxi_rsb_driver, sunxi_rsb_driver_register, \
+ sunxi_rsb_driver_unregister)
+
+struct regmap *__devm_regmap_init_sunxi_rsb(struct sunxi_rsb_device *rdev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+
+/**
+ * devm_regmap_init_sunxi_rsb(): Initialise managed register map
+ *
+ * @rdev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_sunxi_rsb(rdev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_sunxi_rsb, #config, \
+ rdev, config)
+
+#endif /* _SUNXI_RSB_H */
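
A hedged sketch of a slave driver for this new bus (foo_regmap_config and the foo-* names are illustrative):

static int foo_rsb_probe(struct sunxi_rsb_device *rdev)
{
	struct regmap *regmap;

	/* Managed regmap; freed automatically on unbind. */
	regmap = devm_regmap_init_sunxi_rsb(rdev, &foo_regmap_config);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	sunxi_rsb_device_set_drvdata(rdev, regmap);
	return 0;
}

static struct sunxi_rsb_driver foo_rsb_driver = {
	.probe = foo_rsb_probe,
	.driver = {
		.name = "foo-rsb",
	},
};
module_sunxi_rsb_driver(foo_rsb_driver);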
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index a156b82dd14c..c2b66a277e98 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -524,7 +524,7 @@ asmlinkage long sys_chown(const char __user *filename,
asmlinkage long sys_lchown(const char __user *filename,
uid_t user, gid_t group);
asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
-#ifdef CONFIG_UID16
+#ifdef CONFIG_HAVE_UID16
asmlinkage long sys_chown16(const char __user *filename,
old_uid_t user, old_gid_t group);
asmlinkage long sys_lchown16(const char __user *filename,
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index ea090eaf468c..c6f0f0d0e17e 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -64,10 +64,18 @@ do { \
* a new subdirectory with this name.
* @is_visible: Optional: Function to return permissions associated with an
* attribute of the group. Will be called repeatedly for each
- * attribute in the group. Only read/write permissions as well as
- * SYSFS_PREALLOC are accepted. Must return 0 if an attribute is
- * not visible. The returned value will replace static permissions
- * defined in struct attribute or struct bin_attribute.
+ * non-binary attribute in the group. Only read/write
+ * permissions as well as SYSFS_PREALLOC are accepted. Must
+ * return 0 if an attribute is not visible. The returned value
+ * will replace static permissions defined in struct attribute.
+ * @is_bin_visible:
+ * Optional: Function to return permissions associated with a
+ * binary attribute of the group. Will be called repeatedly
+ * for each binary attribute in the group. Only read/write
+ * permissions as well as SYSFS_PREALLOC are accepted. Must
+ * return 0 if a binary attribute is not visible. The returned
+ * value will replace static permissions defined in
+ * struct bin_attribute.
* @attrs: Pointer to NULL terminated list of attributes.
* @bin_attrs: Pointer to NULL terminated list of binary attributes.
* Either attrs or bin_attrs or both must be provided.
@@ -76,6 +84,8 @@ struct attribute_group {
const char *name;
umode_t (*is_visible)(struct kobject *,
struct attribute *, int);
+ umode_t (*is_bin_visible)(struct kobject *,
+ struct bin_attribute *, int);
struct attribute **attrs;
struct bin_attribute **bin_attrs;
};
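
A hedged sketch of the new callback (bin_attr_foo_eeprom and foo_device_is_ready() are hypothetical): returning 0 hides the binary attribute, any other mode overrides the static one declared in struct bin_attribute.

static umode_t foo_is_bin_visible(struct kobject *kobj,
				  struct bin_attribute *attr, int n)
{
	return foo_device_is_ready(kobj) ? attr->attr.mode : 0;
}

static struct bin_attribute *foo_bin_attrs[] = {
	&bin_attr_foo_eeprom,
	NULL,
};

static const struct attribute_group foo_group = {
	.bin_attrs	= foo_bin_attrs,
	.is_bin_visible	= foo_is_bin_visible,
};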
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index c906f4534581..b386361ba3e8 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -397,6 +397,13 @@ static inline void fastopen_queue_tune(struct sock *sk, int backlog)
queue->fastopenq.max_qlen = min_t(unsigned int, backlog, somaxconn);
}
+static inline void tcp_move_syn(struct tcp_sock *tp,
+ struct request_sock *req)
+{
+ tp->saved_syn = req->saved_syn;
+ req->saved_syn = NULL;
+}
+
static inline void tcp_saved_syn_free(struct tcp_sock *tp)
{
kfree(tp->saved_syn);
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 157d366e761b..613c29bd6baf 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -44,9 +44,11 @@
#define THERMAL_WEIGHT_DEFAULT 0
/* Unit conversion macros */
-#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \
- ((long)t-2732+5)/10 : ((long)t-2732-5)/10)
-#define CELSIUS_TO_KELVIN(t) ((t)*10+2732)
+#define DECI_KELVIN_TO_CELSIUS(t) ({ \
+ long _t = (t); \
+ ((_t-2732 >= 0) ? (_t-2732+5)/10 : (_t-2732-5)/10); \
+})
+#define CELSIUS_TO_DECI_KELVIN(t) ((t)*10+2732)
#define DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, off) (((t) - (off)) * 100)
#define DECI_KELVIN_TO_MILLICELSIUS(t) DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, 2732)
#define MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, off) (((t) / 100) + (off))
@@ -436,7 +438,8 @@ static inline void thermal_zone_device_unregister(
static inline int thermal_zone_bind_cooling_device(
struct thermal_zone_device *tz, int trip,
struct thermal_cooling_device *cdev,
- unsigned long upper, unsigned long lower)
+ unsigned long upper, unsigned long lower,
+ unsigned int weight)
{ return -ENODEV; }
static inline int thermal_zone_unbind_cooling_device(
struct thermal_zone_device *tz, int trip,
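
A worked illustration of why the macro became a statement expression (read_temp_deci_kelvin() is hypothetical): the argument is now evaluated exactly once, where the old macro expanded it several times, and e.g. 3000 dK converts as (3000 - 2732 + 5) / 10 = 27 degrees C, rounded to nearest.

static long foo_read_celsius(void)
{
	/* Safe even with a side-effecting argument: _t snapshots it. */
	return DECI_KELVIN_TO_CELSIUS(read_temp_deci_kelvin());
}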
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 5b04b0a5375b..5e31f1b99037 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -607,7 +607,7 @@ extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
/* tty_audit.c */
#ifdef CONFIG_AUDIT
-extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
+extern void tty_audit_add_data(struct tty_struct *tty, const void *data,
size_t size, unsigned icanon);
extern void tty_audit_exit(void);
extern void tty_audit_fork(struct signal_struct *sig);
@@ -615,8 +615,8 @@ extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
extern void tty_audit_push(struct tty_struct *tty);
extern int tty_audit_push_current(void);
#else
-static inline void tty_audit_add_data(struct tty_struct *tty,
- unsigned char *data, size_t size, unsigned icanon)
+static inline void tty_audit_add_data(struct tty_struct *tty, const void *data,
+ size_t size, unsigned icanon)
{
}
static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
diff --git a/include/linux/types.h b/include/linux/types.h
index 70d8500bddf1..70dd3dfde631 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -35,7 +35,7 @@ typedef __kernel_gid16_t gid16_t;
typedef unsigned long uintptr_t;
-#ifdef CONFIG_UID16
+#ifdef CONFIG_HAVE_UID16
/* This is defined by include/asm-{arch}/posix_types.h */
typedef __kernel_old_uid_t old_uid_t;
typedef __kernel_old_gid_t old_gid_t;
diff --git a/include/linux/usb/gadget_configfs.h b/include/linux/usb/gadget_configfs.h
index d74c0ae989d5..c36e95730de1 100644
--- a/include/linux/usb/gadget_configfs.h
+++ b/include/linux/usb/gadget_configfs.h
@@ -7,9 +7,10 @@ int check_user_usb_string(const char *name,
struct usb_gadget_strings *stringtab_dev);
#define GS_STRINGS_W(__struct, __name) \
- static ssize_t __struct##_##__name##_store(struct __struct *gs, \
+static ssize_t __struct##_##__name##_store(struct config_item *item, \
const char *page, size_t len) \
{ \
+ struct __struct *gs = to_##__struct(item); \
int ret; \
\
ret = usb_string_copy(page, &gs->__name); \
@@ -19,30 +20,20 @@ int check_user_usb_string(const char *name,
}
#define GS_STRINGS_R(__struct, __name) \
- static ssize_t __struct##_##__name##_show(struct __struct *gs, \
- char *page) \
+static ssize_t __struct##_##__name##_show(struct config_item *item, char *page) \
{ \
+ struct __struct *gs = to_##__struct(item); \
return sprintf(page, "%s\n", gs->__name ?: ""); \
}
-#define GS_STRING_ITEM_ATTR(struct_name, name) \
- static struct struct_name##_attribute struct_name##_##name = \
- __CONFIGFS_ATTR(name, S_IRUGO | S_IWUSR, \
- struct_name##_##name##_show, \
- struct_name##_##name##_store)
-
#define GS_STRINGS_RW(struct_name, _name) \
GS_STRINGS_R(struct_name, _name) \
GS_STRINGS_W(struct_name, _name) \
- GS_STRING_ITEM_ATTR(struct_name, _name)
+ CONFIGFS_ATTR(struct_name##_, _name)
#define USB_CONFIG_STRING_RW_OPS(struct_in) \
- CONFIGFS_ATTR_OPS(struct_in); \
- \
static struct configfs_item_operations struct_in##_langid_item_ops = { \
.release = struct_in##_attr_release, \
- .show_attribute = struct_in##_attr_show, \
- .store_attribute = struct_in##_attr_store, \
}; \
\
static struct config_item_type struct_in##_langid_type = { \
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index ddb440975382..610a86a892b8 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -44,6 +44,9 @@ struct vfio_device_ops {
void (*request)(void *device_data, unsigned int count);
};
+extern struct iommu_group *vfio_iommu_group_get(struct device *dev);
+extern void vfio_iommu_group_put(struct iommu_group *group, struct device *dev);
+
extern int vfio_add_group_dev(struct device *dev,
const struct vfio_device_ops *ops,
void *device_data);
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index b483abd34493..69e1d4a1f1b3 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -1,10 +1,31 @@
/*
+ * vga_switcheroo.h - Support for laptop with dual GPU using one set of outputs
+ *
* Copyright (c) 2010 Red Hat Inc.
* Author : Dave Airlie <airlied@redhat.com>
*
- * Licensed under GPLv2
+ * Copyright (c) 2015 Lukas Wunner <lukas@wunner.de>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS
+ * IN THE SOFTWARE.
*
- * vga_switcheroo.h - Support for laptop with dual GPU using one set of outputs
*/
#ifndef _LINUX_VGA_SWITCHEROO_H_
@@ -14,28 +35,85 @@
struct pci_dev;
+/**
+ * enum vga_switcheroo_state - client power state
+ * @VGA_SWITCHEROO_OFF: off
+ * @VGA_SWITCHEROO_ON: on
+ * @VGA_SWITCHEROO_NOT_FOUND: client has not registered with vga_switcheroo.
+ * Only used in vga_switcheroo_get_client_state() which in turn is only
+ * called from hda_intel.c
+ *
+ * Client power state.
+ */
enum vga_switcheroo_state {
VGA_SWITCHEROO_OFF,
VGA_SWITCHEROO_ON,
/* below are referred only from vga_switcheroo_get_client_state() */
- VGA_SWITCHEROO_INIT,
VGA_SWITCHEROO_NOT_FOUND,
};
+/**
+ * enum vga_switcheroo_client_id - client identifier
+ * @VGA_SWITCHEROO_UNKNOWN_ID: initial identifier assigned to vga clients.
+ * Determining the id requires the handler, so GPUs are given their
+ * true id in a delayed fashion in vga_switcheroo_enable()
+ * @VGA_SWITCHEROO_IGD: integrated graphics device
+ * @VGA_SWITCHEROO_DIS: discrete graphics device
+ * @VGA_SWITCHEROO_MAX_CLIENTS: currently no more than two GPUs are supported
+ *
+ * Client identifier. Audio clients use the same identifier with the 0x100 bit set.
+ */
enum vga_switcheroo_client_id {
+ VGA_SWITCHEROO_UNKNOWN_ID = -1,
VGA_SWITCHEROO_IGD,
VGA_SWITCHEROO_DIS,
VGA_SWITCHEROO_MAX_CLIENTS,
};
+/**
+ * struct vga_switcheroo_handler - handler callbacks
+ * @init: initialize handler.
+ * Optional. This gets called when vga_switcheroo is enabled, i.e. when
+ * two vga clients have registered. It allows the handler to perform
+ * some delayed initialization that depends on the existence of the
+ * vga clients. Currently only the radeon and amdgpu drivers use this.
+ * The return value is ignored
+ * @switchto: switch outputs to given client.
+ * Mandatory. For muxless machines this should be a no-op. Returning 0
+ * denotes success, anything else failure (in which case the switch is
+ * aborted)
+ * @power_state: cut or reinstate power of given client.
+ * Optional. The return value is ignored
+ * @get_client_id: determine if given pci device is integrated or discrete GPU.
+ * Mandatory
+ *
+ * Handler callbacks. The multiplexer itself. The @switchto and @get_client_id
+ * methods are mandatory, all others may be set to NULL.
+ */
struct vga_switcheroo_handler {
+ int (*init)(void);
int (*switchto)(enum vga_switcheroo_client_id id);
int (*power_state)(enum vga_switcheroo_client_id id,
enum vga_switcheroo_state state);
- int (*init)(void);
- int (*get_client_id)(struct pci_dev *pdev);
+ enum vga_switcheroo_client_id (*get_client_id)(struct pci_dev *pdev);
};
+/**
+ * struct vga_switcheroo_client_ops - client callbacks
+ * @set_gpu_state: do the equivalent of suspend/resume for the card.
+ * Mandatory. This should not cut power to the discrete GPU,
+ * which is the job of the handler
+ * @reprobe: poll outputs.
+ * Optional. This gets called after waking the GPU and switching
+ * the outputs to it
+ * @can_switch: check if the device is in a position to switch now.
+ * Mandatory. The client should return false if a user space process
+ * has one of its device files open
+ *
+ * Client callbacks. A client can be either a GPU or an audio device on a GPU.
+ * The @set_gpu_state and @can_switch methods are mandatory, @reprobe may be
+ * set to NULL. For audio clients, the @reprobe member is bogus.
+ */
struct vga_switcheroo_client_ops {
void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state);
void (*reprobe)(struct pci_dev *dev);
@@ -49,17 +127,17 @@ int vga_switcheroo_register_client(struct pci_dev *dev,
bool driver_power_control);
int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
- int id, bool active);
+ enum vga_switcheroo_client_id id);
void vga_switcheroo_client_fb_set(struct pci_dev *dev,
struct fb_info *info);
-int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler);
+int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler);
void vga_switcheroo_unregister_handler(void);
int vga_switcheroo_process_delayed_switch(void);
-int vga_switcheroo_get_client_state(struct pci_dev *dev);
+enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev);
void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
@@ -72,13 +150,13 @@ static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
static inline int vga_switcheroo_register_client(struct pci_dev *dev,
const struct vga_switcheroo_client_ops *ops, bool driver_power_control) { return 0; }
static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {}
-static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; }
+static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler) { return 0; }
static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
- int id, bool active) { return 0; }
+ enum vga_switcheroo_client_id id) { return 0; }
static inline void vga_switcheroo_unregister_handler(void) {}
static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
-static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
+static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
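
A hedged handler sketch against the retyped interface (the vendor test and foo_* names are illustrative): @get_client_id now returns the enum, and registration takes a const handler.

static enum vga_switcheroo_client_id foo_get_client_id(struct pci_dev *pdev)
{
	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
		return VGA_SWITCHEROO_IGD;
	return VGA_SWITCHEROO_DIS;
}

static int foo_switchto(enum vga_switcheroo_client_id id)
{
	return 0;	/* muxless: nothing to switch */
}

static const struct vga_switcheroo_handler foo_handler = {
	.switchto	= foo_switchto,
	.get_client_id	= foo_get_client_id,
};

/* ... error = vga_switcheroo_register_handler(&foo_handler); ... */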
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index d74a0e907b9e..027b1f43f12d 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -24,8 +24,8 @@ struct watchdog_device;
* @stop: The routine for stopping the watchdog device.
* @ping: The routine that sends a keepalive ping to the watchdog device.
* @status: The routine that shows the status of the watchdog device.
- * @set_timeout:The routine for setting the watchdog devices timeout value.
- * @get_timeleft:The routine that get's the time that's left before a reset.
+ * @set_timeout:The routine for setting the watchdog device's timeout value (in seconds).
+ * @get_timeleft:The routine that gets the time left before a reset (in seconds).
* @ref: The ref operation for dyn. allocated watchdog_device structs
* @unref: The unref operation for dyn. allocated watchdog_device structs
* @ioctl: The routines that handles extra ioctl calls.
@@ -33,7 +33,7 @@ struct watchdog_device;
* The watchdog_ops structure contains a list of low-level operations
* that control a watchdog device. It also contains the module that owns
* these operations. The start and stop function are mandatory, all other
- * functions are optonal.
+ * functions are optional.
*/
struct watchdog_ops {
struct module *owner;
@@ -59,9 +59,9 @@ struct watchdog_ops {
* @info: Pointer to a watchdog_info structure.
* @ops: Pointer to the list of watchdog operations.
* @bootstatus: Status of the watchdog device at boot.
- * @timeout: The watchdog devices timeout value.
- * @min_timeout:The watchdog devices minimum timeout value.
- * @max_timeout:The watchdog devices maximum timeout value.
+ * @timeout: The watchdog device's timeout value (in seconds).
+ * @min_timeout:The watchdog device's minimum timeout value (in seconds).
+ * @max_timeout:The watchdog device's maximum timeout value (in seconds).
* @driver-data:Pointer to the driver's private data.
* @lock: Lock for watchdog core internal use only.
* @status: Field that contains the devices internal status bits.
@@ -119,8 +119,15 @@ static inline void watchdog_set_nowayout(struct watchdog_device *wdd, bool noway
/* Use the following function to check if a timeout value is invalid */
static inline bool watchdog_timeout_invalid(struct watchdog_device *wdd, unsigned int t)
{
- return ((wdd->max_timeout != 0) &&
- (t < wdd->min_timeout || t > wdd->max_timeout));
+ /*
+ * The timeout is invalid if
+ * - the requested value is smaller than the configured minimum timeout,
+ * or
+ * - a maximum timeout is configured, and the requested value is larger
+ * than the maximum timeout.
+ */
+ return t < wdd->min_timeout ||
+ (wdd->max_timeout && t > wdd->max_timeout);
}
/* Use the following functions to manipulate watchdog driver specific data */
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 91b0a68d38dc..89474b9d260c 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -21,15 +21,19 @@ struct dentry;
struct xattr_handler {
const char *prefix;
- int flags; /* fs private flags passed back to the handlers */
- size_t (*list)(struct dentry *dentry, char *list, size_t list_size,
- const char *name, size_t name_len, int handler_flags);
- int (*get)(struct dentry *dentry, const char *name, void *buffer,
- size_t size, int handler_flags);
- int (*set)(struct dentry *dentry, const char *name, const void *buffer,
- size_t size, int flags, int handler_flags);
+ int flags; /* fs private flags */
+ size_t (*list)(const struct xattr_handler *, struct dentry *dentry,
+ char *list, size_t list_size, const char *name,
+ size_t name_len);
+ int (*get)(const struct xattr_handler *, struct dentry *dentry,
+ const char *name, void *buffer, size_t size);
+ int (*set)(const struct xattr_handler *, struct dentry *dentry,
+ const char *name, const void *buffer, size_t size,
+ int flags);
};
+const char *xattr_full_name(const struct xattr_handler *, const char *);
+
struct xattr {
const char *name;
void *value;
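
A hedged sketch of the new handler shape (the foo_* pieces are hypothetical): each callback now receives the handler itself, so a generic implementation can recover the full attribute name via xattr_full_name().

static int foo_xattr_get(const struct xattr_handler *handler,
			 struct dentry *dentry, const char *name,
			 void *buffer, size_t size)
{
	/* name points past the prefix; rebuild e.g. "user.<name>". */
	const char *full_name = xattr_full_name(handler, name);

	return foo_getxattr(dentry, full_name, buffer, size);
}

static const struct xattr_handler foo_xattr_user_handler = {
	.prefix	= XATTR_USER_PREFIX,
	.get	= foo_xattr_get,
};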
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
index 42f8ec992452..2e97b7707dff 100644
--- a/include/linux/zpool.h
+++ b/include/linux/zpool.h
@@ -38,10 +38,10 @@ enum zpool_mapmode {
bool zpool_has_pool(char *type);
-struct zpool *zpool_create_pool(char *type, char *name,
+struct zpool *zpool_create_pool(const char *type, const char *name,
gfp_t gfp, const struct zpool_ops *ops);
-char *zpool_get_type(struct zpool *pool);
+const char *zpool_get_type(struct zpool *pool);
void zpool_destroy_pool(struct zpool *pool);
@@ -83,7 +83,9 @@ struct zpool_driver {
atomic_t refcount;
struct list_head list;
- void *(*create)(char *name, gfp_t gfp, const struct zpool_ops *ops,
+ void *(*create)(const char *name,
+ gfp_t gfp,
+ const struct zpool_ops *ops,
struct zpool *zpool);
void (*destroy)(void *pool);
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 6398dfae53f1..34eb16098a33 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -41,7 +41,7 @@ struct zs_pool_stats {
struct zs_pool;
-struct zs_pool *zs_create_pool(char *name, gfp_t flags);
+struct zs_pool *zs_create_pool(const char *name, gfp_t flags);
void zs_destroy_pool(struct zs_pool *pool);
unsigned long zs_malloc(struct zs_pool *pool, size_t size);
diff --git a/include/linux/zutil.h b/include/linux/zutil.h
index 6adfa9a6ffe9..663689521759 100644
--- a/include/linux/zutil.h
+++ b/include/linux/zutil.h
@@ -68,10 +68,10 @@ typedef uLong (*check_func) (uLong check, const Byte *buf,
An Adler-32 checksum is almost as reliable as a CRC32 but can be computed
much faster. Usage example:
- uLong adler = adler32(0L, NULL, 0);
+ uLong adler = zlib_adler32(0L, NULL, 0);
while (read_buffer(buffer, length) != EOF) {
- adler = adler32(adler, buffer, length);
+ adler = zlib_adler32(adler, buffer, length);
}
if (adler != original_adler) error();
*/