Diffstat (limited to 'include/linux')
 include/linux/acpi.h                         |   4
 include/linux/bitops.h                       |   2
 include/linux/blkdev.h                       |   6
 include/linux/bpf.h                          |   5
 include/linux/brcmphy.h                      |   1
 include/linux/cgroup-defs.h                  | 139
 include/linux/cgroup.h                       | 113
 include/linux/configfs.h                     |  10
 include/linux/cpufreq.h                      |   1
 include/linux/dns_resolver.h                 |   2
 include/linux/enclosure.h                    |   4
 include/linux/gfp.h                          |   2
 include/linux/hashtable.h                    |   4
 include/linux/hdlc.h                         |   2
 include/linux/if_pppox.h                     |   1
 include/linux/if_team.h                      |   1
 include/linux/if_vlan.h                      |   4
 include/linux/inet_diag.h                    |   9
 include/linux/ipv6.h                         |   2
 include/linux/irqchip/arm-gic-v3.h           |   1
 include/linux/jump_label.h                   |   2
 include/linux/kernfs.h                       |  12
 include/linux/kmemleak.h                     |   2
 include/linux/kref.h                         |  33
 include/linux/kvm_host.h                     |  11
 include/linux/libata.h                       |   1
 include/linux/lightnvm.h                     | 197
 include/linux/lockdep.h                      |   2
 include/linux/mlx4/device.h                  |  11
 include/linux/mlx4/driver.h                  |   5
 include/linux/mlx5/device.h                  |  44
 include/linux/mlx5/driver.h                  |  30
 include/linux/mlx5/flow_table.h              |  54
 include/linux/mlx5/fs.h                      |  93
 include/linux/mlx5/mlx5_ifc.h                | 206
 include/linux/mlx5/vport.h                   |  37
 include/linux/mmdebug.h                      |   1
 include/linux/mroute.h                       |  76
 include/linux/net.h                          |  13
 include/linux/netdev_features.h              |  14
 include/linux/netdevice.h                    | 261
 include/linux/netfilter/nf_conntrack_sctp.h  |  13
 include/linux/netfilter/nfnetlink.h          |   2
 include/linux/netlink.h                      |   2
 include/linux/nfs_xdr.h                      |   1
 include/linux/of_dma.h                       |   2
 include/linux/of_irq.h                       |  19
 include/linux/pci.h                          |   9
 include/linux/pci_ids.h                      |   2
 include/linux/perf_event.h                   |   6
 include/linux/phy.h                          |   6
 include/linux/pim.h                          |   5
 include/linux/platform_data/edma.h           |   2
 include/linux/proportions.h                  |   2
 include/linux/qed/common_hsi.h               |   2
 include/linux/qed/qed_chain.h                |   3
 include/linux/qed/qed_if.h                   |  17
 include/linux/rhashtable.h                   | 100
 include/linux/scpi_protocol.h                |   2
 include/linux/sh_eth.h                       |   2
 include/linux/signal.h                       |   1
 include/linux/skbuff.h                       | 154
 include/linux/slab.h                         |  45
 include/linux/soc/ti/knav_dma.h              |  22
 include/linux/sock_diag.h                    |   2
 include/linux/stop_machine.h                 |   6
 include/linux/syscalls.h                     |   2
 include/linux/thermal.h                      |   3
 include/linux/tty.h                          |   6
 include/linux/types.h                        |   2
 include/linux/uprobes.h                      |   2
 include/linux/usb/cdc_ncm.h                  |   1
 include/linux/usb/quirks.h                   |   3
 include/linux/vfio.h                         |   3
 include/linux/vmstat.h                       |   6
 include/linux/wait.h                         |  31
 76 files changed, 1407 insertions(+), 497 deletions(-)
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 054833939995..1991aea2ec4c 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -870,8 +870,8 @@ static inline int acpi_dev_get_property(struct acpi_device *adev,
}
static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
- const char *name, const char *cells_name,
- size_t index, struct acpi_reference_args *args)
+ const char *name, size_t index,
+ struct acpi_reference_args *args)
{
return -ENXIO;
}
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 2b8ed123ad36..defeaac0745f 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -107,7 +107,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
*/
static inline __u32 rol32(__u32 word, unsigned int shift)
{
- return (word << shift) | (word >> (32 - shift));
+ return (word << shift) | (word >> ((-shift) & 31));
}
/**
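Note on the rol32() change above: with shift == 0 the old expression computed word >> 32, which is undefined behaviour on a 32-bit operand in C, while (-shift) & 31 folds that case back to a right shift of 0. A minimal user-space sketch of the fixed form (the function and test values here are illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Rotate left; (-shift) & 31 maps shift == 0 to a right shift of 0,
 * avoiding the undefined 32-bit shift that (32 - shift) would produce. */
static inline uint32_t rol32_sketch(uint32_t word, unsigned int shift)
{
	return (word << shift) | (word >> ((-shift) & 31));
}

int main(void)
{
	printf("%08x\n", rol32_sketch(0xdeadbeef, 0)); /* deadbeef: unchanged */
	printf("%08x\n", rol32_sketch(0xdeadbeef, 8)); /* adbeefde */
	return 0;
}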
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3fe27f8d91f0..c70e3588a48c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -254,6 +254,7 @@ struct queue_limits {
unsigned long virt_boundary_mask;
unsigned int max_hw_sectors;
+ unsigned int max_dev_sectors;
unsigned int chunk_sectors;
unsigned int max_sectors;
unsigned int max_segment_size;
@@ -773,7 +774,6 @@ extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
unsigned int len);
-extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
struct bio_set *bs, gfp_t gfp_mask,
@@ -794,7 +794,10 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
struct scsi_ioctl_command __user *);
+extern int blk_queue_enter(struct request_queue *q, gfp_t gfp);
+extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
+extern void blk_start_queue_async(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
@@ -958,7 +961,6 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
-extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
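For context on the newly exported pair: blk_queue_enter() takes a usage reference on the queue (failing once the queue is dying) and blk_queue_exit() drops it. A hedged sketch of the expected pairing, using the signatures declared above with a hypothetical caller:

#include <linux/blkdev.h>

/* Hypothetical helper: hold a queue usage reference across submission */
static int submit_to_queue(struct request_queue *q)
{
	int ret;

	ret = blk_queue_enter(q, GFP_KERNEL);	/* nonzero if queue is going away */
	if (ret)
		return ret;

	/* ... issue work against q ... */

	blk_queue_exit(q);
	return 0;
}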
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index de464e6683b6..83d1926c61e4 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -40,6 +40,7 @@ struct bpf_map {
struct user_struct *user;
const struct bpf_map_ops *ops;
struct work_struct work;
+ atomic_t usercnt;
};
struct bpf_map_type_list {
@@ -167,8 +168,10 @@ struct bpf_prog *bpf_prog_get(u32 ufd);
void bpf_prog_put(struct bpf_prog *prog);
void bpf_prog_put_rcu(struct bpf_prog *prog);
-struct bpf_map *bpf_map_get(u32 ufd);
+struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
+void bpf_map_inc(struct bpf_map *map, bool uref);
+void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
extern int sysctl_unprivileged_bpf_disabled;
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 59f4a7304419..f0ba9c2ec639 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -26,6 +26,7 @@
#define PHY_ID_BCM7366 0x600d8490
#define PHY_ID_BCM7425 0x600d86b0
#define PHY_ID_BCM7429 0x600d8730
+#define PHY_ID_BCM7435 0x600d8750
#define PHY_ID_BCM7439 0x600d8480
#define PHY_ID_BCM7439_2 0xae025080
#define PHY_ID_BCM7445 0x600d8510
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 60d44b26276d..e5f4164cbd99 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -90,7 +90,6 @@ enum {
*/
struct cgroup_file {
/* do not access any fields from outside cgroup core */
- struct list_head node; /* anchored at css->files */
struct kernfs_node *kn;
};
@@ -134,9 +133,6 @@ struct cgroup_subsys_state {
*/
u64 serial_nr;
- /* all cgroup_files associated with this css */
- struct list_head files;
-
/* percpu_ref killing and RCU release */
struct rcu_head rcu_head;
struct work_struct destroy_work;
@@ -235,6 +231,14 @@ struct cgroup {
int id;
/*
+ * The depth this cgroup is at. The root is at depth zero and each
+ * step down the hierarchy increments the level. This along with
+ * ancestor_ids[] can determine whether a given cgroup is a
+ * descendant of another without traversing the hierarchy.
+ */
+ int level;
+
+ /*
* Each non-empty css_set associated with this cgroup contributes
* one to populated_cnt. All children with non-zero populated_cnt
* of their own contribute one. The count is zero iff there's no
@@ -289,6 +293,9 @@ struct cgroup {
/* used to schedule release agent */
struct work_struct release_agent_work;
+
+ /* ids of the ancestors at each level including self */
+ int ancestor_ids[];
};
/*
@@ -308,6 +315,9 @@ struct cgroup_root {
/* The root cgroup. Root is destroyed on its release. */
struct cgroup cgrp;
+ /* for cgrp->ancestor_ids[0] */
+ int cgrp_ancestor_id_storage;
+
/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
atomic_t nr_cgrps;
@@ -426,12 +436,9 @@ struct cgroup_subsys {
void (*css_reset)(struct cgroup_subsys_state *css);
void (*css_e_css_changed)(struct cgroup_subsys_state *css);
- int (*can_attach)(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset);
- void (*cancel_attach)(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset);
- void (*attach)(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset);
+ int (*can_attach)(struct cgroup_taskset *tset);
+ void (*cancel_attach)(struct cgroup_taskset *tset);
+ void (*attach)(struct cgroup_taskset *tset);
int (*can_fork)(struct task_struct *task, void **priv_p);
void (*cancel_fork)(struct task_struct *task, void *priv);
void (*fork)(struct task_struct *task, void *priv);
@@ -528,4 +535,116 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
#endif /* CONFIG_CGROUPS */
+#ifdef CONFIG_SOCK_CGROUP_DATA
+
+/*
+ * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
+ * per-socket cgroup information except for memcg association.
+ *
+ * On legacy hierarchies, net_prio and net_cls controllers directly set
+ * attributes on each sock which can then be tested by the network layer.
+ * On the default hierarchy, each sock is associated with the cgroup it was
+ * created in and the networking layer can match the cgroup directly.
+ *
+ * To avoid carrying all three cgroup related fields separately in sock,
+ * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer.
+ * On boot, sock_cgroup_data records the cgroup that the sock was created
+ * in so that cgroup2 matches can be made; however, once either net_prio or
+ * net_cls starts being used, the area is overridden to carry prioidx and/or
+ * classid. The two modes are distinguished by whether the lowest bit is
+ * set. Clear bit indicates cgroup pointer while set bit prioidx and
+ * classid.
+ *
+ * While userland may start using net_prio or net_cls at any time, once
+ * either is used, cgroup2 matching no longer works. There is no reason to
+ * mix the two and this is in line with how legacy and v2 compatibility is
+ * handled. On mode switch, cgroup references which are already being
+ * pointed to by socks may be leaked. While this can be remedied by adding
+ * synchronization around sock_cgroup_data, given that the number of leaked
+ * cgroups is bound and highly unlikely to be high, this seems to be the
+ * better trade-off.
+ */
+struct sock_cgroup_data {
+ union {
+#ifdef __LITTLE_ENDIAN
+ struct {
+ u8 is_data;
+ u8 padding;
+ u16 prioidx;
+ u32 classid;
+ } __packed;
+#else
+ struct {
+ u32 classid;
+ u16 prioidx;
+ u8 padding;
+ u8 is_data;
+ } __packed;
+#endif
+ u64 val;
+ };
+};
+
+/*
+ * There's a theoretical window where the following accessors race with
+ * updaters and return part of the previous pointer as the prioidx or
+ * classid. Such races are short-lived and the result isn't critical.
+ */
+static inline u16 sock_cgroup_prioidx(struct sock_cgroup_data *skcd)
+{
+ /* fallback to 1 which is always the ID of the root cgroup */
+ return (skcd->is_data & 1) ? skcd->prioidx : 1;
+}
+
+static inline u32 sock_cgroup_classid(struct sock_cgroup_data *skcd)
+{
+ /* fallback to 0 which is the unconfigured default classid */
+ return (skcd->is_data & 1) ? skcd->classid : 0;
+}
+
+/*
+ * If invoked concurrently, the updaters may clobber each other. The
+ * caller is responsible for synchronization.
+ */
+static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
+ u16 prioidx)
+{
+ struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
+
+ if (sock_cgroup_prioidx(&skcd_buf) == prioidx)
+ return;
+
+ if (!(skcd_buf.is_data & 1)) {
+ skcd_buf.val = 0;
+ skcd_buf.is_data = 1;
+ }
+
+ skcd_buf.prioidx = prioidx;
+ WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */
+}
+
+static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
+ u32 classid)
+{
+ struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
+
+ if (sock_cgroup_classid(&skcd_buf) == classid)
+ return;
+
+ if (!(skcd_buf.is_data & 1)) {
+ skcd_buf.val = 0;
+ skcd_buf.is_data = 1;
+ }
+
+ skcd_buf.classid = classid;
+ WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */
+}
+
+#else /* CONFIG_SOCK_CGROUP_DATA */
+
+struct sock_cgroup_data {
+};
+
+#endif /* CONFIG_SOCK_CGROUP_DATA */
+
#endif /* _LINUX_CGROUP_DEFS_H */
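To make the overloaded layout concrete, here is a stand-alone sketch of the same low-bit tagging trick, assuming a little-endian host (mirroring the __LITTLE_ENDIAN branch above); the union and field names are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Little-endian layout: bit 0 of .val aliases bit 0 of .is_data, so a
 * cleared bit means "pointer mode" (aligned pointers have bit 0 clear)
 * and a set bit means "(prioidx, classid) data mode". */
union tagged_word {
	struct {
		uint8_t  is_data;
		uint8_t  padding;
		uint16_t prioidx;
		uint32_t classid;
	};
	uint64_t val;
};

int main(void)
{
	union tagged_word w = { .val = (uint64_t)(uintptr_t)&w }; /* pointer mode */

	printf("data mode? %d\n", w.is_data & 1);	/* 0 */

	w.val = 0;		/* switch modes, as the setters above do */
	w.is_data = 1;
	w.prioidx = 42;
	printf("data mode? %d prioidx=%u\n", w.is_data & 1, w.prioidx);
	return 0;
}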
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 22e3754f89c5..322a28482745 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -81,13 +81,15 @@ struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
struct cgroup_subsys *ss);
-bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
+struct cgroup *cgroup_get_from_path(const char *path);
+
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
+void cgroup_file_notify(struct cgroup_file *cfile);
char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
@@ -119,8 +121,10 @@ struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
struct cgroup_subsys_state *css);
-struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
-struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
+struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
+ struct cgroup_subsys_state **dst_cssp);
+struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
+ struct cgroup_subsys_state **dst_cssp);
void css_task_iter_start(struct cgroup_subsys_state *css,
struct css_task_iter *it);
@@ -235,30 +239,39 @@ void css_task_iter_end(struct css_task_iter *it);
/**
* cgroup_taskset_for_each - iterate cgroup_taskset
* @task: the loop cursor
+ * @dst_css: the destination css
* @tset: taskset to iterate
*
* @tset may contain multiple tasks and they may belong to multiple
- * processes. When there are multiple tasks in @tset, if a task of a
- * process is in @tset, all tasks of the process are in @tset. Also, all
- * are guaranteed to share the same source and destination csses.
+ * processes.
+ *
+ * On the v2 hierarchy, there may be tasks from multiple processes and they
+ * may not share the source or destination csses.
+ *
+ * On traditional hierarchies, when there are multiple tasks in @tset, if a
+ * task of a process is in @tset, all tasks of the process are in @tset.
+ * Also, all are guaranteed to share the same source and destination csses.
*
* Iteration is not in any specific order.
*/
-#define cgroup_taskset_for_each(task, tset) \
- for ((task) = cgroup_taskset_first((tset)); (task); \
- (task) = cgroup_taskset_next((tset)))
+#define cgroup_taskset_for_each(task, dst_css, tset) \
+ for ((task) = cgroup_taskset_first((tset), &(dst_css)); \
+ (task); \
+ (task) = cgroup_taskset_next((tset), &(dst_css)))
/**
* cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
* @leader: the loop cursor
+ * @dst_css: the destination css
* @tset: taskset to iterate
*
* Iterate threadgroup leaders of @tset. For single-task migrations, @tset
* may not contain any.
*/
-#define cgroup_taskset_for_each_leader(leader, tset) \
- for ((leader) = cgroup_taskset_first((tset)); (leader); \
- (leader) = cgroup_taskset_next((tset))) \
+#define cgroup_taskset_for_each_leader(leader, dst_css, tset) \
+ for ((leader) = cgroup_taskset_first((tset), &(dst_css)); \
+ (leader); \
+ (leader) = cgroup_taskset_next((tset), &(dst_css))) \
if ((leader) != (leader)->group_leader) \
; \
else
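A hedged sketch of how a controller callback would use the new iteration signature, where each task now comes paired with its destination css (the callback body is illustrative):

#include <linux/cgroup.h>

static int example_can_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *css;

	/* iterate migrating tasks together with their destination css */
	cgroup_taskset_for_each(task, css, tset) {
		/* e.g. validate that task may move into css's cgroup */
	}
	return 0;
}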
@@ -352,6 +365,11 @@ static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
percpu_ref_put_many(&css->refcnt, n);
}
+static inline void cgroup_put(struct cgroup *cgrp)
+{
+ css_put(&cgrp->self);
+}
+
/**
* task_css_set_check - obtain a task's css_set with extra access conditions
* @task: the task to obtain css_set for
@@ -459,6 +477,23 @@ static inline struct cgroup *task_cgroup(struct task_struct *task,
return task_css(task, subsys_id)->cgroup;
}
+/**
+ * cgroup_is_descendant - test ancestry
+ * @cgrp: the cgroup to be tested
+ * @ancestor: possible ancestor of @cgrp
+ *
+ * Test whether @cgrp is a descendant of @ancestor. It also returns %true
+ * if @cgrp == @ancestor. This function is safe to call as long as @cgrp
+ * and @ancestor are accessible.
+ */
+static inline bool cgroup_is_descendant(struct cgroup *cgrp,
+ struct cgroup *ancestor)
+{
+ if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
+ return false;
+ return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
+}
+
/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
@@ -516,19 +551,6 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
pr_cont_kernfs_path(cgrp->kn);
}
-/**
- * cgroup_file_notify - generate a file modified event for a cgroup_file
- * @cfile: target cgroup_file
- *
- * @cfile must have been obtained by setting cftype->file_offset.
- */
-static inline void cgroup_file_notify(struct cgroup_file *cfile)
-{
- /* might not have been created due to one of the CFTYPE selector flags */
- if (cfile->kn)
- kernfs_notify(cfile->kn);
-}
-
#else /* !CONFIG_CGROUPS */
struct cgroup_subsys_state;
@@ -555,4 +577,45 @@ static inline int cgroup_init(void) { return 0; }
#endif /* !CONFIG_CGROUPS */
+/*
+ * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
+ * definition in cgroup-defs.h.
+ */
+#ifdef CONFIG_SOCK_CGROUP_DATA
+
+#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
+extern spinlock_t cgroup_sk_update_lock;
+#endif
+
+void cgroup_sk_alloc_disable(void);
+void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
+void cgroup_sk_free(struct sock_cgroup_data *skcd);
+
+static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
+{
+#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
+ unsigned long v;
+
+ /*
+ * @skcd->val is 64bit but the following is safe on 32bit too as we
+ * just need the lower ulong to be written and read atomically.
+ */
+ v = READ_ONCE(skcd->val);
+
+ if (v & 1)
+ return &cgrp_dfl_root.cgrp;
+
+ return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
+#else
+ return (struct cgroup *)(unsigned long)skcd->val;
+#endif
+}
+
+#else /* CONFIG_SOCK_CGROUP_DATA */
+
+static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
+static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
+
+#endif /* CONFIG_SOCK_CGROUP_DATA */
+
#endif /* _LINUX_CGROUP_H */
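To see why the ancestor_ids[] test added above is a constant-time check, consider a hypothetical chain root (level 0, id 1) -> A (level 1, id 7) -> B (level 2, id 9): B's ancestor_ids[] holds {1, 7, 9}, so "is B a descendant of A" reduces to comparing ancestor_ids[1] against A's id. A stand-alone sketch of the same check, with made-up node data:

#include <stdbool.h>
#include <stdio.h>

struct node {
	int level;		/* depth in the hierarchy; root is 0 */
	int id;
	int ancestor_ids[8];	/* ids at each level, including self */
};

static bool is_descendant(const struct node *n, const struct node *anc)
{
	if (n->level < anc->level)
		return false;
	return n->ancestor_ids[anc->level] == anc->id;
}

int main(void)
{
	struct node root = { 0, 1, { 1 } };
	struct node a    = { 1, 7, { 1, 7 } };
	struct node b    = { 2, 9, { 1, 7, 9 } };

	printf("%d %d %d\n",
	       is_descendant(&b, &a),		/* 1 */
	       is_descendant(&b, &root),	/* 1 */
	       is_descendant(&a, &b));		/* 0 */
	return 0;
}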
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index a8a335b7fce0..758a029011b1 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -197,6 +197,16 @@ static inline struct configfs_subsystem *to_configfs_subsystem(struct config_gro
int configfs_register_subsystem(struct configfs_subsystem *subsys);
void configfs_unregister_subsystem(struct configfs_subsystem *subsys);
+int configfs_register_group(struct config_group *parent_group,
+ struct config_group *group);
+void configfs_unregister_group(struct config_group *group);
+
+struct config_group *
+configfs_register_default_group(struct config_group *parent_group,
+ const char *name,
+ struct config_item_type *item_type);
+void configfs_unregister_default_group(struct config_group *group);
+
/* These functions can sleep and can alloc with GFP_KERNEL */
/* WARNING: These cannot be called underneath configfs callbacks!! */
int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target);
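A hedged usage sketch for the new default-group helpers, assuming the ERR_PTR-on-failure convention; the group name and item type below are hypothetical:

#include <linux/configfs.h>
#include <linux/err.h>

static struct config_group *features_grp;

/* create a "features" child group under parent at registration time */
static int example_setup(struct config_group *parent,
			 struct config_item_type *type)
{
	features_grp = configfs_register_default_group(parent, "features", type);
	if (IS_ERR(features_grp))
		return PTR_ERR(features_grp);
	return 0;
}

static void example_teardown(void)
{
	configfs_unregister_default_group(features_grp);
}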
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index ef4c5b1a860f..177c7680c1a8 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -77,6 +77,7 @@ struct cpufreq_policy {
unsigned int suspend_freq; /* freq to set during suspend */
unsigned int policy; /* see above */
+ unsigned int last_policy; /* policy before unplug */
struct cpufreq_governor *governor; /* see below */
void *governor_data;
bool governor_enabled; /* governor start/stop flag */
diff --git a/include/linux/dns_resolver.h b/include/linux/dns_resolver.h
index cc92268af89a..6ac3cad9aef1 100644
--- a/include/linux/dns_resolver.h
+++ b/include/linux/dns_resolver.h
@@ -27,7 +27,7 @@
#ifdef __KERNEL__
extern int dns_query(const char *type, const char *name, size_t namelen,
- const char *options, char **_result, time_t *_expiry);
+ const char *options, char **_result, time64_t *_expiry);
#endif /* KERNEL */
diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
index 7be22da321f3..a4cf57cd0f75 100644
--- a/include/linux/enclosure.h
+++ b/include/linux/enclosure.h
@@ -29,7 +29,11 @@
/* A few generic types ... taken from ses-2 */
enum enclosure_component_type {
ENCLOSURE_COMPONENT_DEVICE = 0x01,
+ ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS = 0x07,
+ ENCLOSURE_COMPONENT_SCSI_TARGET_PORT = 0x14,
+ ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT = 0x15,
ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17,
+ ENCLOSURE_COMPONENT_SAS_EXPANDER = 0x18,
};
/* ses-2 common element status */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 6523109e136d..8942af0813e3 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -271,7 +271,7 @@ static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
- return gfp_flags & __GFP_DIRECT_RECLAIM;
+ return (bool __force)(gfp_flags & __GFP_DIRECT_RECLAIM);
}
#ifdef CONFIG_HIGHMEM
diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h
index 519b6e2d769e..661e5c2a8e2a 100644
--- a/include/linux/hashtable.h
+++ b/include/linux/hashtable.h
@@ -16,6 +16,10 @@
struct hlist_head name[1 << (bits)] = \
{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
+#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \
+ struct hlist_head name[1 << (bits)] __read_mostly = \
+ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
+
#define DECLARE_HASHTABLE(name, bits) \
struct hlist_head name[1 << (bits)]
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h
index 1acb1445e05f..e31bcd4c7859 100644
--- a/include/linux/hdlc.h
+++ b/include/linux/hdlc.h
@@ -101,7 +101,7 @@ netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev);
int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
size_t size);
/* May be used by hardware driver to gain control over HDLC device */
-void detach_hdlc_protocol(struct net_device *dev);
+int detach_hdlc_protocol(struct net_device *dev);
static __inline__ __be16 hdlc_type_trans(struct sk_buff *skb,
struct net_device *dev)
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index b49cf923becc..ba7a9b0c7c57 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -91,7 +91,6 @@ enum {
PPPOX_CONNECTED = 1, /* connection established ==TCP_ESTABLISHED */
PPPOX_BOUND = 2, /* bound to ppp device */
PPPOX_RELAY = 4, /* forwarding is enabled */
- PPPOX_ZOMBIE = 8, /* dead, but still bound to ppp device */
PPPOX_DEAD = 16 /* dead, useless, please clean me up!*/
};
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index a6aa970758a2..b84e49c3a738 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -164,6 +164,7 @@ struct team_mode {
size_t priv_size;
size_t port_priv_size;
const struct team_mode_ops *ops;
+ enum netdev_lag_tx_type lag_tx_type;
};
#define TEAM_PORT_HASHBITS 4
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 67ce5bd3b56a..a5f6ce6b578c 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -73,7 +73,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
/* found in socket.c */
extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
-static inline bool is_vlan_dev(struct net_device *dev)
+static inline bool is_vlan_dev(const struct net_device *dev)
{
return dev->priv_flags & IFF_802_1Q_VLAN;
}
@@ -621,7 +621,7 @@ static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
NETIF_F_SG |
NETIF_F_HIGHDMA |
NETIF_F_FRAGLIST |
- NETIF_F_GEN_CSUM |
+ NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index 0e707f0c1a3e..7c27fa1030e8 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -3,6 +3,7 @@
#include <uapi/linux/inet_diag.h>
+struct net;
struct sock;
struct inet_hashinfo;
struct nlattr;
@@ -23,6 +24,10 @@ struct inet_diag_handler {
void (*idiag_get_info)(struct sock *sk,
struct inet_diag_msg *r,
void *info);
+
+ int (*destroy)(struct sk_buff *in_skb,
+ const struct inet_diag_req_v2 *req);
+
__u16 idiag_type;
__u16 idiag_info_size;
};
@@ -41,6 +46,10 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
struct sk_buff *in_skb, const struct nlmsghdr *nlh,
const struct inet_diag_req_v2 *req);
+struct sock *inet_diag_find_one_icsk(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ const struct inet_diag_req_v2 *req);
+
int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
extern int inet_diag_register(const struct inet_diag_handler *handler);
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 0ef2a97ccdb5..402753bccafa 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -227,7 +227,7 @@ struct ipv6_pinfo {
struct ipv6_ac_socklist *ipv6_ac_list;
struct ipv6_fl_socklist __rcu *ipv6_fl_list;
- struct ipv6_txoptions *opt;
+ struct ipv6_txoptions __rcu *opt;
struct sk_buff *pktoptions;
struct sk_buff *rxpmtu;
struct inet6_cork cork;
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index c9ae0c6ec050..d5d798b35c1f 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -330,6 +330,7 @@ struct rdists {
};
struct irq_domain;
+struct device_node;
int its_cpu_init(void);
int its_init(struct device_node *node, struct rdists *rdists,
struct irq_domain *domain);
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 8dde55974f18..0536524bb9eb 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -5,7 +5,7 @@
* Jump label support
*
* Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
- * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
*
* DEPRECATED API:
*
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 5d4e9c4b821d..af51df35d749 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -274,6 +274,8 @@ void pr_cont_kernfs_path(struct kernfs_node *kn);
struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn);
struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
const char *name, const void *ns);
+struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
+ const char *path, const void *ns);
void kernfs_get(struct kernfs_node *kn);
void kernfs_put(struct kernfs_node *kn);
@@ -350,6 +352,10 @@ static inline struct kernfs_node *
kernfs_find_and_get_ns(struct kernfs_node *parent, const char *name,
const void *ns)
{ return NULL; }
+static inline struct kernfs_node *
+kernfs_walk_and_get_ns(struct kernfs_node *parent, const char *path,
+ const void *ns)
+{ return NULL; }
static inline void kernfs_get(struct kernfs_node *kn) { }
static inline void kernfs_put(struct kernfs_node *kn) { }
@@ -431,6 +437,12 @@ kernfs_find_and_get(struct kernfs_node *kn, const char *name)
}
static inline struct kernfs_node *
+kernfs_walk_and_get(struct kernfs_node *kn, const char *path)
+{
+ return kernfs_walk_and_get_ns(kn, path, NULL);
+}
+
+static inline struct kernfs_node *
kernfs_create_dir(struct kernfs_node *parent, const char *name, umode_t mode,
void *priv)
{
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index d0a1f99e24e3..4894c6888bc6 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -25,7 +25,7 @@
#ifdef CONFIG_DEBUG_KMEMLEAK
-extern void kmemleak_init(void) __ref;
+extern void kmemleak_init(void) __init;
extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
gfp_t gfp) __ref;
extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 484604d184be..e15828fd71f1 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -19,7 +19,6 @@
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
-#include <linux/spinlock.h>
struct kref {
atomic_t refcount;
@@ -99,38 +98,6 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
return kref_sub(kref, 1, release);
}
-/**
- * kref_put_spinlock_irqsave - decrement refcount for object.
- * @kref: object.
- * @release: pointer to the function that will clean up the object when the
- * last reference to the object is released.
- * This pointer is required, and it is not acceptable to pass kfree
- * in as this function.
- * @lock: lock to take in release case
- *
- * Behaves identical to kref_put with one exception. If the reference count
- * drops to zero, the lock will be taken atomically wrt dropping the reference
- * count. The release function has to call spin_unlock() without _irqrestore.
- */
-static inline int kref_put_spinlock_irqsave(struct kref *kref,
- void (*release)(struct kref *kref),
- spinlock_t *lock)
-{
- unsigned long flags;
-
- WARN_ON(release == NULL);
- if (atomic_add_unless(&kref->refcount, -1, 1))
- return 0;
- spin_lock_irqsave(lock, flags);
- if (atomic_dec_and_test(&kref->refcount)) {
- release(kref);
- local_irq_restore(flags);
- return 1;
- }
- spin_unlock_irqrestore(lock, flags);
- return 0;
-}
-
static inline int kref_put_mutex(struct kref *kref,
void (*release)(struct kref *kref),
struct mutex *lock)
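With the spinlock variant removed, callers fall back to the plain kref pattern (or kref_put_mutex() where a lock must be held across release). A hedged kernel-style sketch; the foo type and helpers are illustrative:

#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
	struct kref ref;
	/* ... payload ... */
};

static void foo_release(struct kref *kref)
{
	struct foo *f = container_of(kref, struct foo, ref);

	kfree(f);
}

static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		kref_init(&f->ref);	/* refcount starts at 1 */
	return f;
}

/* drop a reference; foo_release() runs when the count hits zero */
static void foo_put(struct foo *f)
{
	kref_put(&f->ref, foo_release);
}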
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 5706a2108f0a..c923350ca20a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -460,6 +460,17 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
(vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
idx++)
+static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
+{
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ if (vcpu->vcpu_id == id)
+ return vcpu;
+ return NULL;
+}
+
#define kvm_for_each_memslot(memslot, slots) \
for (memslot = &slots->memslots[0]; \
memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 83577f8fd15b..600c1e0626a5 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -210,6 +210,7 @@ enum {
ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
/* (doesn't imply presence) */
ATA_FLAG_SATA = (1 << 1),
+ ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */
ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 69c9057e1ab8..034117b3be5f 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -50,15 +50,21 @@ enum {
NVM_IO_DUAL_ACCESS = 0x1,
NVM_IO_QUAD_ACCESS = 0x2,
+ /* NAND Access Modes */
NVM_IO_SUSPEND = 0x80,
NVM_IO_SLC_MODE = 0x100,
NVM_IO_SCRAMBLE_DISABLE = 0x200,
+
+ /* Block Types */
+ NVM_BLK_T_FREE = 0x0,
+ NVM_BLK_T_BAD = 0x1,
+ NVM_BLK_T_DEV = 0x2,
+ NVM_BLK_T_HOST = 0x4,
};
struct nvm_id_group {
u8 mtype;
u8 fmtype;
- u16 res16;
u8 num_ch;
u8 num_lun;
u8 num_pln;
@@ -74,9 +80,9 @@ struct nvm_id_group {
u32 tbet;
u32 tbem;
u32 mpos;
+ u32 mccap;
u16 cpar;
- u8 res[913];
-} __packed;
+};
struct nvm_addr_format {
u8 ch_offset;
@@ -91,19 +97,15 @@ struct nvm_addr_format {
u8 pg_len;
u8 sect_offset;
u8 sect_len;
- u8 res[4];
};
struct nvm_id {
u8 ver_id;
u8 vmnt;
u8 cgrps;
- u8 res[5];
u32 cap;
u32 dom;
struct nvm_addr_format ppaf;
- u8 ppat;
- u8 resv[224];
struct nvm_id_group groups[4];
} __packed;
@@ -123,39 +125,28 @@ struct nvm_tgt_instance {
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0
-#define NVM_SEC_BITS (8)
-#define NVM_PL_BITS (6)
-#define NVM_PG_BITS (16)
#define NVM_BLK_BITS (16)
-#define NVM_LUN_BITS (10)
+#define NVM_PG_BITS (16)
+#define NVM_SEC_BITS (8)
+#define NVM_PL_BITS (8)
+#define NVM_LUN_BITS (8)
#define NVM_CH_BITS (8)
struct ppa_addr {
+ /* Generic structure for all addresses */
union {
- /* Channel-based PPA format in nand 4x2x2x2x8x10 */
- struct {
- u64 ch : 4;
- u64 sec : 2; /* 4 sectors per page */
- u64 pl : 2; /* 4 planes per LUN */
- u64 lun : 2; /* 4 LUNs per channel */
- u64 pg : 8; /* 256 pages per block */
- u64 blk : 10;/* 1024 blocks per plane */
- u64 resved : 36;
- } chnl;
-
- /* Generic structure for all addresses */
struct {
+ u64 blk : NVM_BLK_BITS;
+ u64 pg : NVM_PG_BITS;
u64 sec : NVM_SEC_BITS;
u64 pl : NVM_PL_BITS;
- u64 pg : NVM_PG_BITS;
- u64 blk : NVM_BLK_BITS;
u64 lun : NVM_LUN_BITS;
u64 ch : NVM_CH_BITS;
} g;
u64 ppa;
};
-} __packed;
+};
struct nvm_rq {
struct nvm_tgt_instance *ins;
@@ -191,18 +182,18 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
struct nvm_block;
typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
-typedef int (nvm_bb_update_fn)(u32, void *, unsigned int, void *);
-typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *);
-typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32,
+typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
+typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
+typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
nvm_l2p_update_fn *, void *);
-typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, int, unsigned int,
+typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
nvm_bb_update_fn *, void *);
-typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int);
-typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *);
-typedef int (nvm_erase_blk_fn)(struct request_queue *, struct nvm_rq *);
-typedef void *(nvm_create_dma_pool_fn)(struct request_queue *, char *);
+typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
+typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
+typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
+typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
typedef void (nvm_destroy_dma_pool_fn)(void *);
-typedef void *(nvm_dev_dma_alloc_fn)(struct request_queue *, void *, gfp_t,
+typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
dma_addr_t *);
typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
@@ -210,7 +201,7 @@ struct nvm_dev_ops {
nvm_id_fn *identity;
nvm_get_l2p_tbl_fn *get_l2p_tbl;
nvm_op_bb_tbl_fn *get_bb_tbl;
- nvm_op_set_bb_fn *set_bb;
+ nvm_op_set_bb_fn *set_bb_tbl;
nvm_submit_io_fn *submit_io;
nvm_erase_blk_fn *erase_block;
@@ -220,7 +211,7 @@ struct nvm_dev_ops {
nvm_dev_dma_alloc_fn *dev_dma_alloc;
nvm_dev_dma_free_fn *dev_dma_free;
- uint8_t max_phys_sect;
+ unsigned int max_phys_sect;
};
struct nvm_lun {
@@ -229,7 +220,9 @@ struct nvm_lun {
int lun_id;
int chnl_id;
+ unsigned int nr_inuse_blocks; /* Number of used blocks */
unsigned int nr_free_blocks; /* Number of unused blocks */
+ unsigned int nr_bad_blocks; /* Number of bad blocks */
struct nvm_block *blocks;
spinlock_t lock;
@@ -263,8 +256,7 @@ struct nvm_dev {
int blks_per_lun;
int sec_size;
int oob_size;
- int addr_mode;
- struct nvm_addr_format addr_format;
+ struct nvm_addr_format ppaf;
/* Calculated/Cached values. These do not reflect the actual usable
* blocks at run-time.
@@ -290,118 +282,45 @@ struct nvm_dev {
char name[DISK_NAME_LEN];
};
-/* fallback conversion */
-static struct ppa_addr __generic_to_linear_addr(struct nvm_dev *dev,
- struct ppa_addr r)
+static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
+ struct ppa_addr r)
{
struct ppa_addr l;
- l.ppa = r.g.sec +
- r.g.pg * dev->sec_per_pg +
- r.g.blk * (dev->pgs_per_blk *
- dev->sec_per_pg) +
- r.g.lun * (dev->blks_per_lun *
- dev->pgs_per_blk *
- dev->sec_per_pg) +
- r.g.ch * (dev->blks_per_lun *
- dev->pgs_per_blk *
- dev->luns_per_chnl *
- dev->sec_per_pg);
+ l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset;
+ l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset;
+ l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset;
+ l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset;
+ l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset;
+ l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset;
return l;
}
-/* fallback conversion */
-static struct ppa_addr __linear_to_generic_addr(struct nvm_dev *dev,
- struct ppa_addr r)
+static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
+ struct ppa_addr r)
{
struct ppa_addr l;
- int secs, pgs, blks, luns;
- sector_t ppa = r.ppa;
-
- l.ppa = 0;
-
- div_u64_rem(ppa, dev->sec_per_pg, &secs);
- l.g.sec = secs;
- sector_div(ppa, dev->sec_per_pg);
- div_u64_rem(ppa, dev->sec_per_blk, &pgs);
- l.g.pg = pgs;
-
- sector_div(ppa, dev->pgs_per_blk);
- div_u64_rem(ppa, dev->blks_per_lun, &blks);
- l.g.blk = blks;
-
- sector_div(ppa, dev->blks_per_lun);
- div_u64_rem(ppa, dev->luns_per_chnl, &luns);
- l.g.lun = luns;
-
- sector_div(ppa, dev->luns_per_chnl);
- l.g.ch = ppa;
-
- return l;
-}
-
-static struct ppa_addr __generic_to_chnl_addr(struct ppa_addr r)
-{
- struct ppa_addr l;
-
- l.ppa = 0;
-
- l.chnl.sec = r.g.sec;
- l.chnl.pl = r.g.pl;
- l.chnl.pg = r.g.pg;
- l.chnl.blk = r.g.blk;
- l.chnl.lun = r.g.lun;
- l.chnl.ch = r.g.ch;
-
- return l;
-}
-
-static struct ppa_addr __chnl_to_generic_addr(struct ppa_addr r)
-{
- struct ppa_addr l;
-
- l.ppa = 0;
-
- l.g.sec = r.chnl.sec;
- l.g.pl = r.chnl.pl;
- l.g.pg = r.chnl.pg;
- l.g.blk = r.chnl.blk;
- l.g.lun = r.chnl.lun;
- l.g.ch = r.chnl.ch;
+ /*
+ * (r.ppa >> X offset) & X len bitmask. X eq. blk, pg, etc.
+ */
+ l.g.blk = (r.ppa >> dev->ppaf.blk_offset) &
+ (((1 << dev->ppaf.blk_len) - 1));
+ l.g.pg |= (r.ppa >> dev->ppaf.pg_offset) &
+ (((1 << dev->ppaf.pg_len) - 1));
+ l.g.sec |= (r.ppa >> dev->ppaf.sect_offset) &
+ (((1 << dev->ppaf.sect_len) - 1));
+ l.g.pl |= (r.ppa >> dev->ppaf.pln_offset) &
+ (((1 << dev->ppaf.pln_len) - 1));
+ l.g.lun |= (r.ppa >> dev->ppaf.lun_offset) &
+ (((1 << dev->ppaf.lun_len) - 1));
+ l.g.ch |= (r.ppa >> dev->ppaf.ch_offset) &
+ (((1 << dev->ppaf.ch_len) - 1));
return l;
}
-static inline struct ppa_addr addr_to_generic_mode(struct nvm_dev *dev,
- struct ppa_addr gppa)
-{
- switch (dev->addr_mode) {
- case NVM_ADDRMODE_LINEAR:
- return __linear_to_generic_addr(dev, gppa);
- case NVM_ADDRMODE_CHANNEL:
- return __chnl_to_generic_addr(gppa);
- default:
- BUG();
- }
- return gppa;
-}
-
-static inline struct ppa_addr generic_to_addr_mode(struct nvm_dev *dev,
- struct ppa_addr gppa)
-{
- switch (dev->addr_mode) {
- case NVM_ADDRMODE_LINEAR:
- return __generic_to_linear_addr(dev, gppa);
- case NVM_ADDRMODE_CHANNEL:
- return __generic_to_chnl_addr(gppa);
- default:
- BUG();
- }
- return gppa;
-}
-
static inline int ppa_empty(struct ppa_addr ppa_addr)
{
return (ppa_addr.ppa == ADDR_EMPTY);
@@ -468,7 +387,7 @@ typedef int (nvmm_end_io_fn)(struct nvm_rq *, int);
typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
unsigned long);
typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
-typedef void (nvmm_free_blocks_print_fn)(struct nvm_dev *);
+typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
struct nvmm_type {
const char *name;
@@ -492,7 +411,7 @@ struct nvmm_type {
nvmm_get_lun_fn *get_lun;
/* Statistics */
- nvmm_free_blocks_print_fn *free_blocks_print;
+ nvmm_lun_info_print_fn *lun_info_print;
struct list_head list;
};
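The conversion helpers above replace the old per-mode arithmetic with shift-and-mask over the device-reported field geometry. A stand-alone sketch of the same pack/unpack scheme, with made-up offsets and widths standing in for nvm_addr_format:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field geometry: sector, page and block lanes in a u64 */
enum {
	SEC_OFF = 0,  SEC_LEN = 8,
	PG_OFF  = 8,  PG_LEN  = 16,
	BLK_OFF = 24, BLK_LEN = 16,
};

static uint64_t pack(uint64_t blk, uint64_t pg, uint64_t sec)
{
	return (blk << BLK_OFF) | (pg << PG_OFF) | (sec << SEC_OFF);
}

static uint64_t field(uint64_t ppa, unsigned int off, unsigned int len)
{
	return (ppa >> off) & ((1ULL << len) - 1);
}

int main(void)
{
	uint64_t ppa = pack(1024, 200, 3);

	printf("blk=%llu pg=%llu sec=%llu\n",
	       (unsigned long long)field(ppa, BLK_OFF, BLK_LEN),
	       (unsigned long long)field(ppa, PG_OFF, PG_LEN),
	       (unsigned long long)field(ppa, SEC_OFF, SEC_LEN));
	return 0;
}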
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 70400dc7660f..c57e424d914b 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -2,7 +2,7 @@
* Runtime locking correctness validator
*
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
* see Documentation/locking/lockdep-design.txt for more details.
*/
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 7501626ab529..d3133be12d92 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -427,6 +427,17 @@ enum {
};
enum {
+ /*
+ * Max wqe size for rdma read is 512 bytes, so this
+ * limits our max_sge_rd as the wqe needs to fit:
+ * - ctrl segment (16 bytes)
+ * - rdma segment (16 bytes)
+ * - scatter elements (16 bytes each)
+ */
+ MLX4_MAX_SGE_RD = (512 - 16 - 16) / 16
+};
+
+enum {
MLX4_DEV_PMC_SUBTYPE_GUID_INFO = 0x14,
MLX4_DEV_PMC_SUBTYPE_PORT_INFO = 0x15,
MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE = 0x16,
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index 5a06d969338e..2e8af001c5da 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -75,6 +75,11 @@ static inline int mlx4_is_bonded(struct mlx4_dev *dev)
return !!(dev->flags & MLX4_FLAG_BONDED);
}
+static inline int mlx4_is_mf_bonded(struct mlx4_dev *dev)
+{
+ return (mlx4_is_bonded(dev) && mlx4_is_mfunc(dev));
+}
+
struct mlx4_port_map {
u8 port1;
u8 port2;
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 0b473cbfa7ef..7d3a85faefb7 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -251,6 +251,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb,
MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
+ MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
};
enum {
@@ -520,6 +521,12 @@ struct mlx5_eqe_page_fault {
__be32 flags_qpn;
} __packed;
+struct mlx5_eqe_vport_change {
+ u8 rsvd0[2];
+ __be16 vport_num;
+ __be32 rsvd1[6];
+} __packed;
+
union ev_data {
__be32 raw[7];
struct mlx5_eqe_cmd cmd;
@@ -532,6 +539,7 @@ union ev_data {
struct mlx5_eqe_stall_vl stall_vl;
struct mlx5_eqe_page_req req_pages;
struct mlx5_eqe_page_fault page_fault;
+ struct mlx5_eqe_vport_change vport_change;
} __packed;
struct mlx5_eqe {
@@ -1067,6 +1075,12 @@ enum {
};
enum {
+ MLX5_ESW_VPORT_ADMIN_STATE_DOWN = 0x0,
+ MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1,
+ MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2,
+};
+
+enum {
MLX5_L3_PROT_TYPE_IPV4 = 0,
MLX5_L3_PROT_TYPE_IPV6 = 1,
};
@@ -1102,6 +1116,12 @@ enum {
MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 2,
};
+enum mlx5_list_type {
+ MLX5_NVPRT_LIST_TYPE_UC = 0x0,
+ MLX5_NVPRT_LIST_TYPE_MC = 0x1,
+ MLX5_NVPRT_LIST_TYPE_VLAN = 0x2,
+};
+
enum {
MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1,
@@ -1124,6 +1144,8 @@ enum mlx5_cap_type {
MLX5_CAP_IPOIB_OFFLOADS,
MLX5_CAP_EOIB_OFFLOADS,
MLX5_CAP_FLOW_TABLE,
+ MLX5_CAP_ESWITCH_FLOW_TABLE,
+ MLX5_CAP_ESWITCH,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1161,6 +1183,28 @@ enum mlx5_cap_type {
#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
+#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
+ MLX5_GET(flow_table_eswitch_cap, \
+ mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+
+#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
+ MLX5_GET(flow_table_eswitch_cap, \
+ mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+
+#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
+
+#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)
+
+#define MLX5_CAP_ESW(mdev, cap) \
+ MLX5_GET(e_switch_cap, \
+ mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)
+
+#define MLX5_CAP_ESW_MAX(mdev, cap) \
+ MLX5_GET(e_switch_cap, \
+ mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap)
+
#define MLX5_CAP_ODP(mdev, cap)\
MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 5c857f2a20d7..2fd7019f69db 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -426,11 +426,23 @@ struct mlx5_mr_table {
struct radix_tree_root tree;
};
+struct mlx5_vf_context {
+ int enabled;
+};
+
+struct mlx5_core_sriov {
+ struct mlx5_vf_context *vfs_ctx;
+ int num_vfs;
+ int enabled_vfs;
+};
+
struct mlx5_irq_info {
cpumask_var_t mask;
char name[MLX5_MAX_IRQ_NAME];
};
+struct mlx5_eswitch;
+
struct mlx5_priv {
char name[MLX5_MAX_NAME_LEN];
struct mlx5_eq_table eq_table;
@@ -447,6 +459,7 @@ struct mlx5_priv {
int fw_pages;
atomic_t reg_pages;
struct list_head free_list;
+ int vfs_pages;
struct mlx5_core_health health;
@@ -485,6 +498,12 @@ struct mlx5_priv {
struct list_head dev_list;
struct list_head ctx_list;
spinlock_t ctx_lock;
+
+ struct mlx5_eswitch *eswitch;
+ struct mlx5_core_sriov sriov;
+ unsigned long pci_dev_data;
+ struct mlx5_flow_root_namespace *root_ns;
+ struct mlx5_flow_root_namespace *fdb_root_ns;
};
enum mlx5_device_state {
@@ -739,6 +758,8 @@ void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
+int mlx5_sriov_init(struct mlx5_core_dev *dev);
+int mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
@@ -884,6 +905,15 @@ struct mlx5_profile {
} mr_cache[MAX_MR_CACHE_ENTRIES];
};
+enum {
+ MLX5_PCI_DEV_IS_VF = 1 << 0,
+};
+
+static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
+{
+ return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
+}
+
static inline int mlx5_get_gid_table_len(u16 param)
{
if (param > 4) {
diff --git a/include/linux/mlx5/flow_table.h b/include/linux/mlx5/flow_table.h
deleted file mode 100644
index 5f922c6d4fc2..000000000000
--- a/include/linux/mlx5/flow_table.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX5_FLOW_TABLE_H
-#define MLX5_FLOW_TABLE_H
-
-#include <linux/mlx5/driver.h>
-
-struct mlx5_flow_table_group {
- u8 log_sz;
- u8 match_criteria_enable;
- u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
-};
-
-void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
- u16 num_groups,
- struct mlx5_flow_table_group *group);
-void mlx5_destroy_flow_table(void *flow_table);
-int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
- void *match_criteria, void *flow_context,
- u32 *flow_index);
-void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index);
-u32 mlx5_get_flow_table_id(void *flow_table);
-
-#endif /* MLX5_FLOW_TABLE_H */
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
new file mode 100644
index 000000000000..bc7ad019afde
--- /dev/null
+++ b/include/linux/mlx5/fs.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _MLX5_FS_
+#define _MLX5_FS_
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/mlx5_ifc.h>
+
+#define MLX5_FS_DEFAULT_FLOW_TAG 0x0
+
+enum mlx5_flow_namespace_type {
+ MLX5_FLOW_NAMESPACE_KERNEL,
+ MLX5_FLOW_NAMESPACE_FDB,
+};
+
+struct mlx5_flow_table;
+struct mlx5_flow_group;
+struct mlx5_flow_rule;
+struct mlx5_flow_namespace;
+
+struct mlx5_flow_destination {
+ enum mlx5_flow_destination_type type;
+ union {
+ u32 tir_num;
+ struct mlx5_flow_table *ft;
+ u32 vport_num;
+ };
+};
+
+struct mlx5_flow_namespace *
+mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type type);
+
+struct mlx5_flow_table *
+mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+ int prio,
+ int num_flow_table_entries);
+int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
+
+/* inbox should be set with the following values:
+ * start_flow_index
+ * end_flow_index
+ * match_criteria_enable
+ * match_criteria
+ */
+struct mlx5_flow_group *
+mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
+void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
+
+/* Single destination per rule.
+ * Group ID is implied by the match criteria.
+ */
+struct mlx5_flow_rule *
+mlx5_add_flow_rule(struct mlx5_flow_table *ft,
+ u8 match_criteria_enable,
+ u32 *match_criteria,
+ u32 *match_value,
+ u32 action,
+ u32 flow_tag,
+ struct mlx5_flow_destination *dest);
+void mlx5_del_flow_rule(struct mlx5_flow_rule *fr);
+
+#endif
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 1565324eb620..131a2737cfa3 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -256,25 +256,27 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
struct mlx5_ifc_flow_table_prop_layout_bits {
u8 ft_support[0x1];
- u8 reserved_0[0x1f];
+ u8 reserved_0[0x2];
+ u8 flow_modify_en[0x1];
+ u8 reserved_1[0x1c];
- u8 reserved_1[0x2];
+ u8 reserved_2[0x2];
u8 log_max_ft_size[0x6];
- u8 reserved_2[0x10];
+ u8 reserved_3[0x10];
u8 max_ft_level[0x8];
- u8 reserved_3[0x20];
+ u8 reserved_4[0x20];
- u8 reserved_4[0x18];
+ u8 reserved_5[0x18];
u8 log_max_ft_num[0x8];
- u8 reserved_5[0x18];
+ u8 reserved_6[0x18];
u8 log_max_destination[0x8];
- u8 reserved_6[0x18];
+ u8 reserved_7[0x18];
u8 log_max_flow[0x8];
- u8 reserved_7[0x40];
+ u8 reserved_8[0x40];
struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support;
@@ -447,6 +449,29 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
u8 reserved_3[0x7200];
};
+struct mlx5_ifc_flow_table_eswitch_cap_bits {
+ u8 reserved_0[0x200];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb;
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_ingress;
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress;
+
+ u8 reserved_1[0x7800];
+};
+
+struct mlx5_ifc_e_switch_cap_bits {
+ u8 vport_svlan_strip[0x1];
+ u8 vport_cvlan_strip[0x1];
+ u8 vport_svlan_insert[0x1];
+ u8 vport_cvlan_insert_if_not_exist[0x1];
+ u8 vport_cvlan_insert_overwrite[0x1];
+ u8 reserved_0[0x1b];
+
+ u8 reserved_1[0x7e0];
+};
+
struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 csum_cap[0x1];
u8 vlan_cap[0x1];
@@ -665,7 +690,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_17[0x1];
u8 ets[0x1];
u8 nic_flow_table[0x1];
- u8 reserved_18[0x4];
+ u8 eswitch_flow_table[0x1];
+ u8 early_vf_enable[0x1];
+ u8 reserved_18[0x2];
u8 local_ca_ack_delay[0x5];
u8 reserved_19[0x6];
u8 port_type[0x2];
@@ -787,27 +814,36 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_60[0x1b];
u8 log_max_wq_sz[0x5];
- u8 reserved_61[0xa0];
-
+ u8 nic_vport_change_event[0x1];
+ u8 reserved_61[0xa];
+ u8 log_max_vlan_list[0x5];
u8 reserved_62[0x3];
+ u8 log_max_current_mc_list[0x5];
+ u8 reserved_63[0x3];
+ u8 log_max_current_uc_list[0x5];
+
+ u8 reserved_64[0x80];
+
+ u8 reserved_65[0x3];
u8 log_max_l2_table[0x5];
- u8 reserved_63[0x8];
+ u8 reserved_66[0x8];
u8 log_uar_page_sz[0x10];
- u8 reserved_64[0x100];
+ u8 reserved_67[0xe0];
- u8 reserved_65[0x1f];
+ u8 reserved_68[0x1f];
u8 cqe_zip[0x1];
u8 cqe_zip_timeout[0x10];
u8 cqe_zip_max_num[0x10];
- u8 reserved_66[0x220];
+ u8 reserved_69[0x220];
};
-enum {
- MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_FLOW_TABLE_ = 0x1,
- MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_TIR = 0x2,
+enum mlx5_flow_destination_type {
+ MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
+ MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
};
struct mlx5_ifc_dest_format_struct_bits {
@@ -900,6 +936,13 @@ struct mlx5_ifc_mac_address_layout_bits {
u8 mac_addr_31_0[0x20];
};
+struct mlx5_ifc_vlan_layout_bits {
+ u8 reserved_0[0x14];
+ u8 vlan[0x0c];
+
+ u8 reserved_1[0x20];
+};
+
struct mlx5_ifc_cong_control_r_roce_ecn_np_bits {
u8 reserved_0[0xa0];
@@ -1829,6 +1872,8 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_roce_cap_bits roce_cap;
struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps;
struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
+ struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
+ struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
u8 reserved_0[0x8000];
};
@@ -2133,24 +2178,35 @@ struct mlx5_ifc_rmpc_bits {
struct mlx5_ifc_wq_bits wq;
};
-enum {
- MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_CURRENT_UC_MAC_ADDRESS = 0x0,
-};
-
struct mlx5_ifc_nic_vport_context_bits {
u8 reserved_0[0x1f];
u8 roce_en[0x1];
- u8 reserved_1[0x760];
+ u8 arm_change_event[0x1];
+ u8 reserved_1[0x1a];
+ u8 event_on_mtu[0x1];
+ u8 event_on_promisc_change[0x1];
+ u8 event_on_vlan_change[0x1];
+ u8 event_on_mc_address_change[0x1];
+ u8 event_on_uc_address_change[0x1];
- u8 reserved_2[0x5];
+ u8 reserved_2[0xf0];
+
+ u8 mtu[0x10];
+
+ u8 reserved_3[0x640];
+
+ u8 promisc_uc[0x1];
+ u8 promisc_mc[0x1];
+ u8 promisc_all[0x1];
+ u8 reserved_4[0x2];
u8 allowed_list_type[0x3];
- u8 reserved_3[0xc];
+ u8 reserved_5[0xc];
u8 allowed_list_size[0xc];
struct mlx5_ifc_mac_address_layout_bits permanent_address;
- u8 reserved_4[0x20];
+ u8 reserved_6[0x20];
u8 current_uc_mac_address[0][0x40];
};
@@ -2263,6 +2319,26 @@ struct mlx5_ifc_hca_vport_context_bits {
u8 reserved_6[0xca0];
};
+struct mlx5_ifc_esw_vport_context_bits {
+ u8 reserved_0[0x3];
+ u8 vport_svlan_strip[0x1];
+ u8 vport_cvlan_strip[0x1];
+ u8 vport_svlan_insert[0x1];
+ u8 vport_cvlan_insert[0x2];
+ u8 reserved_1[0x18];
+
+ u8 reserved_2[0x20];
+
+ u8 svlan_cfi[0x1];
+ u8 svlan_pcp[0x3];
+ u8 svlan_id[0xc];
+ u8 cvlan_cfi[0x1];
+ u8 cvlan_pcp[0x3];
+ u8 cvlan_id[0xc];
+
+ u8 reserved_3[0x7a0];
+};
+
enum {
MLX5_EQC_STATUS_OK = 0x0,
MLX5_EQC_STATUS_EQ_WRITE_FAILURE = 0xa,
@@ -2769,6 +2845,13 @@ struct mlx5_ifc_set_hca_cap_in_bits {
union mlx5_ifc_hca_cap_union_bits capability;
};
+enum {
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION = 0x0,
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_TAG = 0x1,
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST = 0x2,
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3
+};
+
struct mlx5_ifc_set_fte_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
@@ -2793,11 +2876,14 @@ struct mlx5_ifc_set_fte_in_bits {
u8 reserved_4[0x8];
u8 table_id[0x18];
- u8 reserved_5[0x40];
+ u8 reserved_5[0x18];
+ u8 modify_enable_mask[0x8];
+
+ u8 reserved_6[0x20];
u8 flow_index[0x20];
- u8 reserved_6[0xe0];
+ u8 reserved_7[0xe0];
struct mlx5_ifc_flow_context_bits flow_context;
};
@@ -2940,6 +3026,7 @@ struct mlx5_ifc_query_vport_state_out_bits {
enum {
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0,
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT = 0x1,
};
struct mlx5_ifc_query_vport_state_in_bits {
@@ -3700,6 +3787,64 @@ struct mlx5_ifc_query_flow_group_in_bits {
u8 reserved_5[0x120];
};
+struct mlx5_ifc_query_esw_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_esw_vport_context_bits esw_vport_context;
+};
+
+struct mlx5_ifc_query_esw_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_modify_esw_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_esw_vport_context_fields_select_bits {
+ u8 reserved[0x1c];
+ u8 vport_cvlan_insert[0x1];
+ u8 vport_svlan_insert[0x1];
+ u8 vport_cvlan_strip[0x1];
+ u8 vport_svlan_strip[0x1];
+};
+
+struct mlx5_ifc_modify_esw_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ struct mlx5_ifc_esw_vport_context_fields_select_bits field_select;
+
+ struct mlx5_ifc_esw_vport_context_bits esw_vport_context;
+};
+
struct mlx5_ifc_query_eq_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
@@ -4228,7 +4373,10 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
};
struct mlx5_ifc_modify_nic_vport_field_select_bits {
- u8 reserved_0[0x1c];
+ u8 reserved_0[0x19];
+ u8 mtu[0x1];
+ u8 change_event[0x1];
+ u8 promisc[0x1];
u8 permanent_address[0x1];
u8 addresses_list[0x1];
u8 roce_en[0x1];
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 967e0fd06e89..638f2ca7a527 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -34,9 +34,17 @@
#define __MLX5_VPORT_H__
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/device.h>
-u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod);
-void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr);
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
+u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
+ u16 vport);
+int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
+ u16 vport, u8 state);
+int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
+ u16 vport, u8 *addr);
+int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
+ u16 vport, u8 *addr);
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
u8 port_num, u16 vf_num, u16 gid_index,
union ib_gid *gid);
@@ -51,5 +59,30 @@ int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
u64 *sys_image_guid);
int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
u64 *node_guid);
+int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
+ u32 vport,
+ enum mlx5_list_type list_type,
+ u8 addr_list[][ETH_ALEN],
+ int *list_size);
+int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
+ enum mlx5_list_type list_type,
+ u8 addr_list[][ETH_ALEN],
+ int list_size);
+int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
+ u32 vport,
+ int *promisc_uc,
+ int *promisc_mc,
+ int *promisc_all);
+int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
+ int promisc_uc,
+ int promisc_mc,
+ int promisc_all);
+int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
+ u32 vport,
+ u16 vlans[],
+ int *size);
+int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
+ u16 vlans[],
+ int list_size);
#endif /* __MLX5_VPORT_H__ */
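A sketch of the reworked vport API from a hypothetical netdev probe path: the helpers now take a vport number (0 is the local vport) and return errors instead of being void:

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>

static int example_setup_mac(struct mlx5_core_dev *mdev,
			     struct net_device *netdev)
{
	u8 mac[ETH_ALEN];
	int err;

	/* Query the permanent MAC of the local vport (vport 0). */
	err = mlx5_query_nic_vport_mac_address(mdev, 0, mac);
	if (err)
		return err;

	if (!is_valid_ether_addr(mac))
		return -EADDRNOTAVAIL;

	ether_addr_copy(netdev->dev_addr, mac);
	return 0;
}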
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 877ef226f90f..772362adf471 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -1,6 +1,7 @@
#ifndef LINUX_MM_DEBUG_H
#define LINUX_MM_DEBUG_H 1
+#include <linux/bug.h>
#include <linux/stringify.h>
struct page;
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index 79aaa9fc1a15..bf9b322cb0b0 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -9,38 +9,28 @@
#ifdef CONFIG_IP_MROUTE
static inline int ip_mroute_opt(int opt)
{
- return (opt >= MRT_BASE) && (opt <= MRT_MAX);
+ return opt >= MRT_BASE && opt <= MRT_MAX;
}
-#else
-static inline int ip_mroute_opt(int opt)
-{
- return 0;
-}
-#endif
-#ifdef CONFIG_IP_MROUTE
-extern int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
-extern int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
-extern int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg);
-extern int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
-extern int ip_mr_init(void);
+int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
+int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
+int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg);
+int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
+int ip_mr_init(void);
#else
-static inline
-int ip_mroute_setsockopt(struct sock *sock,
- int optname, char __user *optval, unsigned int optlen)
+static inline int ip_mroute_setsockopt(struct sock *sock, int optname,
+ char __user *optval, unsigned int optlen)
{
return -ENOPROTOOPT;
}
-static inline
-int ip_mroute_getsockopt(struct sock *sock,
- int optname, char __user *optval, int __user *optlen)
+static inline int ip_mroute_getsockopt(struct sock *sock, int optname,
+ char __user *optval, int __user *optlen)
{
return -ENOPROTOOPT;
}
-static inline
-int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
+static inline int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
return -ENOIOCTLCMD;
}
@@ -49,6 +39,11 @@ static inline int ip_mr_init(void)
{
return 0;
}
+
+static inline int ip_mroute_opt(int opt)
+{
+ return 0;
+}
#endif
struct vif_device {
@@ -64,6 +59,32 @@ struct vif_device {
#define VIFF_STATIC 0x8000
+#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)
+#define MFC_LINES 64
+
+struct mr_table {
+ struct list_head list;
+ possible_net_t net;
+ u32 id;
+ struct sock __rcu *mroute_sk;
+ struct timer_list ipmr_expire_timer;
+ struct list_head mfc_unres_queue;
+ struct list_head mfc_cache_array[MFC_LINES];
+ struct vif_device vif_table[MAXVIFS];
+ int maxvif;
+ atomic_t cache_resolve_queue_len;
+ bool mroute_do_assert;
+ bool mroute_do_pim;
+ int mroute_reg_vif_num;
+};
+
+/* mfc_flags:
+ * MFC_STATIC - the entry was added statically (not by a routing daemon)
+ */
+enum {
+ MFC_STATIC = BIT(0),
+};
+
struct mfc_cache {
struct list_head list;
__be32 mfc_mcastgrp; /* Group the entry belongs to */
@@ -89,19 +110,14 @@ struct mfc_cache {
struct rcu_head rcu;
};
-#define MFC_STATIC 1
-#define MFC_NOTIFY 2
-
-#define MFC_LINES 64
-
#ifdef __BIG_ENDIAN
#define MFC_HASH(a,b) (((((__force u32)(__be32)a)>>24)^(((__force u32)(__be32)b)>>26))&(MFC_LINES-1))
#else
#define MFC_HASH(a,b) ((((__force u32)(__be32)a)^(((__force u32)(__be32)b)>>2))&(MFC_LINES-1))
-#endif
+#endif
struct rtmsg;
-extern int ipmr_get_route(struct net *net, struct sk_buff *skb,
- __be32 saddr, __be32 daddr,
- struct rtmsg *rtm, int nowait);
+int ipmr_get_route(struct net *net, struct sk_buff *skb,
+ __be32 saddr, __be32 daddr,
+ struct rtmsg *rtm, int nowait);
#endif
diff --git a/include/linux/net.h b/include/linux/net.h
index 70ac5e28e6b7..0b4ac7da583a 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -34,8 +34,12 @@ struct inode;
struct file;
struct net;
-#define SOCK_ASYNC_NOSPACE 0
-#define SOCK_ASYNC_WAITDATA 1
+/* Historically, SOCKWQ_ASYNC_NOSPACE & SOCKWQ_ASYNC_WAITDATA were located
+ * in sock->flags, but moved into sk->sk_wq->flags to be RCU protected.
+ * Eventually all flags will be in sk->sk_wq->flags.
+ */
+#define SOCKWQ_ASYNC_NOSPACE 0
+#define SOCKWQ_ASYNC_WAITDATA 1
#define SOCK_NOSPACE 2
#define SOCK_PASSCRED 3
#define SOCK_PASSSEC 4
@@ -89,6 +93,7 @@ struct socket_wq {
/* Note: wait MUST be first field of socket_wq */
wait_queue_head_t wait;
struct fasync_struct *fasync_list;
+ unsigned long flags; /* %SOCKWQ_ASYNC_NOSPACE, etc */
struct rcu_head rcu;
} ____cacheline_aligned_in_smp;
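Because the flags now live in the RCU-protected socket_wq, readers must dereference sk->sk_wq under rcu_read_lock(); a minimal sketch of the pattern (the function itself is hypothetical):

#include <net/sock.h>

static bool example_nospace_pending(struct sock *sk)
{
	struct socket_wq *wq;
	bool pending = false;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq)
		pending = test_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags);
	rcu_read_unlock();

	return pending;
}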
@@ -96,7 +101,7 @@ struct socket_wq {
* struct socket - general BSD socket
* @state: socket state (%SS_CONNECTED, etc)
* @type: socket type (%SOCK_STREAM, etc)
- * @flags: socket flags (%SOCK_ASYNC_NOSPACE, etc)
+ * @flags: socket flags (%SOCK_NOSPACE, etc)
* @ops: protocol specific socket operations
* @file: File back pointer for gc
* @sk: internal networking protocol agnostic socket representation
@@ -202,7 +207,7 @@ enum {
SOCK_WAKE_URG,
};
-int sock_wake_async(struct socket *sk, int how, int band);
+int sock_wake_async(struct socket_wq *sk_wq, int how, int band);
int sock_register(const struct net_proto_family *fam);
void sock_unregister(int family);
int __sock_create(struct net *net, int family, int type, int proto,
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index f0d87347df19..d9654f0eecb3 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -52,7 +52,7 @@ enum {
NETIF_F_GSO_TUNNEL_REMCSUM_BIT,
NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
- NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */
+ NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */
NETIF_F_FCOE_MTU_BIT, /* Supports max FCoE MTU, 2158 bytes*/
NETIF_F_NTUPLE_BIT, /* N-tuple filters supported */
NETIF_F_RXHASH_BIT, /* Receive hashing offload */
@@ -103,7 +103,7 @@ enum {
#define NETIF_F_NTUPLE __NETIF_F(NTUPLE)
#define NETIF_F_RXCSUM __NETIF_F(RXCSUM)
#define NETIF_F_RXHASH __NETIF_F(RXHASH)
-#define NETIF_F_SCTP_CSUM __NETIF_F(SCTP_CSUM)
+#define NETIF_F_SCTP_CRC __NETIF_F(SCTP_CRC)
#define NETIF_F_SG __NETIF_F(SG)
#define NETIF_F_TSO6 __NETIF_F(TSO6)
#define NETIF_F_TSO_ECN __NETIF_F(TSO_ECN)
@@ -146,10 +146,12 @@ enum {
#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \
NETIF_F_TSO6 | NETIF_F_UFO)
-#define NETIF_F_GEN_CSUM NETIF_F_HW_CSUM
-#define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
-#define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
-#define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
+/* List of IP checksum features. Note that NETIF_F_HW_CSUM should not be
+ * set in features when NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM are set;
+ * this would be contradictory.
+ */
+#define NETIF_F_CSUM_MASK (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
+ NETIF_F_HW_CSUM)
#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 67bfac1abfc1..c20b814e46a0 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -132,7 +132,9 @@ static inline bool dev_xmit_complete(int rc)
* used.
*/
-#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
+#if defined(CONFIG_HYPERV_NET)
+# define LL_MAX_HEADER 128
+#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
# define LL_MAX_HEADER 128
# else
@@ -326,7 +328,8 @@ enum {
NAPI_STATE_SCHED, /* Poll is scheduled */
NAPI_STATE_DISABLE, /* Disable pending */
NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
- NAPI_STATE_HASHED, /* In NAPI hash */
+ NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */
+ NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
};
enum gro_result {
@@ -461,19 +464,13 @@ static inline void napi_complete(struct napi_struct *n)
}
/**
- * napi_by_id - lookup a NAPI by napi_id
- * @napi_id: hashed napi_id
- *
- * lookup @napi_id in napi_hash table
- * must be called under rcu_read_lock()
- */
-struct napi_struct *napi_by_id(unsigned int napi_id);
-
-/**
* napi_hash_add - add a NAPI to global hashtable
* @napi: napi context
*
* generate a new napi_id and store a @napi under it in napi_hash
+ * Used for busy polling (CONFIG_NET_RX_BUSY_POLL)
+ * Note: This is normally automatically done from netif_napi_add(),
 * so it might disappear in a future Linux version.
*/
void napi_hash_add(struct napi_struct *napi);
@@ -482,9 +479,14 @@ void napi_hash_add(struct napi_struct *napi);
* @napi: napi context
*
* Warning: caller must observe rcu grace period
- * before freeing memory containing @napi
+ * before freeing memory containing @napi, if
+ * this function returns true.
+ * Note: core networking stack automatically calls it
 * from netif_napi_del().
+ * Drivers might want to call this helper to combine all
+ * the needed rcu grace periods into a single one.
*/
-void napi_hash_del(struct napi_struct *napi);
+bool napi_hash_del(struct napi_struct *napi);
/**
* napi_disable - prevent NAPI from scheduling
@@ -1011,6 +1013,19 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* a new port starts listening. The operation is protected by the
* vxlan_net->sock_lock.
*
+ * void (*ndo_add_geneve_port)(struct net_device *dev,
+ * sa_family_t sa_family, __be16 port);
+ * Called by geneve to notify a driver about the UDP port and socket
 * address family that geneve is listening to. It is called only when
+ * a new port starts listening. The operation is protected by the
+ * geneve_net->sock_lock.
+ *
+ * void (*ndo_del_geneve_port)(struct net_device *dev,
+ * sa_family_t sa_family, __be16 port);
+ * Called by geneve to notify the driver about a UDP port and socket
+ * address family that geneve is not listening to anymore. The operation
+ * is protected by the geneve_net->sock_lock.
+ *
* void (*ndo_del_vxlan_port)(struct net_device *dev,
* sa_family_t sa_family, __be16 port);
* Called by vxlan to notify the driver about a UDP port and socket
@@ -1215,7 +1230,12 @@ struct net_device_ops {
void (*ndo_del_vxlan_port)(struct net_device *dev,
sa_family_t sa_family,
__be16 port);
-
+ void (*ndo_add_geneve_port)(struct net_device *dev,
+ sa_family_t sa_family,
+ __be16 port);
+ void (*ndo_del_geneve_port)(struct net_device *dev,
+ sa_family_t sa_family,
+ __be16 port);
void* (*ndo_dfwd_add_station)(struct net_device *pdev,
struct net_device *dev);
void (*ndo_dfwd_del_station)(struct net_device *pdev,
@@ -1271,6 +1291,7 @@ struct net_device_ops {
* @IFF_NO_QUEUE: device can run without qdisc attached
* @IFF_OPENVSWITCH: device is a Open vSwitch master
* @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
+ * @IFF_TEAM: device is a team device
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
@@ -1297,6 +1318,7 @@ enum netdev_priv_flags {
IFF_NO_QUEUE = 1<<21,
IFF_OPENVSWITCH = 1<<22,
IFF_L3MDEV_SLAVE = 1<<23,
+ IFF_TEAM = 1<<24,
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1323,6 +1345,7 @@ enum netdev_priv_flags {
#define IFF_NO_QUEUE IFF_NO_QUEUE
#define IFF_OPENVSWITCH IFF_OPENVSWITCH
#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
+#define IFF_TEAM IFF_TEAM
/**
* struct net_device - The DEVICE structure.
@@ -1398,7 +1421,8 @@ enum netdev_priv_flags {
* @dma: DMA channel
* @mtu: Interface MTU value
* @type: Interface hardware type
- * @hard_header_len: Hardware header length
 *	@hard_header_len: Hardware header length, which is also the
 *			  minimum size of a packet.
*
* @needed_headroom: Extra headroom the hardware may need, but not in all
* cases can this be guaranteed
@@ -1948,6 +1972,26 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int weight);
/**
+ * netif_tx_napi_add - initialize a napi context
+ * @dev: network device
+ * @napi: napi context
+ * @poll: polling function
+ * @weight: default weight
+ *
+ * This variant of netif_napi_add() should be used from drivers using NAPI
+ * to exclusively poll a TX queue.
 * This avoids adding it to napi_hash[], which would needlessly pollute the hash table.
+ */
+static inline void netif_tx_napi_add(struct net_device *dev,
+ struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int),
+ int weight)
+{
+ set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
+ netif_napi_add(dev, napi, poll, weight);
+}
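A sketch of a driver that dedicates a NAPI context to TX completions and therefore opts out of busy polling (the driver specifics are hypothetical):

static int example_tx_poll(struct napi_struct *napi, int budget)
{
	/* Reap TX completions; a TX-only poller has no RX work,
	 * so it completes unconditionally.
	 */
	napi_complete(napi);
	return 0;
}

static void example_init_tx_napi(struct net_device *dev,
				 struct napi_struct *tx_napi)
{
	/* NAPI_STATE_NO_BUSY_POLL is set before the normal add,
	 * so the context never lands in napi_hash[].
	 */
	netif_tx_napi_add(dev, tx_napi, example_tx_poll, 64);
}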
+
+/**
* netif_napi_del - remove a napi context
* @napi: napi context
*
@@ -2083,7 +2127,25 @@ struct pcpu_sw_netstats {
})
#define netdev_alloc_pcpu_stats(type) \
- __netdev_alloc_pcpu_stats(type, GFP_KERNEL);
+ __netdev_alloc_pcpu_stats(type, GFP_KERNEL)
+
+enum netdev_lag_tx_type {
+ NETDEV_LAG_TX_TYPE_UNKNOWN,
+ NETDEV_LAG_TX_TYPE_RANDOM,
+ NETDEV_LAG_TX_TYPE_BROADCAST,
+ NETDEV_LAG_TX_TYPE_ROUNDROBIN,
+ NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
+ NETDEV_LAG_TX_TYPE_HASH,
+};
+
+struct netdev_lag_upper_info {
+ enum netdev_lag_tx_type tx_type;
+};
+
+struct netdev_lag_lower_state_info {
+ u8 link_up : 1,
+ tx_enabled : 1;
+};
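A sketch of how a LAG master is expected to use these types: advertise the TX policy through upper_info when enslaving, and publish per-port state via netdev_lower_state_changed() (declared further down in this header); the surrounding driver is hypothetical:

static int example_enslave(struct net_device *master,
			   struct net_device *port)
{
	struct netdev_lag_upper_info lag_info = {
		.tx_type = NETDEV_LAG_TX_TYPE_HASH,
	};

	/* upper_info reaches NETDEV_CHANGEUPPER listeners through
	 * netdev_notifier_changeupper_info::upper_info.
	 */
	return netdev_master_upper_dev_link(port, master, NULL, &lag_info);
}

static void example_port_change(struct net_device *port, bool up)
{
	struct netdev_lag_lower_state_info state = {
		.link_up    = up,
		.tx_enabled = up,
	};

	/* Emits NETDEV_CHANGELOWERSTATE with &state attached. */
	netdev_lower_state_changed(port, &state);
}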
#include <linux/notifier.h>
@@ -2120,6 +2182,7 @@ struct pcpu_sw_netstats {
#define NETDEV_CHANGEINFODATA 0x0018
#define NETDEV_BONDING_INFO 0x0019
#define NETDEV_PRECHANGEUPPER 0x001A
+#define NETDEV_CHANGELOWERSTATE 0x001B
int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -2138,6 +2201,12 @@ struct netdev_notifier_changeupper_info {
struct net_device *upper_dev; /* new upper dev */
bool master; /* is upper dev master */
	bool linking; /* is the notification for link or unlink */
+ void *upper_info; /* upper dev info */
+};
+
+struct netdev_notifier_changelowerstate_info {
+ struct netdev_notifier_info info; /* must be first */
+ void *lower_state_info; /* is lower dev state */
};
static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
@@ -2471,6 +2540,71 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
remcsum_unadjust((__sum16 *)ptr, grc->delta);
}
+struct skb_csum_offl_spec {
+ __u16 ipv4_okay:1,
+ ipv6_okay:1,
+ encap_okay:1,
+ ip_options_okay:1,
+ ext_hdrs_okay:1,
+ tcp_okay:1,
+ udp_okay:1,
+ sctp_okay:1,
+ vlan_okay:1,
+ no_encapped_ipv6:1,
+ no_not_encapped:1;
+};
+
+bool __skb_csum_offload_chk(struct sk_buff *skb,
+ const struct skb_csum_offl_spec *spec,
+ bool *csum_encapped,
+ bool csum_help);
+
+static inline bool skb_csum_offload_chk(struct sk_buff *skb,
+ const struct skb_csum_offl_spec *spec,
+ bool *csum_encapped,
+ bool csum_help)
+{
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return false;
+
+ return __skb_csum_offload_chk(skb, spec, csum_encapped, csum_help);
+}
+
+static inline bool skb_csum_offload_chk_help(struct sk_buff *skb,
+ const struct skb_csum_offl_spec *spec)
+{
+ bool csum_encapped;
+
+ return skb_csum_offload_chk(skb, spec, &csum_encapped, true);
+}
+
+static inline bool skb_csum_off_chk_help_cmn(struct sk_buff *skb)
+{
+ static const struct skb_csum_offl_spec csum_offl_spec = {
+ .ipv4_okay = 1,
+ .ip_options_okay = 1,
+ .ipv6_okay = 1,
+ .vlan_okay = 1,
+ .tcp_okay = 1,
+ .udp_okay = 1,
+ };
+
+ return skb_csum_offload_chk_help(skb, &csum_offl_spec);
+}
+
+static inline bool skb_csum_off_chk_help_cmn_v4_only(struct sk_buff *skb)
+{
+ static const struct skb_csum_offl_spec csum_offl_spec = {
+ .ipv4_okay = 1,
+ .ip_options_okay = 1,
+ .tcp_okay = 1,
+ .udp_okay = 1,
+ .vlan_okay = 1,
+ };
+
+ return skb_csum_offload_chk_help(skb, &csum_offl_spec);
+}
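A sketch of the intended call site in a driver's xmit path, assuming the helper returns true when the device may offload the checksum itself and, since csum_help is set, otherwise resolves it in software (the hook below is hypothetical):

static void example_tx_csum(struct sk_buff *skb, bool *use_hw_csum)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		*use_hw_csum = false;
		return;
	}

	/* Device offloads plain TCP/UDP over IPv4/IPv6 only; other
	 * CHECKSUM_PARTIAL packets fall back to skb_checksum_help()
	 * inside the helper.
	 */
	*use_hw_csum = skb_csum_off_chk_help_cmn(skb);
}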
+
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr,
@@ -3594,15 +3728,15 @@ struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
int netdev_master_upper_dev_link(struct net_device *dev,
- struct net_device *upper_dev);
-int netdev_master_upper_dev_link_private(struct net_device *dev,
- struct net_device *upper_dev,
- void *private);
+ struct net_device *upper_dev,
+ void *upper_priv, void *upper_info);
void netdev_upper_dev_unlink(struct net_device *dev,
struct net_device *upper_dev);
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
void *netdev_lower_dev_get_private(struct net_device *dev,
struct net_device *lower_dev);
+void netdev_lower_state_changed(struct net_device *lower_dev,
+ void *lower_state_info);
/* RSS keys are 40 or 52 bytes long */
#define NETDEV_RSS_KEY_LEN 52
@@ -3610,7 +3744,7 @@ extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN];
void netdev_rss_key_fill(void *buffer, size_t len);
int dev_get_nest_level(struct net_device *dev,
- bool (*type_check)(struct net_device *dev));
+ bool (*type_check)(const struct net_device *dev));
int skb_checksum_help(struct sk_buff *skb);
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
netdev_features_t features, bool tx_path);
@@ -3640,13 +3774,37 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
static inline bool can_checksum_protocol(netdev_features_t features,
__be16 protocol)
{
- return ((features & NETIF_F_GEN_CSUM) ||
- ((features & NETIF_F_V4_CSUM) &&
- protocol == htons(ETH_P_IP)) ||
- ((features & NETIF_F_V6_CSUM) &&
- protocol == htons(ETH_P_IPV6)) ||
- ((features & NETIF_F_FCOE_CRC) &&
- protocol == htons(ETH_P_FCOE)));
+ if (protocol == htons(ETH_P_FCOE))
+ return !!(features & NETIF_F_FCOE_CRC);
+
+ /* Assume this is an IP checksum (not SCTP CRC) */
+
+ if (features & NETIF_F_HW_CSUM) {
+ /* Can checksum everything */
+ return true;
+ }
+
+ switch (protocol) {
+ case htons(ETH_P_IP):
+ return !!(features & NETIF_F_IP_CSUM);
+ case htons(ETH_P_IPV6):
+ return !!(features & NETIF_F_IPV6_CSUM);
+ default:
+ return false;
+ }
+}
+
+/* Map an ethertype into IP protocol if possible */
+static inline int eproto_to_ipproto(int eproto)
+{
+ switch (eproto) {
+ case htons(ETH_P_IP):
+ return IPPROTO_IP;
+ case htons(ETH_P_IPV6):
+ return IPPROTO_IPV6;
+ default:
+ return -1;
+ }
}
#ifdef CONFIG_BUG
@@ -3711,15 +3869,14 @@ void linkwatch_run_queue(void);
static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
netdev_features_t f2)
{
- if (f1 & NETIF_F_GEN_CSUM)
- f1 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
- if (f2 & NETIF_F_GEN_CSUM)
- f2 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
- f1 &= f2;
- if (f1 & NETIF_F_GEN_CSUM)
- f1 &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
+ if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
+ if (f1 & NETIF_F_HW_CSUM)
			f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
+ else
			f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
+ }
- return f1;
+ return f1 & f2;
}
static inline netdev_features_t netdev_get_wanted_features(
@@ -3807,32 +3964,32 @@ static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
skb->mac_len = mac_len;
}
-static inline bool netif_is_macvlan(struct net_device *dev)
+static inline bool netif_is_macvlan(const struct net_device *dev)
{
return dev->priv_flags & IFF_MACVLAN;
}
-static inline bool netif_is_macvlan_port(struct net_device *dev)
+static inline bool netif_is_macvlan_port(const struct net_device *dev)
{
return dev->priv_flags & IFF_MACVLAN_PORT;
}
-static inline bool netif_is_ipvlan(struct net_device *dev)
+static inline bool netif_is_ipvlan(const struct net_device *dev)
{
return dev->priv_flags & IFF_IPVLAN_SLAVE;
}
-static inline bool netif_is_ipvlan_port(struct net_device *dev)
+static inline bool netif_is_ipvlan_port(const struct net_device *dev)
{
return dev->priv_flags & IFF_IPVLAN_MASTER;
}
-static inline bool netif_is_bond_master(struct net_device *dev)
+static inline bool netif_is_bond_master(const struct net_device *dev)
{
return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}
-static inline bool netif_is_bond_slave(struct net_device *dev)
+static inline bool netif_is_bond_slave(const struct net_device *dev)
{
return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}
@@ -3867,6 +4024,26 @@ static inline bool netif_is_ovs_master(const struct net_device *dev)
return dev->priv_flags & IFF_OPENVSWITCH;
}
+static inline bool netif_is_team_master(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_TEAM;
+}
+
+static inline bool netif_is_team_port(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_TEAM_PORT;
+}
+
+static inline bool netif_is_lag_master(const struct net_device *dev)
+{
+ return netif_is_bond_master(dev) || netif_is_team_master(dev);
+}
+
+static inline bool netif_is_lag_port(const struct net_device *dev)
+{
+ return netif_is_bond_slave(dev) || netif_is_team_port(dev);
+}
+
/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h
new file mode 100644
index 000000000000..22a16a23cd8a
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_sctp.h
@@ -0,0 +1,13 @@
+#ifndef _NF_CONNTRACK_SCTP_H
+#define _NF_CONNTRACK_SCTP_H
+/* SCTP tracking. */
+
+#include <uapi/linux/netfilter/nf_conntrack_sctp.h>
+
+struct ip_ct_sctp {
+ enum sctp_conntrack state;
+
+ __be32 vtag[IP_CT_DIR_MAX];
+};
+
+#endif /* _NF_CONNTRACK_SCTP_H */
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 249d1bb01e03..5646b24bfc64 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -14,7 +14,7 @@ struct nfnl_callback {
int (*call_rcu)(struct sock *nl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const cda[]);
- int (*call_batch)(struct sock *nl, struct sk_buff *skb,
+ int (*call_batch)(struct net *net, struct sock *nl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const cda[]);
const struct nla_policy *policy; /* netlink attribute policy */
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 639e9b8b0e4d..0b41959aab9f 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -131,6 +131,7 @@ netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
struct netlink_callback {
struct sk_buff *skb;
const struct nlmsghdr *nlh;
+ int (*start)(struct netlink_callback *);
int (*dump)(struct sk_buff * skb,
struct netlink_callback *cb);
int (*done)(struct netlink_callback *cb);
@@ -153,6 +154,7 @@ struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags);
struct netlink_dump_control {
+ int (*start)(struct netlink_callback *);
int (*dump)(struct sk_buff *skb, struct netlink_callback *);
int (*done)(struct netlink_callback *);
void *data;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 570d630f98ae..11bbae44f4cb 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -251,6 +251,7 @@ struct nfs4_layoutget {
struct nfs4_layoutget_res res;
struct rpc_cred *cred;
gfp_t gfp_flags;
+ long timeout;
};
struct nfs4_getdeviceinfo_args {
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h
index 36112cdd665a..b90d8ec57c1f 100644
--- a/include/linux/of_dma.h
+++ b/include/linux/of_dma.h
@@ -80,7 +80,7 @@ static inline int of_dma_router_register(struct device_node *np,
static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
const char *name)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 039f2eec49ce..1e0deb8e8494 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -46,12 +46,14 @@ extern int of_irq_get(struct device_node *dev, int index);
extern int of_irq_get_byname(struct device_node *dev, const char *name);
extern int of_irq_to_resource_table(struct device_node *dev,
struct resource *res, int nr_irqs);
+extern struct device_node *of_irq_find_parent(struct device_node *child);
extern struct irq_domain *of_msi_get_domain(struct device *dev,
struct device_node *np,
enum irq_domain_bus_token token);
extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
u32 rid);
extern void of_msi_configure(struct device *dev, struct device_node *np);
+u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in);
#else
static inline int of_irq_count(struct device_node *dev)
{
@@ -70,6 +72,11 @@ static inline int of_irq_to_resource_table(struct device_node *dev,
{
return 0;
}
+static inline struct device_node *of_irq_find_parent(struct device_node *child)
+{
+ return NULL;
+}
+
static inline struct irq_domain *of_msi_get_domain(struct device *dev,
struct device_node *np,
enum irq_domain_bus_token token)
@@ -84,6 +91,11 @@ static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev
static inline void of_msi_configure(struct device *dev, struct device_node *np)
{
}
+static inline u32 of_msi_map_rid(struct device *dev,
+ struct device_node *msi_np, u32 rid_in)
+{
+ return rid_in;
+}
#endif
#if defined(CONFIG_OF_IRQ) || defined(CONFIG_SPARC)
@@ -93,7 +105,6 @@ static inline void of_msi_configure(struct device *dev, struct device_node *np)
* so declare it here regardless of the CONFIG_OF_IRQ setting.
*/
extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
-u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in);
#else /* !CONFIG_OF && !CONFIG_SPARC */
static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
@@ -101,12 +112,6 @@ static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
{
return 0;
}
-
-static inline u32 of_msi_map_rid(struct device *dev,
- struct device_node *msi_np, u32 rid_in)
-{
- return rid_in;
-}
#endif /* !CONFIG_OF */
#endif /* __OF_IRQ_H */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index e828e7b4afec..6ae25aae88fd 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -412,9 +412,18 @@ struct pci_host_bridge {
void (*release_fn)(struct pci_host_bridge *);
void *release_data;
unsigned int ignore_reset_delay:1; /* for entire hierarchy */
+ /* Resource alignment requirements */
+ resource_size_t (*align_resource)(struct pci_dev *dev,
+ const struct resource *res,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t align);
};
#define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
+
+struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
+
void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
void (*release_fn)(struct pci_host_bridge *),
void *release_data);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index d9ba49cedc5d..1acbefc4bbda 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2495,6 +2495,8 @@
#define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700
#define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff
+#define PCI_VENDOR_ID_NETRONOME 0x19ee
+
#define PCI_VENDOR_ID_QMI 0x1a32
#define PCI_VENDOR_ID_AZWAVE 0x1a3b
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index d841d33bcdc9..f9828a48f16a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -697,9 +697,11 @@ struct perf_cgroup {
* if there is no cgroup event for the current CPU context.
*/
static inline struct perf_cgroup *
-perf_cgroup_from_task(struct task_struct *task)
+perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
{
- return container_of(task_css(task, perf_event_cgrp_id),
+ return container_of(task_css_check(task, perf_event_cgrp_id,
+ ctx ? lockdep_is_held(&ctx->lock)
+ : true),
struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 05fde31b6dc6..a89cb0eef911 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -589,6 +589,12 @@ struct phy_driver {
int (*module_eeprom)(struct phy_device *dev,
struct ethtool_eeprom *ee, u8 *data);
+ /* Get statistics from the phy using ethtool */
+ int (*get_sset_count)(struct phy_device *dev);
+ void (*get_strings)(struct phy_device *dev, u8 *data);
+ void (*get_stats)(struct phy_device *dev,
+ struct ethtool_stats *stats, u64 *data);
+
struct device_driver driver;
};
#define to_phy_driver(d) container_of(d, struct phy_driver, driver)
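A sketch of a PHY driver wiring up the new statistics hooks; the counter set and the vendor register are hypothetical:

#include <linux/ethtool.h>
#include <linux/phy.h>

static const char example_stat_names[][ETH_GSTRING_LEN] = {
	"phy_receive_errors",
};

static int example_get_sset_count(struct phy_device *dev)
{
	return ARRAY_SIZE(example_stat_names);
}

static void example_get_strings(struct phy_device *dev, u8 *data)
{
	memcpy(data, example_stat_names, sizeof(example_stat_names));
}

static void example_get_stats(struct phy_device *dev,
			      struct ethtool_stats *stats, u64 *data)
{
	data[0] = phy_read(dev, 0x15);	/* hypothetical error counter */
}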
diff --git a/include/linux/pim.h b/include/linux/pim.h
index 252bf6644c51..e1d756f81348 100644
--- a/include/linux/pim.h
+++ b/include/linux/pim.h
@@ -13,6 +13,11 @@
#define PIM_NULL_REGISTER cpu_to_be32(0x40000000)
+static inline bool ipmr_pimsm_enabled(void)
+{
+ return IS_BUILTIN(CONFIG_IP_PIMSM_V1) || IS_BUILTIN(CONFIG_IP_PIMSM_V2);
+}
+
/* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */
struct pimreghdr
{
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h
index e2878baeb90e..4299f4ba03bd 100644
--- a/include/linux/platform_data/edma.h
+++ b/include/linux/platform_data/edma.h
@@ -72,7 +72,7 @@ struct edma_soc_info {
struct edma_rsv_info *rsv;
/* List of channels allocated for memcpy, terminated with -1 */
- s16 *memcpy_channels;
+ s32 *memcpy_channels;
s8 (*queue_priority_mapping)[2];
const s16 (*xbar_chans)[2];
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index 5440f64d2942..21221338ad18 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -1,7 +1,7 @@
/*
* FLoating proportions
*
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
* This file contains the public data structure and API definitions.
*/
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 6a4347639c03..1d1ba2c5ee7a 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -9,6 +9,8 @@
#ifndef __COMMON_HSI__
#define __COMMON_HSI__
+#define CORE_SPQE_PAGE_SIZE_BYTES 4096
+
#define FW_MAJOR_VERSION 8
#define FW_MINOR_VERSION 4
#define FW_REVISION_VERSION 2
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index b920c3605c46..41b9049b57e2 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -111,7 +111,8 @@ static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) -
(u32)p_chain->cons_idx;
if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
- used -= (used / p_chain->elem_per_page);
+ used -= p_chain->prod_idx / p_chain->elem_per_page -
+ p_chain->cons_idx / p_chain->elem_per_page;
return p_chain->capacity - used;
}
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index dc9a1353f971..d4a32e878180 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -25,6 +25,12 @@
#include <linux/qed/common_hsi.h>
#include <linux/qed/qed_chain.h>
+enum qed_led_mode {
+ QED_LED_MODE_OFF,
+ QED_LED_MODE_ON,
+ QED_LED_MODE_RESTORE
+};
+
#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
(void __iomem *)(reg_addr))
@@ -252,6 +258,17 @@ struct qed_common_ops {
void (*chain_free)(struct qed_dev *cdev,
struct qed_chain *p_chain);
+
+/**
+ * @brief set_led - Configure LED mode
+ *
+ * @param cdev
+ * @param mode - LED mode
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*set_led)(struct qed_dev *cdev,
+ enum qed_led_mode mode);
};
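A usage sketch for the new callback, e.g. behind ethtool's identify operation (the caller and the way it obtains the ops pointer are hypothetical):

static int example_identify(struct qed_dev *cdev,
			    const struct qed_common_ops *ops)
{
	int rc;

	rc = ops->set_led(cdev, QED_LED_MODE_ON);
	if (rc)
		return rc;

	/* Return LED control to the firmware's default policy. */
	return ops->set_led(cdev, QED_LED_MODE_RESTORE);
}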
/**
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 843ceca9a21e..63bd7601b6de 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -19,6 +19,7 @@
#include <linux/atomic.h>
#include <linux/compiler.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
@@ -339,10 +340,11 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
int rhashtable_init(struct rhashtable *ht,
const struct rhashtable_params *params);
-int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
- struct rhash_head *obj,
- struct bucket_table *old_tbl);
-int rhashtable_insert_rehash(struct rhashtable *ht);
+struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
+ const void *key,
+ struct rhash_head *obj,
+ struct bucket_table *old_tbl);
+int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
@@ -598,9 +600,11 @@ restart:
new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
if (unlikely(new_tbl)) {
- err = rhashtable_insert_slow(ht, key, obj, new_tbl);
- if (err == -EAGAIN)
+ tbl = rhashtable_insert_slow(ht, key, obj, new_tbl);
+ if (!IS_ERR_OR_NULL(tbl))
goto slow_path;
+
+ err = PTR_ERR(tbl);
goto out;
}
@@ -611,7 +615,7 @@ restart:
if (unlikely(rht_grow_above_100(ht, tbl))) {
slow_path:
spin_unlock_bh(lock);
- err = rhashtable_insert_rehash(ht);
+ err = rhashtable_insert_rehash(ht, tbl);
rcu_read_unlock();
if (err)
return err;
@@ -819,4 +823,86 @@ out:
return err;
}
+/* Internal function, please use rhashtable_replace_fast() instead */
+static inline int __rhashtable_replace_fast(
+ struct rhashtable *ht, struct bucket_table *tbl,
+ struct rhash_head *obj_old, struct rhash_head *obj_new,
+ const struct rhashtable_params params)
+{
+ struct rhash_head __rcu **pprev;
+ struct rhash_head *he;
+ spinlock_t *lock;
+ unsigned int hash;
+ int err = -ENOENT;
+
	/* Minimally, the old and new objects must have the same hash
+ * (which should mean identifiers are the same).
+ */
+ hash = rht_head_hashfn(ht, tbl, obj_old, params);
+ if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
+ return -EINVAL;
+
+ lock = rht_bucket_lock(tbl, hash);
+
+ spin_lock_bh(lock);
+
+ pprev = &tbl->buckets[hash];
+ rht_for_each(he, tbl, hash) {
+ if (he != obj_old) {
+ pprev = &he->next;
+ continue;
+ }
+
+ rcu_assign_pointer(obj_new->next, obj_old->next);
+ rcu_assign_pointer(*pprev, obj_new);
+ err = 0;
+ break;
+ }
+
+ spin_unlock_bh(lock);
+
+ return err;
+}
+
+/**
+ * rhashtable_replace_fast - replace an object in hash table
+ * @ht: hash table
+ * @obj_old: pointer to hash head inside object being replaced
+ * @obj_new: pointer to hash head inside object which is new
+ * @params: hash table parameters
+ *
+ * Replacing an object doesn't affect the number of elements in the hash table
+ * or bucket, so we don't need to worry about shrinking or expanding the
+ * table here.
+ *
+ * Returns zero on success, -ENOENT if the entry could not be found,
+ * -EINVAL if hash is not the same for the old and new objects.
+ */
+static inline int rhashtable_replace_fast(
+ struct rhashtable *ht, struct rhash_head *obj_old,
+ struct rhash_head *obj_new,
+ const struct rhashtable_params params)
+{
+ struct bucket_table *tbl;
+ int err;
+
+ rcu_read_lock();
+
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+
+ /* Because we have already taken (and released) the bucket
+ * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then the entry, if it exists, is guaranteed to
	 * still be in the old tbl.
+ */
+ while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
+ obj_new, params)) &&
+ (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
+ ;
+
+ rcu_read_unlock();
+
+ return err;
+}
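A sketch of replacing an object in place, with a hypothetical entry type; both objects must hash to the same bucket, which here means carrying the same key:

#include <linux/rhashtable.h>

struct example_entry {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params example_params = {
	.key_len     = sizeof(u32),
	.key_offset  = offsetof(struct example_entry, key),
	.head_offset = offsetof(struct example_entry, node),
};

static int example_swap(struct rhashtable *ht, struct example_entry *old,
			struct example_entry *new)
{
	new->key = old->key;	/* differing keys would yield -EINVAL */
	return rhashtable_replace_fast(ht, &old->node, &new->node,
				       example_params);
}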
+
#endif /* _LINUX_RHASHTABLE_H */
diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h
index 80af3cd35ae4..72ce932c69b2 100644
--- a/include/linux/scpi_protocol.h
+++ b/include/linux/scpi_protocol.h
@@ -71,7 +71,7 @@ struct scpi_ops {
int (*sensor_get_value)(u16, u32 *);
};
-#if IS_ENABLED(CONFIG_ARM_SCPI_PROTOCOL)
+#if IS_REACHABLE(CONFIG_ARM_SCPI_PROTOCOL)
struct scpi_ops *get_scpi_ops(void);
#else
static inline struct scpi_ops *get_scpi_ops(void) { return NULL; }
diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h
index 8c9131db2b25..f2e27e078362 100644
--- a/include/linux/sh_eth.h
+++ b/include/linux/sh_eth.h
@@ -4,7 +4,7 @@
#include <linux/phy.h>
#include <linux/if_ether.h>
-enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN};
+enum {EDMAC_LITTLE_ENDIAN};
struct sh_eth_plat_data {
int phy;
diff --git a/include/linux/signal.h b/include/linux/signal.h
index ab1e0392b5ac..92557bbce7e7 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -239,7 +239,6 @@ extern int sigprocmask(int, sigset_t *, sigset_t *);
extern void set_current_blocked(sigset_t *);
extern void __set_current_blocked(const sigset_t *);
extern int show_unhandled_signals;
-extern int sigsuspend(sigset_t *);
struct sigaction {
#ifndef __ARCH_HAS_IRIX_SIGACTION
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 4355129fff91..6b6bd42d6134 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -39,11 +39,55 @@
#include <linux/in6.h>
#include <net/flow.h>
-/* A. Checksumming of received packets by device.
+/* The interface for checksum offload between the stack and networking drivers
+ * is as follows...
+ *
+ * A. IP checksum related features
+ *
+ * Drivers advertise checksum offload capabilities in the features of a device.
 * From the stack's point of view these are capabilities offered by the driver;
+ * a driver typically only advertises features that it is capable of offloading
+ * to its device.
+ *
+ * The checksum related features are:
+ *
+ * NETIF_F_HW_CSUM - The driver (or its device) is able to compute one
+ * IP (one's complement) checksum for any combination
+ * of protocols or protocol layering. The checksum is
+ * computed and set in a packet per the CHECKSUM_PARTIAL
+ * interface (see below).
+ *
+ * NETIF_F_IP_CSUM - Driver (device) is only able to checksum plain
+ * TCP or UDP packets over IPv4. These are specifically
+ * unencapsulated packets of the form IPv4|TCP or
+ * IPv4|UDP where the Protocol field in the IPv4 header
 *			  is TCP or UDP. The IPv4 header may contain IP options.
+ * This feature cannot be set in features for a device
+ * with NETIF_F_HW_CSUM also set. This feature is being
+ * DEPRECATED (see below).
+ *
+ * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain
+ * TCP or UDP packets over IPv6. These are specifically
+ * unencapsulated packets of the form IPv6|TCP or
 *			  IPv6|UDP where the Next Header field in the IPv6
+ * header is either TCP or UDP. IPv6 extension headers
+ * are not supported with this feature. This feature
+ * cannot be set in features for a device with
+ * NETIF_F_HW_CSUM also set. This feature is being
+ * DEPRECATED (see below).
+ *
+ * NETIF_F_RXCSUM - Driver (device) performs receive checksum offload.
 *			  This flag is only used to disable the RX checksum
+ * feature for a device. The stack will accept receive
+ * checksum indication in packets received on a device
+ * regardless of whether NETIF_F_RXCSUM is set.
+ *
+ * B. Checksumming of received packets by device. Indication of checksum
 *    verification is set in skb->ip_summed. Possible values are:
*
* CHECKSUM_NONE:
*
- * Device failed to checksum this packet e.g. due to lack of capabilities.
+ * Device did not checksum this packet e.g. due to lack of capabilities.
* The packet contains full (though not verified) checksum in packet but
* not in skb->csum. Thus, skb->csum is undefined in this case.
*
@@ -53,9 +97,8 @@
* (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
* for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY
* if their checksums are okay. skb->csum is still undefined in this case
- * though. It is a bad option, but, unfortunately, nowadays most vendors do
- * this. Apparently with the secret goal to sell you new devices, when you
- * will add new protocol to your host, f.e. IPv6 8)
+ * though. A driver or device must never modify the checksum field in the
+ * packet even if checksum is verified.
*
* CHECKSUM_UNNECESSARY is applicable to following protocols:
* TCP: IPv6 and IPv4.
@@ -96,40 +139,77 @@
* packet that are after the checksum being offloaded are not considered to
* be verified.
*
- * B. Checksumming on output.
- *
- * CHECKSUM_NONE:
- *
- * The skb was already checksummed by the protocol, or a checksum is not
- * required.
+ * C. Checksumming on transmit for non-GSO. The stack requests checksum offload
 * in skb->ip_summed for a packet. Values are:
*
* CHECKSUM_PARTIAL:
*
- * The device is required to checksum the packet as seen by hard_start_xmit()
+ * The driver is required to checksum the packet as seen by hard_start_xmit()
* from skb->csum_start up to the end, and to record/write the checksum at
- * offset skb->csum_start + skb->csum_offset.
+ * offset skb->csum_start + skb->csum_offset. A driver may verify that the
 * csum_start and csum_offset values are valid given the length and
 * offset of the packet. It should not, however, attempt to validate that
 * the checksum refers to a legitimate transport layer checksum; it is the
+ * purview of the stack to validate that csum_start and csum_offset are set
+ * correctly.
+ *
+ * When the stack requests checksum offload for a packet, the driver MUST
+ * ensure that the checksum is set correctly. A driver can either offload the
+ * checksum calculation to the device, or call skb_checksum_help (in the case
+ * that the device does not support offload for a particular checksum).
+ *
+ * NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of
+ * NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate
+ * checksum offload capability. If a device has limited checksum capabilities
+ * (for instance can only perform NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM as
+ * described above) a helper function can be called to resolve
+ * CHECKSUM_PARTIAL. The helper functions are skb_csum_off_chk*. The helper
+ * function takes a spec argument that describes the protocol layer that is
+ * supported for checksum offload and can be called for each packet. If a
+ * packet does not match the specification for offload, skb_checksum_help
+ * is called to resolve the checksum.
*
- * The device must show its capabilities in dev->features, set up at device
- * setup time, e.g. netdev_features.h:
+ * CHECKSUM_NONE:
*
- * NETIF_F_HW_CSUM - It's a clever device, it's able to checksum everything.
- * NETIF_F_IP_CSUM - Device is dumb, it's able to checksum only TCP/UDP over
- * IPv4. Sigh. Vendors like this way for an unknown reason.
- * Though, see comment above about CHECKSUM_UNNECESSARY. 8)
- * NETIF_F_IPV6_CSUM - About as dumb as the last one but does IPv6 instead.
- * NETIF_F_... - Well, you get the picture.
+ * The skb was already checksummed by the protocol, or a checksum is not
+ * required.
*
* CHECKSUM_UNNECESSARY:
*
- * Normally, the device will do per protocol specific checksumming. Protocol
- * implementations that do not want the NIC to perform the checksum
- * calculation should use this flag in their outgoing skbs.
 *   This has the same meaning as CHECKSUM_NONE for checksum offload on
+ * output.
*
- * NETIF_F_FCOE_CRC - This indicates that the device can do FCoE FC CRC
- * offload. Correspondingly, the FCoE protocol driver
- * stack should use CHECKSUM_UNNECESSARY.
- *
- * Any questions? No questions, good. --ANK
+ * CHECKSUM_COMPLETE:
+ * Not used in checksum output. If a driver observes a packet with this value
 *   set in the skbuff, it should treat the packet as if CHECKSUM_NONE were set.
+ *
+ * D. Non-IP checksum (CRC) offloads
+ *
+ * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of
+ * offloading the SCTP CRC in a packet. To perform this offload the stack
+ * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
 *   accordingly. Note that there is no indication in the skbuff that the
 *   CHECKSUM_PARTIAL refers to an SCTP checksum; a driver that supports
+ * both IP checksum offload and SCTP CRC offload must verify which offload
+ * is configured for a packet presumably by inspecting packet headers.
+ *
+ * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of
+ * offloading the FCOE CRC in a packet. To perform this offload the stack
+ * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
 *   accordingly. Note that there is no indication in the skbuff that the
 *   CHECKSUM_PARTIAL refers to an FCOE checksum; a driver that supports
+ * both IP checksum offload and FCOE CRC offload must verify which offload
+ * is configured for a packet presumably by inspecting packet headers.
+ *
+ * E. Checksumming on output with GSO.
+ *
+ * In the case of a GSO packet (skb_is_gso(skb) is true), checksum offload
+ * is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the
+ * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as
+ * part of the GSO operation is implied. If a checksum is being offloaded
+ * with GSO then ip_summed is CHECKSUM_PARTIAL, csum_start and csum_offset
 * are set to refer to the outermost checksum being offloaded (two offloaded
+ * checksums are possible with UDP encapsulation).
*/
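To make section C concrete, a sketch of the two legal driver responses to CHECKSUM_PARTIAL: program the device with the stack-supplied offsets, or resolve the checksum in software. The descriptor layout and the capability check are hypothetical:

#include <linux/skbuff.h>

struct example_tx_desc {
	u16 csum_start;
	u16 csum_offset;
};

static bool example_hw_can_csum(const struct sk_buff *skb)
{
	return false;	/* hypothetical device capability check */
}

static int example_tx_csum(struct sk_buff *skb,
			   struct example_tx_desc *desc)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;	/* no offload requested */

	if (example_hw_can_csum(skb)) {
		/* Checksum from csum_start to the end of the packet,
		 * written back at csum_start + csum_offset.
		 */
		desc->csum_start  = skb_checksum_start_offset(skb);
		desc->csum_offset = skb->csum_offset;
		return 0;
	}

	/* Software fallback mandated by the contract above. */
	return skb_checksum_help(skb);
}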
/* Don't change this without changing skb_csum_unnecessary! */
@@ -833,7 +913,7 @@ struct sk_buff_fclones {
* skb_fclone_busy - check if fclone is busy
* @skb: buffer
*
- * Returns true is skb is a fast clone, and its clone is not freed.
+ * Returns true if skb is a fast clone, and its clone is not freed.
* Some drivers call skb_orphan() in their ndo_start_xmit(),
 * so we also check that this didn't happen.
*/
@@ -1082,9 +1162,6 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
static inline void skb_sender_cpu_clear(struct sk_buff *skb)
{
-#ifdef CONFIG_XPS
- skb->sender_cpu = 0;
-#endif
}
#ifdef NET_SKBUFF_DATA_USES_OFFSET
@@ -1942,6 +2019,11 @@ static inline unsigned char *skb_inner_transport_header(const struct sk_buff
return skb->head + skb->inner_transport_header;
}
+static inline int skb_inner_transport_offset(const struct sk_buff *skb)
+{
+ return skb_inner_transport_header(skb) - skb->data;
+}
+
static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
skb->inner_transport_header = skb->data - skb->head;
@@ -2788,6 +2870,12 @@ static inline void skb_frag_list_init(struct sk_buff *skb)
#define skb_walk_frags(skb, iter) \
for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
+
+int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
+ const struct sk_buff *skb);
+struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
+ int *peeked, int *off, int *err,
+ struct sk_buff **last);
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
int *peeked, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 7c82e3b307a3..2037a861e367 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -158,6 +158,24 @@ size_t ksize(const void *);
#endif
/*
+ * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
+ * Intended for arches that get misalignment faults even for 64 bit integer
+ * aligned buffers.
+ */
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
+/*
+ * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
+ * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
+ * aligned pointers.
+ */
+#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
+#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
+#define __assume_page_alignment __assume_aligned(PAGE_SIZE)
+
+/*
* Kmalloc array related definitions
*/
@@ -286,8 +304,8 @@ static __always_inline int kmalloc_index(size_t size)
}
#endif /* !CONFIG_SLOB */
-void *__kmalloc(size_t size, gfp_t flags);
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment;
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment;
void kmem_cache_free(struct kmem_cache *, void *);
/*
@@ -298,11 +316,11 @@ void kmem_cache_free(struct kmem_cache *, void *);
* Note that interrupts must be enabled when calling these functions.
*/
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
#ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
@@ -316,12 +334,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f
#endif
#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment;
#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
gfp_t gfpflags,
- int node, size_t size);
+ int node, size_t size) __assume_slab_alignment;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
@@ -354,10 +372,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
}
#endif /* CONFIG_TRACING */
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);
+extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
#ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
@@ -482,15 +500,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
return __kmalloc_node(size, flags, node);
}
-/*
- * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
- * Intended for arches that get misalignment faults even for 64 bit integer
- * aligned buffers.
- */
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-#endif
-
struct memcg_cache_array {
struct rcu_head rcu;
struct kmem_cache *entries[0];
diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h
index dad035c16d94..343c13ac4f71 100644
--- a/include/linux/soc/ti/knav_dma.h
+++ b/include/linux/soc/ti/knav_dma.h
@@ -144,17 +144,17 @@ struct knav_dma_cfg {
* @psdata: Protocol specific
*/
struct knav_dma_desc {
- u32 desc_info;
- u32 tag_info;
- u32 packet_info;
- u32 buff_len;
- u32 buff;
- u32 next_desc;
- u32 orig_len;
- u32 orig_buff;
- u32 epib[KNAV_DMA_NUM_EPIB_WORDS];
- u32 psdata[KNAV_DMA_NUM_PS_WORDS];
- u32 pad[4];
+ __le32 desc_info;
+ __le32 tag_info;
+ __le32 packet_info;
+ __le32 buff_len;
+ __le32 buff;
+ __le32 next_desc;
+ __le32 orig_len;
+ __le32 orig_buff;
+ __le32 epib[KNAV_DMA_NUM_EPIB_WORDS];
+ __le32 psdata[KNAV_DMA_NUM_PS_WORDS];
+ __le32 pad[4];
} ____cacheline_aligned;
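With the descriptor fields now typed __le32, drivers are expected to go through the endian accessors; a minimal sketch:

#include <linux/soc/ti/knav_dma.h>

static u32 example_get_buff_len(const struct knav_dma_desc *desc)
{
	/* Descriptors are little-endian in memory regardless of CPU
	 * endianness; direct u32 access now trips sparse.
	 */
	return le32_to_cpu(desc->buff_len);
}

static void example_set_buff(struct knav_dma_desc *desc, u32 dma_addr)
{
	desc->buff = cpu_to_le32(dma_addr);
}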
#if IS_ENABLED(CONFIG_KEYSTONE_NAVIGATOR_DMA)
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index fddebc617469..4018b48f2b3b 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -15,6 +15,7 @@ struct sock_diag_handler {
__u8 family;
int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
int (*get_info)(struct sk_buff *skb, struct sock *sk);
+ int (*destroy)(struct sk_buff *skb, struct nlmsghdr *nlh);
};
int sock_diag_register(const struct sock_diag_handler *h);
@@ -68,4 +69,5 @@ bool sock_diag_has_destroy_listeners(const struct sock *sk)
}
void sock_diag_broadcast_destroy(struct sock *sk);
+int sock_diag_destroy(struct sock *sk, int err);
#endif
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 0adedca24c5b..0e1b1540597a 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -99,7 +99,7 @@ static inline int try_stop_cpus(const struct cpumask *cpumask,
* grabbing every spinlock (and more). So the "read" side to such a
* lock is anything which disables preemption.
*/
-#if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)
+#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
/**
* stop_machine: freeze the machine on all CPUs and run this function
@@ -118,7 +118,7 @@ int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus);
-#else /* CONFIG_STOP_MACHINE && CONFIG_SMP */
+#else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
static inline int stop_machine(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus)
@@ -137,5 +137,5 @@ static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
return stop_machine(fn, data, cpus);
}
-#endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */
+#endif /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
#endif /* _LINUX_STOP_MACHINE */
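
With the guard widened from CONFIG_STOP_MACHINE && CONFIG_SMP to CONFIG_SMP || CONFIG_HOTPLUG_CPU, the real stop_machine() is built whenever either option is set; UP kernels without hotplug keep the inline fallback. A hedged usage sketch, with a hypothetical callback:

#include <linux/stop_machine.h>

/* Hypothetical callback: runs while every online CPU spins in its
 * stopper thread, so no other kernel code executes concurrently. */
static int patch_text_cb(void *data)
{
	/* apply a cross-CPU-unsafe update here */
	return 0;
}

static int do_patch(void *data)
{
	/* NULL cpumask: run the callback on one CPU of cpu_online_mask */
	return stop_machine(patch_text_cb, data, NULL);
}
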
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index a156b82dd14c..c2b66a277e98 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -524,7 +524,7 @@ asmlinkage long sys_chown(const char __user *filename,
asmlinkage long sys_lchown(const char __user *filename,
uid_t user, gid_t group);
asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
-#ifdef CONFIG_UID16
+#ifdef CONFIG_HAVE_UID16
asmlinkage long sys_chown16(const char __user *filename,
old_uid_t user, old_gid_t group);
asmlinkage long sys_lchown16(const char __user *filename,
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 4014a59828fc..613c29bd6baf 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -438,7 +438,8 @@ static inline void thermal_zone_device_unregister(
static inline int thermal_zone_bind_cooling_device(
struct thermal_zone_device *tz, int trip,
struct thermal_cooling_device *cdev,
- unsigned long upper, unsigned long lower)
+ unsigned long upper, unsigned long lower,
+ unsigned int weight)
{ return -ENODEV; }
static inline int thermal_zone_unbind_cooling_device(
struct thermal_zone_device *tz, int trip,
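
The stub gains the new weight parameter so !CONFIG_THERMAL builds keep compiling against the updated prototype. A hedged sketch of a call site; the trip index and weight are illustrative values, with the weight biasing how much cooling this device contributes relative to others bound to the same trip:

#include <linux/thermal.h>

static int my_bind(struct thermal_zone_device *tz,
		   struct thermal_cooling_device *cdev)
{
	/* trip 0 and weight 50 are illustrative; THERMAL_NO_LIMIT
	 * leaves the cooling-state range unrestricted. */
	return thermal_zone_bind_cooling_device(tz, 0, cdev,
						THERMAL_NO_LIMIT,
						THERMAL_NO_LIMIT, 50);
}
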
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 5b04b0a5375b..5e31f1b99037 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -607,7 +607,7 @@ extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
/* tty_audit.c */
#ifdef CONFIG_AUDIT
-extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
+extern void tty_audit_add_data(struct tty_struct *tty, const void *data,
size_t size, unsigned icanon);
extern void tty_audit_exit(void);
extern void tty_audit_fork(struct signal_struct *sig);
@@ -615,8 +615,8 @@ extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
extern void tty_audit_push(struct tty_struct *tty);
extern int tty_audit_push_current(void);
#else
-static inline void tty_audit_add_data(struct tty_struct *tty,
- unsigned char *data, size_t size, unsigned icanon)
+static inline void tty_audit_add_data(struct tty_struct *tty, const void *data,
+ size_t size, unsigned icanon)
{
}
static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
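
Widening the data parameter to const void * lets callers hand in any buffer type without a cast and documents that the audit path only reads the bytes. A minimal hedged illustration:

static void audit_example(struct tty_struct *tty,
			  const char *buf, size_t n)
{
	/* No cast needed now that the parameter is const void *;
	 * previously this required (unsigned char *)buf, discarding
	 * the const qualifier.  icanon=0 is an illustrative value. */
	tty_audit_add_data(tty, buf, n, 0);
}
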
diff --git a/include/linux/types.h b/include/linux/types.h
index 70d8500bddf1..70dd3dfde631 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -35,7 +35,7 @@ typedef __kernel_gid16_t gid16_t;
typedef unsigned long uintptr_t;
-#ifdef CONFIG_UID16
+#ifdef CONFIG_HAVE_UID16
/* This is defined by include/asm-{arch}/posix_types.h */
typedef __kernel_old_uid_t old_uid_t;
typedef __kernel_old_gid_t old_gid_t;
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 0bdc72f36905..4a29c75b146e 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -21,7 +21,7 @@
* Authors:
* Srikar Dronamraju
* Jim Keniston
- * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
*/
#include <linux/errno.h>
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 1f6526c76ee8..3a375d07d0dc 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -138,6 +138,7 @@ struct cdc_ncm_ctx {
};
u8 cdc_ncm_select_altsetting(struct usb_interface *intf);
+int cdc_ncm_change_mtu(struct net_device *net, int new_mtu);
int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags);
void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
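
Exporting cdc_ncm_change_mtu lets drivers built on cdc_ncm reuse a common MTU-change handler rather than open-coding the validation. A hedged sketch of plugging it into netdev ops; the ops struct itself is hypothetical, though the usbnet helpers are real exports:

#include <linux/usb/usbnet.h>
#include <linux/usb/cdc_ncm.h>

static const struct net_device_ops my_ncm_netdev_ops = {
	.ndo_open	= usbnet_open,
	.ndo_stop	= usbnet_stop,
	.ndo_start_xmit	= usbnet_start_xmit,
	.ndo_change_mtu	= cdc_ncm_change_mtu,	/* new export */
};
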
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 9948c874e3f1..1d0043dc34e4 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -47,4 +47,7 @@
/* device generates spurious wakeup, ignore remote wakeup capability */
#define USB_QUIRK_IGNORE_REMOTE_WAKEUP BIT(9)
+/* device can't handle Link Power Management */
+#define USB_QUIRK_NO_LPM BIT(10)
+
#endif /* __LINUX_USB_QUIRKS_H */
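
A hedged sketch of how the new flag would be attached to a misbehaving device in the core quirks table; the vendor/product IDs below are made up for illustration:

#include <linux/usb.h>
#include <linux/usb/quirks.h>

/* drivers/usb/core/quirks.c style entry: devices listed with
 * USB_QUIRK_NO_LPM never get Link Power Management enabled. */
static const struct usb_device_id usb_quirk_list[] = {
	{ USB_DEVICE(0x1234, 0x5678), .driver_info = USB_QUIRK_NO_LPM },
	{ }	/* terminating entry */
};
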
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 610a86a892b8..ddb440975382 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -44,9 +44,6 @@ struct vfio_device_ops {
void (*request)(void *device_data, unsigned int count);
};
-extern struct iommu_group *vfio_iommu_group_get(struct device *dev);
-extern void vfio_iommu_group_put(struct iommu_group *group, struct device *dev);
-
extern int vfio_add_group_dev(struct device *dev,
const struct vfio_device_ops *ops,
void *device_data);
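
With the vfio_iommu_group_get()/vfio_iommu_group_put() externs gone from the public header, bus drivers register through vfio_add_group_dev() alone; the diff only shows the declarations going away, so presumably the IOMMU group lookup is handled inside the core. A hedged sketch, where my_vfio_ops is a hypothetical vfio_device_ops instance:

#include <linux/vfio.h>

static int my_probe(struct device *dev, void *device_data)
{
	/* No explicit vfio_iommu_group_get() anymore; registration
	 * is the single entry point for the driver. */
	return vfio_add_group_dev(dev, &my_vfio_ops, device_data);
}
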
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 5dbc8b0ee567..3e5d9075960f 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -176,11 +176,11 @@ extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
#ifdef CONFIG_SMP
-void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
+void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);
-void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
+void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);
@@ -205,7 +205,7 @@ void set_pgdat_percpu_threshold(pg_data_t *pgdat,
* The functions directly modify the zone and global counters.
*/
static inline void __mod_zone_page_state(struct zone *zone,
- enum zone_stat_item item, int delta)
+ enum zone_stat_item item, long delta)
{
zone_page_state_add(delta, zone, item);
}
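
Promoting delta from int to long keeps the SMP declarations and the UP inline in sync, and avoids truncation when a single adjustment exceeds 2^31 pages on a 64-bit kernel. A small hedged illustration; the helper and its use of NR_FREE_PAGES are for demonstration only:

#include <linux/vmstat.h>

static void account_range(struct zone *zone, long nr_pages)
{
	/* With a long delta, a huge adjustment no longer wraps
	 * through int on 64-bit. */
	mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
}
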
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 1e1bf9f963a9..0496c31aaf06 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -107,6 +107,27 @@ static inline int waitqueue_active(wait_queue_head_t *q)
return !list_empty(&q->task_list);
}
+/**
+ * wq_has_sleeper - check if there are any waiting processes
+ * @wq: wait queue head
+ *
+ * Returns true if @wq has waiting processes.
+ *
+ * Please refer to the comment for waitqueue_active.
+ */
+static inline bool wq_has_sleeper(wait_queue_head_t *wq)
+{
+ /*
+ * We need to be sure we are in sync with the
+ * add_wait_queue modifications to the wait queue.
+ *
+ * This memory barrier should be paired with one on the
+ * waiting side.
+ */
+ smp_mb();
+ return waitqueue_active(wq);
+}
+
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
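
wq_has_sleeper() bundles the full barrier with the emptiness check, so a waker that first publishes its condition cannot miss a waiter whose enqueue raced with the check. A hedged waker/waiter pairing sketch; the done flag is illustrative:

#include <linux/wait.h>

/* Waker side: publish the condition, then check for sleepers.  The
 * smp_mb() inside wq_has_sleeper() orders the store to *done against
 * the waitqueue load. */
static void my_complete(wait_queue_head_t *wq, int *done)
{
	WRITE_ONCE(*done, 1);
	if (wq_has_sleeper(wq))
		wake_up(wq);
}

/* Waiter side: wait_event() enqueues first, then re-checks the
 * condition, pairing with the waker's barrier. */
static void my_wait(wait_queue_head_t *wq, int *done)
{
	wait_event(*wq, READ_ONCE(*done));
}
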
@@ -145,7 +166,7 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
list_del(&old->task_list);
}
-typedef int wait_bit_action_f(struct wait_bit_key *);
+typedef int wait_bit_action_f(struct wait_bit_key *, int mode);
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
@@ -960,10 +981,10 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
} while (0)
-extern int bit_wait(struct wait_bit_key *);
-extern int bit_wait_io(struct wait_bit_key *);
-extern int bit_wait_timeout(struct wait_bit_key *);
-extern int bit_wait_io_timeout(struct wait_bit_key *);
+extern int bit_wait(struct wait_bit_key *, int);
+extern int bit_wait_io(struct wait_bit_key *, int);
+extern int bit_wait_timeout(struct wait_bit_key *, int);
+extern int bit_wait_io_timeout(struct wait_bit_key *, int);
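
Passing the caller's sleep mode into the action lets the stock bit-wait helpers abort killable or interruptible waits when a signal arrives instead of sleeping through it. A hedged approximation of a mode-aware action; the real bodies live elsewhere in the scheduler code:

#include <linux/sched.h>
#include <linux/wait.h>

static int my_bit_wait(struct wait_bit_key *word, int mode)
{
	schedule();
	/* Bail out if the sleep mode admits signals and one is pending. */
	if (signal_pending_state(mode, current))
		return -EINTR;
	return 0;
}
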
/**
* wait_on_bit - wait for a bit to be cleared