Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/acpi.h | 5
-rw-r--r--  include/linux/alarmtimer.h | 5
-rw-r--r--  include/linux/amba/pl061.h | 16
-rw-r--r--  include/linux/backing-dev-defs.h | 3
-rw-r--r--  include/linux/bpf-cgroup.h | 92
-rw-r--r--  include/linux/bpf.h | 19
-rw-r--r--  include/linux/bpf_verifier.h | 15
-rw-r--r--  include/linux/brcmphy.h | 27
-rw-r--r--  include/linux/bug.h | 17
-rw-r--r--  include/linux/ceph/osd_client.h | 2
-rw-r--r--  include/linux/cgroup-defs.h | 4
-rw-r--r--  include/linux/clk-provider.h | 2
-rw-r--r--  include/linux/clk.h | 29
-rw-r--r--  include/linux/clk/renesas.h | 4
-rw-r--r--  include/linux/clocksource.h | 5
-rw-r--r--  include/linux/cma.h | 3
-rw-r--r--  include/linux/compiler-gcc.h | 6
-rw-r--r--  include/linux/console.h | 6
-rw-r--r--  include/linux/cpu.h | 15
-rw-r--r--  include/linux/cpufreq.h | 4
-rw-r--r--  include/linux/cpuhotplug.h | 22
-rw-r--r--  include/linux/debugfs.h | 3
-rw-r--r--  include/linux/devfreq_cooling.h | 9
-rw-r--r--  include/linux/device.h | 21
-rw-r--r--  include/linux/dma-buf.h | 4
-rw-r--r--  include/linux/dma-fence-array.h | 86
-rw-r--r--  include/linux/dma-fence.h | 438
-rw-r--r--  include/linux/drbd_genl.h | 2
-rw-r--r--  include/linux/edac.h | 8
-rw-r--r--  include/linux/efi.h | 46
-rw-r--r--  include/linux/fddidevice.h | 1
-rw-r--r--  include/linux/fence-array.h | 83
-rw-r--r--  include/linux/fence.h | 378
-rw-r--r--  include/linux/filter.h | 37
-rw-r--r--  include/linux/frontswap.h | 5
-rw-r--r--  include/linux/fs.h | 2
-rw-r--r--  include/linux/fsl/guts.h | 125
-rw-r--r--  include/linux/ftrace.h | 2
-rw-r--r--  include/linux/genl_magic_func.h | 29
-rw-r--r--  include/linux/gpio/driver.h | 34
-rw-r--r--  include/linux/hdlc.h | 2
-rw-r--r--  include/linux/hdmi.h | 2
-rw-r--r--  include/linux/hippidevice.h | 1
-rw-r--r--  include/linux/huge_mm.h | 4
-rw-r--r--  include/linux/hyperv.h | 7
-rw-r--r--  include/linux/ieee80211.h | 26
-rw-r--r--  include/linux/if_arp.h | 16
-rw-r--r--  include/linux/if_vlan.h | 16
-rw-r--r--  include/linux/intel-iommu.h | 1
-rw-r--r--  include/linux/interrupt.h | 20
-rw-r--r--  include/linux/io.h | 22
-rw-r--r--  include/linux/iomap.h | 17
-rw-r--r--  include/linux/ipv6.h | 27
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h | 6
-rw-r--r--  include/linux/kasan.h | 2
-rw-r--r--  include/linux/kconfig.h | 5
-rw-r--r--  include/linux/kernel_stat.h | 4
-rw-r--r--  include/linux/kthread.h | 3
-rw-r--r--  include/linux/kvm_host.h | 3
-rw-r--r--  include/linux/leds.h | 25
-rw-r--r--  include/linux/libnvdimm.h | 2
-rw-r--r--  include/linux/list.h | 37
-rw-r--r--  include/linux/mbus.h | 8
-rw-r--r--  include/linux/mc146818rtc.h | 1
-rw-r--r--  include/linux/mdev.h | 168
-rw-r--r--  include/linux/mempolicy.h | 8
-rw-r--r--  include/linux/mfd/max77620.h | 2
-rw-r--r--  include/linux/mfd/tmio.h | 5
-rw-r--r--  include/linux/mii.h | 4
-rw-r--r--  include/linux/mlx4/device.h | 4
-rw-r--r--  include/linux/mlx5/device.h | 16
-rw-r--r--  include/linux/mlx5/driver.h | 76
-rw-r--r--  include/linux/mlx5/fs.h | 43
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 319
-rw-r--r--  include/linux/mlx5/port.h | 9
-rw-r--r--  include/linux/mlx5/srq.h | 2
-rw-r--r--  include/linux/mlx5/vport.h | 10
-rw-r--r--  include/linux/mm.h | 25
-rw-r--r--  include/linux/mmc/card.h | 14
-rw-r--r--  include/linux/mmc/core.h | 16
-rw-r--r--  include/linux/mmc/dw_mmc.h | 6
-rw-r--r--  include/linux/mmc/host.h | 17
-rw-r--r--  include/linux/mmc/mmc.h | 17
-rw-r--r--  include/linux/mmc/slot-gpio.h | 1
-rw-r--r--  include/linux/mmzone.h | 30
-rw-r--r--  include/linux/mtd/nand.h | 2
-rw-r--r--  include/linux/mutex-debug.h | 24
-rw-r--r--  include/linux/mutex.h | 77
-rw-r--r--  include/linux/netdevice.h | 266
-rw-r--r--  include/linux/netfilter.h | 89
-rw-r--r--  include/linux/netfilter/ipset/ip_set.h | 136
-rw-r--r--  include/linux/netfilter/ipset/ip_set_bitmap.h | 2
-rw-r--r--  include/linux/netfilter/ipset/ip_set_comment.h | 11
-rw-r--r--  include/linux/netfilter/ipset/ip_set_counter.h | 75
-rw-r--r--  include/linux/netfilter/ipset/ip_set_skbinfo.h | 46
-rw-r--r--  include/linux/netfilter/ipset/ip_set_timeout.h | 4
-rw-r--r--  include/linux/netfilter/nf_conntrack_dccp.h | 2
-rw-r--r--  include/linux/netfilter/x_tables.h | 86
-rw-r--r--  include/linux/netfilter_ingress.h | 9
-rw-r--r--  include/linux/netpoll.h | 13
-rw-r--r--  include/linux/nvme.h | 49
-rw-r--r--  include/linux/of_fdt.h | 1
-rw-r--r--  include/linux/of_mdio.h | 4
-rw-r--r--  include/linux/pagemap.h | 21
-rw-r--r--  include/linux/pci.h | 39
-rw-r--r--  include/linux/perf_event.h | 1
-rw-r--r--  include/linux/phy.h | 44
-rw-r--r--  include/linux/phy/phy.h | 7
-rw-r--r--  include/linux/phy_led_triggers.h | 51
-rw-r--r--  include/linux/pim.h | 81
-rw-r--r--  include/linux/pm-trace.h | 9
-rw-r--r--  include/linux/pm.h | 2
-rw-r--r--  include/linux/preempt.h | 21
-rw-r--r--  include/linux/printk.h | 17
-rw-r--r--  include/linux/proc_fs.h | 4
-rw-r--r--  include/linux/pstore.h | 76
-rw-r--r--  include/linux/pstore_ram.h | 14
-rw-r--r--  include/linux/ptp_clock_kernel.h | 73
-rw-r--r--  include/linux/qed/qed_chain.h | 144
-rw-r--r--  include/linux/qed/qed_eth_if.h | 61
-rw-r--r--  include/linux/qed/qed_if.h | 50
-rw-r--r--  include/linux/qed/qed_iscsi_if.h | 229
-rw-r--r--  include/linux/qed/qede_roce.h | 2
-rw-r--r--  include/linux/radix-tree.h | 34
-rw-r--r--  include/linux/rculist.h | 8
-rw-r--r--  include/linux/regmap.h | 11
-rw-r--r--  include/linux/regulator/consumer.h | 27
-rw-r--r--  include/linux/regulator/driver.h | 4
-rw-r--r--  include/linux/remoteproc.h | 36
-rw-r--r--  include/linux/remoteproc/st_slim_rproc.h | 58
-rw-r--r--  include/linux/reservation.h | 41
-rw-r--r--  include/linux/ring_buffer.h | 6
-rw-r--r--  include/linux/rmap.h | 10
-rw-r--r--  include/linux/rpmsg.h | 125
-rw-r--r--  include/linux/rpmsg/qcom_smd.h | 33
-rw-r--r--  include/linux/sched.h | 117
-rw-r--r--  include/linux/sched/sysctl.h | 1
-rw-r--r--  include/linux/seg6.h | 6
-rw-r--r--  include/linux/seg6_genl.h | 6
-rw-r--r--  include/linux/seg6_hmac.h | 6
-rw-r--r--  include/linux/seg6_iptunnel.h | 6
-rw-r--r--  include/linux/seqno-fence.h | 20
-rw-r--r--  include/linux/skbuff.h | 24
-rw-r--r--  include/linux/smc91x.h | 1
-rw-r--r--  include/linux/soc/renesas/rcar-rst.h | 6
-rw-r--r--  include/linux/stmmac.h | 5
-rw-r--r--  include/linux/sunrpc/svc_xprt.h | 1
-rw-r--r--  include/linux/swap.h | 34
-rw-r--r--  include/linux/sync_file.h | 14
-rw-r--r--  include/linux/sys_soc.h | 9
-rw-r--r--  include/linux/tcp.h | 24
-rw-r--r--  include/linux/thermal.h | 2
-rw-r--r--  include/linux/thread_info.h | 11
-rw-r--r--  include/linux/time.h | 2
-rw-r--r--  include/linux/timekeeping.h | 1
-rw-r--r--  include/linux/udp.h | 3
-rw-r--r--  include/linux/uprobes.h | 1
-rw-r--r--  include/linux/usb/cdc_ncm.h | 3
-rw-r--r--  include/linux/vfio.h | 48
-rw-r--r--  include/linux/virtio_net.h | 4
-rw-r--r--  include/linux/vmalloc.h | 1
-rw-r--r--  include/linux/workqueue.h | 35
-rw-r--r--  include/linux/ww_mutex.h | 2
163 files changed, 3720 insertions, 1571 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index ddbeda6dbdc8..051023756520 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -326,6 +326,7 @@ struct pci_dev;
int acpi_pci_irq_enable (struct pci_dev *dev);
void acpi_penalize_isa_irq(int irq, int active);
bool acpi_isa_irq_available(int irq);
+void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
void acpi_pci_irq_disable (struct pci_dev *dev);
extern int ec_read(u8 addr, u8 *val);
@@ -468,6 +469,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
#define OSC_SB_CPCV2_SUPPORT 0x00000040
#define OSC_SB_PCLPI_SUPPORT 0x00000080
#define OSC_SB_OSLPI_SUPPORT 0x00000100
+#define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000
extern bool osc_sb_apei_support_acked;
extern bool osc_pc_lpi_support_confirmed;
@@ -554,7 +556,8 @@ int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
int acpi_device_modalias(struct device *, char *, int);
void acpi_walk_dep_device_list(acpi_handle handle);
-struct platform_device *acpi_create_platform_device(struct acpi_device *);
+struct platform_device *acpi_create_platform_device(struct acpi_device *,
+ struct property_entry *);
#define ACPI_PTR(_ptr) (_ptr)
static inline void acpi_device_set_enumerated(struct acpi_device *adev)
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index 9d8031257a90..c70aac13244a 100644
--- a/include/linux/alarmtimer.h
+++ b/include/linux/alarmtimer.h
@@ -10,7 +10,12 @@ enum alarmtimer_type {
ALARM_REALTIME,
ALARM_BOOTTIME,
+ /* Supported types end here */
ALARM_NUMTYPE,
+
+ /* Used for tracing information. No usable types. */
+ ALARM_REALTIME_FREEZER,
+ ALARM_BOOTTIME_FREEZER,
};
enum alarmtimer_restart {
diff --git a/include/linux/amba/pl061.h b/include/linux/amba/pl061.h
deleted file mode 100644
index fb83c0453489..000000000000
--- a/include/linux/amba/pl061.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#include <linux/types.h>
-
-/* platform data for the PL061 GPIO driver */
-
-struct pl061_platform_data {
- /* number of the first GPIO */
- unsigned gpio_base;
-
- /* number of the first IRQ.
- * If the IRQ functionality in not desired this must be set to 0.
- */
- unsigned irq_base;
-
- u8 directions; /* startup directions, 1: out, 0: in */
- u8 values; /* startup values */
-};
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index dc5f76d7f648..e850e76acaaf 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -138,12 +138,13 @@ struct bdi_writeback {
struct backing_dev_info {
struct list_head bdi_list;
unsigned long ra_pages; /* max readahead in PAGE_SIZE units */
- unsigned int capabilities; /* Device capabilities */
+ unsigned long io_pages; /* max allowed IO size */
congested_fn *congested_fn; /* Function pointer if device is md/dm */
void *congested_data; /* Pointer to aux data for congested func */
char *name;
+ unsigned int capabilities; /* Device capabilities */
unsigned int min_ratio;
unsigned int max_ratio, max_prop_frac;
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
new file mode 100644
index 000000000000..7b6e5d168c95
--- /dev/null
+++ b/include/linux/bpf-cgroup.h
@@ -0,0 +1,92 @@
+#ifndef _BPF_CGROUP_H
+#define _BPF_CGROUP_H
+
+#include <linux/jump_label.h>
+#include <uapi/linux/bpf.h>
+
+struct sock;
+struct cgroup;
+struct sk_buff;
+
+#ifdef CONFIG_CGROUP_BPF
+
+extern struct static_key_false cgroup_bpf_enabled_key;
+#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
+
+struct cgroup_bpf {
+ /*
+ * Store two sets of bpf_prog pointers, one for programs that are
+ * pinned directly to this cgroup, and one for those that are effective
+ * when this cgroup is accessed.
+ */
+ struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
+ struct bpf_prog *effective[MAX_BPF_ATTACH_TYPE];
+};
+
+void cgroup_bpf_put(struct cgroup *cgrp);
+void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);
+
+void __cgroup_bpf_update(struct cgroup *cgrp,
+ struct cgroup *parent,
+ struct bpf_prog *prog,
+ enum bpf_attach_type type);
+
+/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
+void cgroup_bpf_update(struct cgroup *cgrp,
+ struct bpf_prog *prog,
+ enum bpf_attach_type type);
+
+int __cgroup_bpf_run_filter_skb(struct sock *sk,
+ struct sk_buff *skb,
+ enum bpf_attach_type type);
+
+int __cgroup_bpf_run_filter_sk(struct sock *sk,
+ enum bpf_attach_type type);
+
+/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
+#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled) \
+ __ret = __cgroup_bpf_run_filter_skb(sk, skb, \
+ BPF_CGROUP_INET_INGRESS); \
+ \
+ __ret; \
+})
+
+#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled && sk && sk == skb->sk) { \
+ typeof(sk) __sk = sk_to_full_sk(sk); \
+ if (sk_fullsock(__sk)) \
+ __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
+ BPF_CGROUP_INET_EGRESS); \
+ } \
+ __ret; \
+})
+
+#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled && sk) { \
+ __ret = __cgroup_bpf_run_filter_sk(sk, \
+ BPF_CGROUP_INET_SOCK_CREATE); \
+ } \
+ __ret; \
+})
+
+#else
+
+struct cgroup_bpf {};
+static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
+static inline void cgroup_bpf_inherit(struct cgroup *cgrp,
+ struct cgroup *parent) {}
+
+#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
+
+#endif /* CONFIG_CGROUP_BPF */
+
+#endif /* _BPF_CGROUP_H */
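
The run wrappers above are written so that call sites cost almost nothing when no program is attached: cgroup_bpf_enabled is a static key, so the common case is a single patched branch. A minimal sketch of how a hypothetical caller in the receive path might use the ingress wrapper (the function and its surroundings are illustrative, not part of this patch):

static int example_ingress_check(struct sock *sk, struct sk_buff *skb)
{
	int ret;

	/* No-op unless a BPF_CGROUP_INET_INGRESS program is attached
	 * somewhere in sk's cgroup. */
	ret = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
	if (ret)
		return ret;	/* non-zero means drop */

	return 0;
}
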
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index c201017b5730..8796ff03f472 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -216,6 +216,7 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
+void bpf_prog_calc_digest(struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
@@ -233,13 +234,14 @@ void bpf_register_map_type(struct bpf_map_type_list *tl);
struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
-struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i);
-struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
+struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
+void bpf_prog_sub(struct bpf_prog *prog, int i);
+struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
-struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
+struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages);
@@ -298,15 +300,21 @@ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
{
return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
+static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog,
+ int i)
{
return ERR_PTR(-EOPNOTSUPP);
}
+static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
+{
+}
+
static inline void bpf_prog_put(struct bpf_prog *prog)
{
}
-static inline struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
+
+static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog)
{
return ERR_PTR(-EOPNOTSUPP);
}
@@ -319,6 +327,7 @@ extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
+extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
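
With __must_check on bpf_prog_add() and bpf_prog_inc(), callers can no longer silently ignore the ERR_PTR-encoded failure (the refcount can overflow), and the new bpf_prog_sub() lets them drop several references at once on an error path. A hedged sketch of the expected calling pattern; the function names example_take_refs and example_distribute are illustrative:

static int example_take_refs(struct bpf_prog *orig)
{
	struct bpf_prog *prog;
	int err;

	prog = bpf_prog_add(orig, 4);	/* take four extra references */
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = example_distribute(prog);	/* hypothetical consumer */
	if (err)
		bpf_prog_sub(prog, 4);	/* drop all four at once */
	return err;
}
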
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 7035b997aaa5..a13b031dc6b8 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -14,22 +14,16 @@
* are obviously wrong for any sort of memory access.
*/
#define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024)
-#define BPF_REGISTER_MIN_RANGE -(1024 * 1024 * 1024)
+#define BPF_REGISTER_MIN_RANGE -1
struct bpf_reg_state {
enum bpf_reg_type type;
- /*
- * Used to determine if any memory access using this register will
- * result in a bad access.
- */
- u64 min_value, max_value;
union {
/* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
s64 imm;
/* valid when type == PTR_TO_PACKET* */
struct {
- u32 id;
u16 off;
u16 range;
};
@@ -39,6 +33,13 @@ struct bpf_reg_state {
*/
struct bpf_map *map_ptr;
};
+ u32 id;
+ /* Used to determine if any memory access using this register will
+ * result in a bad access. These two fields must be last.
+ * See states_equal()
+ */
+ s64 min_value;
+ u64 max_value;
};
enum bpf_stack_slot_type {
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index e3354b74286c..4f7d8be9ddbf 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -13,11 +13,13 @@
#define PHY_ID_BCM5241 0x0143bc30
#define PHY_ID_BCMAC131 0x0143bc70
#define PHY_ID_BCM5481 0x0143bca0
+#define PHY_ID_BCM54810 0x03625d00
#define PHY_ID_BCM5482 0x0143bcb0
#define PHY_ID_BCM5411 0x00206070
#define PHY_ID_BCM5421 0x002060e0
#define PHY_ID_BCM5464 0x002060b0
#define PHY_ID_BCM5461 0x002060c0
+#define PHY_ID_BCM54612E 0x03625e60
#define PHY_ID_BCM54616S 0x03625d10
#define PHY_ID_BCM57780 0x03625d90
@@ -55,6 +57,7 @@
#define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00002000
#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000
#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000
+
/* Broadcom BCM7xxx specific workarounds */
#define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff)
#define PHY_BRCM_7XXX_PATCH(x) ((x) & 0xff)
@@ -105,11 +108,15 @@
#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800
#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
+#define MII_BCM54XX_AUXCTL_MISC_RXD_RXC_SKEW 0x0100
#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
#define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC 0x7000
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007
+#define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT 12
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN (1 << 8)
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN (1 << 4)
-#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MASK 0x0007
/*
* Broadcom LED source encodings. These are used in BCM5461, BCM5481,
@@ -124,6 +131,7 @@
#define BCM_LED_SRC_INTR 0x6
#define BCM_LED_SRC_QUALITY 0x7
#define BCM_LED_SRC_RCVLED 0x8
+#define BCM_LED_SRC_WIRESPEED 0x9
#define BCM_LED_SRC_MULTICOLOR1 0xa
#define BCM_LED_SRC_OPENSHORT 0xb
#define BCM_LED_SRC_OFF 0xe /* Tied high */
@@ -135,6 +143,14 @@
* Shadow values go into bits [14:10] of register 0x1c to select a shadow
* register to access.
*/
+
+/* 00100: Reserved control register 2 */
+#define BCM54XX_SHD_SCR2 0x04
+#define BCM54XX_SHD_SCR2_WSPD_RTRY_DIS 0x100
+#define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_SHIFT 2
+#define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_OFFSET 2
+#define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_MASK 0x7
+
/* 00101: Spare Control Register 3 */
#define BCM54XX_SHD_SCR3 0x05
#define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001
@@ -189,6 +205,12 @@
#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */
#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */
+/* BCM54810 Registers */
+#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL (MII_BCM54XX_EXP_SEL_ER + 0x90)
+#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN (1 << 0)
+#define BCM54810_SHD_CLK_CTL 0x3
+#define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9)
+
/*****************************************************************************/
/* Fast Ethernet Transceiver definitions. */
@@ -222,6 +244,9 @@
#define LPI_FEATURE_EN_DIG1000X 0x4000
/* Core register definitions*/
+#define MII_BRCM_CORE_BASE12 0x12
+#define MII_BRCM_CORE_BASE13 0x13
+#define MII_BRCM_CORE_BASE14 0x14
#define MII_BRCM_CORE_BASE1E 0x1E
#define MII_BRCM_CORE_EXPB0 0xB0
#define MII_BRCM_CORE_EXPB1 0xB1
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 292d6a10b0c2..baff2e8fc8a8 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -121,4 +121,21 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr,
}
#endif /* CONFIG_GENERIC_BUG */
+
+/*
+ * Since detected data corruption should stop operation on the affected
+ * structures, this returns false if the corruption condition is found.
+ */
+#define CHECK_DATA_CORRUPTION(condition, fmt, ...) \
+ do { \
+ if (unlikely(condition)) { \
+ if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \
+ pr_err(fmt, ##__VA_ARGS__); \
+ BUG(); \
+ } else \
+ WARN(1, fmt, ##__VA_ARGS__); \
+ return false; \
+ } \
+ } while (0)
+
#endif /* _LINUX_BUG_H */
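
Because the macro expands to a bare "return false" when the condition fires, it can only be used inside functions returning bool. A sketch in the style of the list-debugging checks (function name and message are illustrative):

static bool example_list_del_valid(struct list_head *entry)
{
	/* Report (and, with CONFIG_BUG_ON_DATA_CORRUPTION, BUG on)
	 * a corrupted back-link; otherwise just WARN and bail out. */
	CHECK_DATA_CORRUPTION(entry->prev->next != entry,
			      "list_del corruption: %p->prev->next != %p\n",
			      entry, entry);
	return true;
}
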
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 96337b15a60d..a8e66344bacc 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -258,6 +258,8 @@ struct ceph_watch_item {
struct ceph_entity_addr addr;
};
+#define CEPH_LINGER_ID_START 0xffff000000000000ULL
+
struct ceph_osd_client {
struct ceph_client *client;
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 5b17de62c962..861b4677fc5b 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -16,6 +16,7 @@
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
#include <linux/workqueue.h>
+#include <linux/bpf-cgroup.h>
#ifdef CONFIG_CGROUPS
@@ -300,6 +301,9 @@ struct cgroup {
/* used to schedule release agent */
struct work_struct release_agent_work;
+ /* used to store eBPF programs */
+ struct cgroup_bpf bpf;
+
/* ids of the ancestors at each level including self */
int ancestor_ids[];
};
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index af596381fa0f..a428aec36ace 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -785,7 +785,7 @@ extern struct of_device_id __clk_of_table;
* routines, one at of_clk_init(), and one at platform device probe
*/
#define CLK_OF_DECLARE_DRIVER(name, compat, fn) \
- static void name##_of_clk_init_driver(struct device_node *np) \
+ static void __init name##_of_clk_init_driver(struct device_node *np) \
{ \
of_node_clear_flag(np, OF_POPULATED); \
fn(np); \
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 123c02788807..e9d36b3e49de 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -17,8 +17,9 @@
#include <linux/notifier.h>
struct device;
-
struct clk;
+struct device_node;
+struct of_phandle_args;
/**
* DOC: clk notifier callback types
@@ -249,6 +250,23 @@ struct clk *clk_get(struct device *dev, const char *id);
struct clk *devm_clk_get(struct device *dev, const char *id);
/**
+ * devm_get_clk_from_child - lookup and obtain a managed reference to a
+ * clock producer from a child node.
+ * @dev: device for clock "consumer"
+ * @np: pointer to clock consumer node
+ * @con_id: clock consumer ID
+ *
+ * This function parses the clocks, and uses them to look up the
+ * struct clk from the registered list of clock providers by using
+ * @np and @con_id.
+ *
+ * The clock will automatically be freed when the device is unbound
+ * from the bus.
+ */
+struct clk *devm_get_clk_from_child(struct device *dev,
+ struct device_node *np, const char *con_id);
+
+/**
* clk_enable - inform the system when the clock source should be running.
* @clk: clock source
*
@@ -432,6 +450,12 @@ static inline struct clk *devm_clk_get(struct device *dev, const char *id)
return NULL;
}
+static inline struct clk *devm_get_clk_from_child(struct device *dev,
+ struct device_node *np, const char *con_id)
+{
+ return NULL;
+}
+
static inline void clk_put(struct clk *clk) {}
static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
@@ -501,9 +525,6 @@ static inline void clk_disable_unprepare(struct clk *clk)
clk_unprepare(clk);
}
-struct device_node;
-struct of_phandle_args;
-
#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
struct clk *of_clk_get(struct device_node *np, int index);
struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
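
A hedged sketch of the new helper in a driver probe path, for a consumer whose clock is described in a child node (the child-node name "codec" and the error handling are illustrative):

static int example_probe(struct platform_device *pdev)
{
	struct device_node *child;
	struct clk *clk;

	child = of_get_child_by_name(pdev->dev.of_node, "codec");
	if (!child)
		return -ENODEV;

	clk = devm_get_clk_from_child(&pdev->dev, child, NULL);
	of_node_put(child);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_prepare_enable(clk);
}
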
diff --git a/include/linux/clk/renesas.h b/include/linux/clk/renesas.h
index ba6fa4148515..9ebf1f8243bb 100644
--- a/include/linux/clk/renesas.h
+++ b/include/linux/clk/renesas.h
@@ -20,10 +20,6 @@ struct device;
struct device_node;
struct generic_pm_domain;
-void r8a7778_clocks_init(u32 mode);
-void r8a7779_clocks_init(u32 mode);
-void rcar_gen2_clocks_init(u32 mode);
-
void cpg_mstp_add_clk_domain(struct device_node *np);
#ifdef CONFIG_CLK_RENESAS_CPG_MSTP
int cpg_mstp_attach_dev(struct generic_pm_domain *unused, struct device *dev);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 08398182f56e..65602d395a52 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -169,7 +169,10 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
* @mult: cycle to nanosecond multiplier
* @shift: cycle to nanosecond divisor (power of two)
*
- * Converts cycles to nanoseconds, using the given mult and shift.
+ * Converts clocksource cycles to nanoseconds, using the given @mult and @shift.
+ * The code is optimized for performance and is not intended to work
+ * with absolute clocksource cycles (as those will easily overflow),
+ * but is only intended to be used with relative (delta) clocksource cycles.
*
* XXX - This could use some mult_lxl_ll() asm optimization
*/
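
In other words, the helper is meant for deltas between two counter readouts, not raw counter values, because the intermediate cycles * mult product must fit in 64 bits. An illustrative restatement of the documented conversion:

/* Sketch only: delta must be small enough that delta * mult does not
 * overflow 64 bits; mult and shift come from the clocksource. */
static inline u64 example_delta_to_ns(u64 delta, u32 mult, u32 shift)
{
	return (delta * mult) >> shift;
}
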
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 29f9e774ab76..6f0a91b37f68 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -1,6 +1,9 @@
#ifndef __CMA_H__
#define __CMA_H__
+#include <linux/init.h>
+#include <linux/types.h>
+
/*
* There is always at least global CMA area and a few optional
* areas configured in kernel .config.
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 432f5c97e18f..0444b1336268 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -21,7 +21,7 @@
* clobbered. The issue is as follows: while the inline asm might
* access any memory it wants, the compiler could have fit all of
* @ptr into memory registers instead, and since @ptr never escaped
- * from that, it proofed that the inline asm wasn't touching any of
+ * from that, it proved that the inline asm wasn't touching any of
* it. This version works well with both compilers, i.e. we're telling
* the compiler that the inline asm absolutely may see the contents
* of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
@@ -263,7 +263,9 @@
#endif
#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */
-#if GCC_VERSION >= 50000
+#if GCC_VERSION >= 70000
+#define KASAN_ABI_VERSION 5
+#elif GCC_VERSION >= 50000
#define KASAN_ABI_VERSION 4
#elif GCC_VERSION >= 40902
#define KASAN_ABI_VERSION 3
diff --git a/include/linux/console.h b/include/linux/console.h
index 3672809234a7..d530c4627e54 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -173,12 +173,6 @@ static inline void console_sysfs_notify(void)
#endif
extern bool console_suspend_enabled;
-#ifdef CONFIG_OF
-extern void console_set_by_of(void);
-#else
-static inline void console_set_by_of(void) {}
-#endif
-
/* Suspend and resume console messages over PM events */
extern void suspend_console(void);
extern void resume_console(void);
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index b886dc17f2f3..e571128ad99a 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -93,22 +93,16 @@ extern bool cpuhp_tasks_frozen;
{ .notifier_call = fn, .priority = pri }; \
__register_cpu_notifier(&fn##_nb); \
}
-#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
-#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
-#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
-#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
-#ifdef CONFIG_HOTPLUG_CPU
extern int register_cpu_notifier(struct notifier_block *nb);
extern int __register_cpu_notifier(struct notifier_block *nb);
extern void unregister_cpu_notifier(struct notifier_block *nb);
extern void __unregister_cpu_notifier(struct notifier_block *nb);
-#else
-#ifndef MODULE
-extern int register_cpu_notifier(struct notifier_block *nb);
-extern int __register_cpu_notifier(struct notifier_block *nb);
-#else
+#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
+#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
+#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
+
static inline int register_cpu_notifier(struct notifier_block *nb)
{
return 0;
@@ -118,7 +112,6 @@ static inline int __register_cpu_notifier(struct notifier_block *nb)
{
return 0;
}
-#endif
static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 5fa55fc56e18..32dc0cbd51ca 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -677,10 +677,10 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
if (best == table - 1)
return pos - table;
- return best - pos;
+ return best - table;
}
- return best - pos;
+ return best - table;
}
/* Works only on sorted freq-tables */
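
The fix matters because the function's contract is to return an index into the frequency table: best and table point into the same array, so best - table is that index, while the previous best - pos was an offset from the search cursor and was only correct when pos happened to equal table. A two-line illustration (table assumed declared elsewhere):

struct cpufreq_frequency_table *best = &table[3];
int index = best - table;	/* 3: element index, wherever the search stopped */
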
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 9b207a8c5af3..22acee76cf4c 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -16,9 +16,11 @@ enum cpuhp_state {
CPUHP_PERF_SUPERH,
CPUHP_X86_HPET_DEAD,
CPUHP_X86_APB_DEAD,
+ CPUHP_X86_MCE_DEAD,
CPUHP_VIRT_NET_DEAD,
CPUHP_SLUB_DEAD,
CPUHP_MM_WRITEBACK_DEAD,
+ CPUHP_MM_VMSTAT_DEAD,
CPUHP_SOFTIRQ_DEAD,
CPUHP_NET_MVNETA_DEAD,
CPUHP_CPUIDLE_DEAD,
@@ -30,6 +32,15 @@ enum cpuhp_state {
CPUHP_ACPI_CPUDRV_DEAD,
CPUHP_S390_PFAULT_DEAD,
CPUHP_BLK_MQ_DEAD,
+ CPUHP_FS_BUFF_DEAD,
+ CPUHP_PRINTK_DEAD,
+ CPUHP_MM_MEMCQ_DEAD,
+ CPUHP_PERCPU_CNT_DEAD,
+ CPUHP_RADIX_DEAD,
+ CPUHP_PAGE_ALLOC_DEAD,
+ CPUHP_NET_DEV_DEAD,
+ CPUHP_PCI_XGENE_DEAD,
+ CPUHP_IOMMU_INTEL_DEAD,
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,
@@ -49,6 +60,16 @@ enum cpuhp_state {
CPUHP_ARM_SHMOBILE_SCU_PREPARE,
CPUHP_SH_SH3X_PREPARE,
CPUHP_BLK_MQ_PREPARE,
+ CPUHP_NET_FLOW_PREPARE,
+ CPUHP_TOPOLOGY_PREPARE,
+ CPUHP_NET_IUCV_PREPARE,
+ CPUHP_ARM_BL_PREPARE,
+ CPUHP_TRACE_RB_PREPARE,
+ CPUHP_MM_ZS_PREPARE,
+ CPUHP_MM_ZSWP_MEM_PREPARE,
+ CPUHP_MM_ZSWP_POOL_PREPARE,
+ CPUHP_KVM_PPC_BOOK3S_PREPARE,
+ CPUHP_ZCOMP_PREPARE,
CPUHP_TIMERS_DEAD,
CPUHP_NOTF_ERR_INJ_PREPARE,
CPUHP_MIPS_SOC_PREPARE,
@@ -81,6 +102,7 @@ enum cpuhp_state {
CPUHP_AP_ARM_ARCH_TIMER_STARTING,
CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
CPUHP_AP_DUMMY_TIMER_STARTING,
+ CPUHP_AP_JCORE_TIMER_STARTING,
CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_TWD_STARTING,
CPUHP_AP_METAG_TIMER_STARTING,
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 4d3f0d1aec73..bf1907d96097 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -52,7 +52,8 @@ extern struct srcu_struct debugfs_srcu;
* Must only be called under the protection established by
* debugfs_use_file_start().
*/
-static inline const struct file_operations *debugfs_real_fops(struct file *filp)
+static inline const struct file_operations *
+debugfs_real_fops(const struct file *filp)
__must_hold(&debugfs_srcu)
{
/*
diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h
index 7adf6cc4b305..c35d0c0e0ada 100644
--- a/include/linux/devfreq_cooling.h
+++ b/include/linux/devfreq_cooling.h
@@ -20,7 +20,6 @@
#include <linux/devfreq.h>
#include <linux/thermal.h>
-#ifdef CONFIG_DEVFREQ_THERMAL
/**
* struct devfreq_cooling_power - Devfreq cooling power ops
@@ -37,12 +36,16 @@
* @dyn_power_coeff * frequency * voltage^2
*/
struct devfreq_cooling_power {
- unsigned long (*get_static_power)(unsigned long voltage);
- unsigned long (*get_dynamic_power)(unsigned long freq,
+ unsigned long (*get_static_power)(struct devfreq *devfreq,
+ unsigned long voltage);
+ unsigned long (*get_dynamic_power)(struct devfreq *devfreq,
+ unsigned long freq,
unsigned long voltage);
unsigned long dyn_power_coeff;
};
+#ifdef CONFIG_DEVFREQ_THERMAL
+
struct thermal_cooling_device *
of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
struct devfreq_cooling_power *dfc_power);
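
A sketch of a driver filling in the reworked ops with the new devfreq argument; the coefficient and unit handling below are made up for illustration, whereas real drivers derive them from the device's power model:

static unsigned long example_get_dynamic_power(struct devfreq *devfreq,
					       unsigned long freq,
					       unsigned long voltage)
{
	/* dyn_power_coeff * freq_mhz * voltage_mv^2, scaled down;
	 * the coefficient 100 is illustrative only. */
	unsigned long freq_mhz = freq / 1000000;

	return (100 * freq_mhz * voltage * voltage) / 1000000;
}

static struct devfreq_cooling_power example_power = {
	.get_dynamic_power = example_get_dynamic_power,
};
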
diff --git a/include/linux/device.h b/include/linux/device.h
index bc41e87a969b..94926d3ad6c6 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -698,6 +698,25 @@ static inline int devm_add_action_or_reset(struct device *dev,
return ret;
}
+/**
+ * devm_alloc_percpu - Resource-managed alloc_percpu
+ * @dev: Device to allocate per-cpu memory for
+ * @type: Type to allocate per-cpu memory for
+ *
+ * Managed alloc_percpu. Per-cpu memory allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+#define devm_alloc_percpu(dev, type) \
+ ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \
+ __alignof__(type)))
+
+void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
+ size_t align);
+void devm_free_percpu(struct device *dev, void __percpu *pdata);
+
struct device_dma_parameters {
/*
* a low level driver may set these to teach IOMMU code about
@@ -733,7 +752,7 @@ struct device_dma_parameters {
* minimizes board-specific #ifdefs in drivers.
* @driver_data: Private pointer for driver specific info.
* @power: For device power management.
- * See Documentation/power/devices.txt for details.
+ * See Documentation/power/admin-guide/devices.rst for details.
* @pm_domain: Provide callbacks that are executed during system suspend,
* hibernation, system resume and during runtime PM transitions
* along with subsystem-level and driver-level callbacks.
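
A sketch of the devres-managed per-cpu allocation in use; the statistics structure and probe function are illustrative:

struct example_stats {
	u64 packets;
	u64 bytes;
};

static int example_probe(struct device *dev)
{
	struct example_stats __percpu *stats;

	stats = devm_alloc_percpu(dev, struct example_stats);
	if (!stats)
		return -ENOMEM;

	/* Freed automatically on driver detach; no devm_free_percpu()
	 * call is needed on the normal unbind path. */
	this_cpu_inc(stats->packets);
	return 0;
}
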
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index e0b0741ae671..8daeb3ce0016 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -30,7 +30,7 @@
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/wait.h>
struct device;
@@ -143,7 +143,7 @@ struct dma_buf {
wait_queue_head_t poll;
struct dma_buf_poll_cb_t {
- struct fence_cb cb;
+ struct dma_fence_cb cb;
wait_queue_head_t *poll;
unsigned long active;
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h
new file mode 100644
index 000000000000..5900945f962d
--- /dev/null
+++ b/include/linux/dma-fence-array.h
@@ -0,0 +1,86 @@
+/*
+ * fence-array: aggregates fences to be waited on together
+ *
+ * Copyright (C) 2016 Collabora Ltd
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Authors:
+ * Gustavo Padovan <gustavo@padovan.org>
+ * Christian König <christian.koenig@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __LINUX_DMA_FENCE_ARRAY_H
+#define __LINUX_DMA_FENCE_ARRAY_H
+
+#include <linux/dma-fence.h>
+
+/**
+ * struct dma_fence_array_cb - callback helper for fence array
+ * @cb: fence callback structure for signaling
+ * @array: reference to the parent fence array object
+ */
+struct dma_fence_array_cb {
+ struct dma_fence_cb cb;
+ struct dma_fence_array *array;
+};
+
+/**
+ * struct dma_fence_array - fence to represent an array of fences
+ * @base: fence base class
+ * @lock: spinlock for fence handling
+ * @num_fences: number of fences in the array
+ * @num_pending: fences in the array still pending
+ * @fences: array of the fences
+ */
+struct dma_fence_array {
+ struct dma_fence base;
+
+ spinlock_t lock;
+ unsigned num_fences;
+ atomic_t num_pending;
+ struct dma_fence **fences;
+};
+
+extern const struct dma_fence_ops dma_fence_array_ops;
+
+/**
+ * dma_fence_is_array - check if a fence is from the array subclass
+ * @fence: fence to test
+ *
+ * Return true if it is a dma_fence_array and false otherwise.
+ */
+static inline bool dma_fence_is_array(struct dma_fence *fence)
+{
+ return fence->ops == &dma_fence_array_ops;
+}
+
+/**
+ * to_dma_fence_array - cast a fence to a dma_fence_array
+ * @fence: fence to cast to a dma_fence_array
+ *
+ * Returns NULL if the fence is not a dma_fence_array,
+ * or the dma_fence_array otherwise.
+ */
+static inline struct dma_fence_array *
+to_dma_fence_array(struct dma_fence *fence)
+{
+ if (fence->ops != &dma_fence_array_ops)
+ return NULL;
+
+ return container_of(fence, struct dma_fence_array, base);
+}
+
+struct dma_fence_array *dma_fence_array_create(int num_fences,
+ struct dma_fence **fences,
+ u64 context, unsigned seqno,
+ bool signal_on_any);
+
+#endif /* __LINUX_DMA_FENCE_ARRAY_H */
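
A hedged sketch of aggregating two fences into one that signals only when both have signaled (signal_on_any = false). On success the array takes ownership of the kmalloc'd pointer array and of the references stored in it, per the dma-fence-array implementation; example_merge is an illustrative name:

static struct dma_fence *example_merge(struct dma_fence *a,
				       struct dma_fence *b)
{
	struct dma_fence **fences;
	struct dma_fence_array *array;

	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return NULL;
	fences[0] = dma_fence_get(a);
	fences[1] = dma_fence_get(b);

	array = dma_fence_array_create(2, fences,
				       dma_fence_context_alloc(1), 1,
				       false);
	if (!array) {
		dma_fence_put(a);
		dma_fence_put(b);
		kfree(fences);
		return NULL;
	}
	return &array->base;
}
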
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
new file mode 100644
index 000000000000..d51a7d23c358
--- /dev/null
+++ b/include/linux/dma-fence.h
@@ -0,0 +1,438 @@
+/*
+ * Fence mechanism for dma-buf to allow for asynchronous dma access
+ *
+ * Copyright (C) 2012 Canonical Ltd
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Maarten Lankhorst <maarten.lankhorst@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __LINUX_DMA_FENCE_H
+#define __LINUX_DMA_FENCE_H
+
+#include <linux/err.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/kref.h>
+#include <linux/sched.h>
+#include <linux/printk.h>
+#include <linux/rcupdate.h>
+
+struct dma_fence;
+struct dma_fence_ops;
+struct dma_fence_cb;
+
+/**
+ * struct dma_fence - software synchronization primitive
+ * @refcount: refcount for this fence
+ * @ops: dma_fence_ops associated with this fence
+ * @rcu: used for releasing fence with kfree_rcu
+ * @cb_list: list of all callbacks to call
+ * @lock: spin_lock_irqsave used for locking
+ * @context: execution context this fence belongs to, returned by
+ * dma_fence_context_alloc()
+ * @seqno: the sequence number of this fence inside the execution context,
+ * can be compared to decide which fence would be signaled later.
+ * @flags: A mask of DMA_FENCE_FLAG_* defined below
+ * @timestamp: Timestamp when the fence was signaled.
+ * @status: Optional, only valid if < 0, must be set before calling
+ * dma_fence_signal, indicates that the fence has completed with an error.
+ *
+ * the flags member must be manipulated and read using the appropriate
+ * atomic ops (bit_*), so taking the spinlock will not be needed most
+ * of the time.
+ *
+ * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled
+ * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called
+ * DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
+ * implementer of the fence for its own purposes. Can be used in different
+ * ways by different fence implementers, so do not rely on this.
+ *
+ * Since atomic bitops are used, this is not guaranteed to be the case.
+ * Particularly, if the bit was set, but dma_fence_signal was called right
+ * before this bit was set, it would have been able to set the
+ * DMA_FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called.
+ * Adding a check for DMA_FENCE_FLAG_SIGNALED_BIT after setting
+ * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that
+ * after dma_fence_signal was called, any enable_signaling call will have either
+ * been completed, or never called at all.
+ */
+struct dma_fence {
+ struct kref refcount;
+ const struct dma_fence_ops *ops;
+ struct rcu_head rcu;
+ struct list_head cb_list;
+ spinlock_t *lock;
+ u64 context;
+ unsigned seqno;
+ unsigned long flags;
+ ktime_t timestamp;
+ int status;
+};
+
+enum dma_fence_flag_bits {
+ DMA_FENCE_FLAG_SIGNALED_BIT,
+ DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ DMA_FENCE_FLAG_USER_BITS, /* must always be last member */
+};
+
+typedef void (*dma_fence_func_t)(struct dma_fence *fence,
+ struct dma_fence_cb *cb);
+
+/**
+ * struct dma_fence_cb - callback for dma_fence_add_callback
+ * @node: used by dma_fence_add_callback to append this struct to fence::cb_list
+ * @func: dma_fence_func_t to call
+ *
+ * This struct will be initialized by dma_fence_add_callback, additional
+ * data can be passed along by embedding dma_fence_cb in another struct.
+ */
+struct dma_fence_cb {
+ struct list_head node;
+ dma_fence_func_t func;
+};
+
+/**
+ * struct dma_fence_ops - operations implemented for fence
+ * @get_driver_name: returns the driver name.
+ * @get_timeline_name: return the name of the context this fence belongs to.
+ * @enable_signaling: enable software signaling of fence.
+ * @signaled: [optional] peek whether the fence is signaled, can be null.
+ * @wait: custom wait implementation, or dma_fence_default_wait.
+ * @release: [optional] called on destruction of fence, can be null
+ * @fill_driver_data: [optional] callback to fill in free-form debug info
+ * Returns amount of bytes filled, or -errno.
+ * @fence_value_str: [optional] fills in the value of the fence as a string
+ * @timeline_value_str: [optional] fills in the current value of the timeline
+ * as a string
+ *
+ * Notes on enable_signaling:
+ * For fence implementations that have the capability for hw->hw
+ * signaling, they can implement this op to enable the necessary
+ * irqs, or insert commands into cmdstream, etc. This is called
+ * in the first wait() or add_callback() path to let the fence
+ * implementation know that there is another driver waiting on
+ * the signal (ie. hw->sw case).
+ *
+ * This function can be called from atomic context, but not
+ * from irq context, so normal spinlocks can be used.
+ *
+ * A return value of false indicates the fence already passed,
+ * or some failure occurred that made it impossible to enable
+ * signaling. True indicates successful enabling.
+ *
+ * fence->status may be set in enable_signaling, but only when false is
+ * returned.
+ *
+ * Calling dma_fence_signal before enable_signaling is called allows
+ * for a tiny race window in which enable_signaling is called during,
+ * before, or after dma_fence_signal. To fight this, it is recommended
+ * that before enable_signaling returns true an extra reference is
+ * taken on the fence, to be released when the fence is signaled.
+ * This will mean dma_fence_signal will still be called twice, but
+ * the second time will be a noop since it was already signaled.
+ *
+ * Notes on signaled:
+ * May set fence->status if returning true.
+ *
+ * Notes on wait:
+ * Must not be NULL, set to dma_fence_default_wait for default implementation.
+ * The dma_fence_default_wait implementation should work for any fence, as long
+ * as enable_signaling works correctly.
+ *
+ * Must return -ERESTARTSYS if intr is true and the wait was
+ * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
+ * timed out. Can also return other error values on custom implementations,
+ * which should be treated as if the fence is signaled. For example a hardware
+ * lockup could be reported like that.
+ *
+ * Notes on release:
+ * Can be NULL, this function allows additional commands to run on
+ * destruction of the fence. Can be called from irq context.
+ * If pointer is set to NULL, kfree will get called instead.
+ */
+
+struct dma_fence_ops {
+ const char * (*get_driver_name)(struct dma_fence *fence);
+ const char * (*get_timeline_name)(struct dma_fence *fence);
+ bool (*enable_signaling)(struct dma_fence *fence);
+ bool (*signaled)(struct dma_fence *fence);
+ signed long (*wait)(struct dma_fence *fence,
+ bool intr, signed long timeout);
+ void (*release)(struct dma_fence *fence);
+
+ int (*fill_driver_data)(struct dma_fence *fence, void *data, int size);
+ void (*fence_value_str)(struct dma_fence *fence, char *str, int size);
+ void (*timeline_value_str)(struct dma_fence *fence,
+ char *str, int size);
+};
+
+void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
+ spinlock_t *lock, u64 context, unsigned seqno);
+
+void dma_fence_release(struct kref *kref);
+void dma_fence_free(struct dma_fence *fence);
+
+/**
+ * dma_fence_put - decreases refcount of the fence
+ * @fence: [in] fence to reduce refcount of
+ */
+static inline void dma_fence_put(struct dma_fence *fence)
+{
+ if (fence)
+ kref_put(&fence->refcount, dma_fence_release);
+}
+
+/**
+ * dma_fence_get - increases refcount of the fence
+ * @fence: [in] fence to increase refcount of
+ *
+ * Returns the same fence, with refcount increased by 1.
+ */
+static inline struct dma_fence *dma_fence_get(struct dma_fence *fence)
+{
+ if (fence)
+ kref_get(&fence->refcount);
+ return fence;
+}
+
+/**
+ * dma_fence_get_rcu - get a fence from a reservation_object_list with
+ * rcu read lock
+ * @fence: [in] fence to increase refcount of
+ *
+ * Function returns NULL if no refcount could be obtained, or the fence.
+ */
+static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
+{
+ if (kref_get_unless_zero(&fence->refcount))
+ return fence;
+ else
+ return NULL;
+}
+
+/**
+ * dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence
+ * @fencep: [in] pointer to fence to increase refcount of
+ *
+ * Function returns NULL if no refcount could be obtained, or the fence.
+ * This function handles acquiring a reference to a fence that may be
+ * reallocated within the RCU grace period (such as with SLAB_DESTROY_BY_RCU),
+ * so long as the caller is using RCU on the pointer to the fence.
+ *
+ * An alternative mechanism is to employ a seqlock to protect a bunch of
+ * fences, such as used by struct reservation_object. When using a seqlock,
+ * the seqlock must be taken before and checked after a reference to the
+ * fence is acquired (as shown here).
+ *
+ * The caller is required to hold the RCU read lock.
+ */
+static inline struct dma_fence *
+dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep)
+{
+ do {
+ struct dma_fence *fence;
+
+ fence = rcu_dereference(*fencep);
+ if (!fence || !dma_fence_get_rcu(fence))
+ return NULL;
+
+ /* The atomic_inc_not_zero() inside dma_fence_get_rcu()
+ * provides a full memory barrier upon success (such as now).
+ * This is paired with the write barrier from assigning
+ * to the __rcu protected fence pointer so that if that
+ * pointer still matches the current fence, we know we
+ * have successfully acquired a reference to it. If it no
+ * longer matches, we are holding a reference to some other
+ * reallocated pointer. This is possible if the allocator
+ * is using a freelist like SLAB_DESTROY_BY_RCU where the
+ * fence remains valid for the RCU grace period, but it
+ * may be reallocated. When using such allocators, we are
+ * responsible for ensuring the reference we get is to
+ * the right fence, as below.
+ */
+ if (fence == rcu_access_pointer(*fencep))
+ return rcu_pointer_handoff(fence);
+
+ dma_fence_put(fence);
+ } while (1);
+}
+
+int dma_fence_signal(struct dma_fence *fence);
+int dma_fence_signal_locked(struct dma_fence *fence);
+signed long dma_fence_default_wait(struct dma_fence *fence,
+ bool intr, signed long timeout);
+int dma_fence_add_callback(struct dma_fence *fence,
+ struct dma_fence_cb *cb,
+ dma_fence_func_t func);
+bool dma_fence_remove_callback(struct dma_fence *fence,
+ struct dma_fence_cb *cb);
+void dma_fence_enable_sw_signaling(struct dma_fence *fence);
+
+/**
+ * dma_fence_is_signaled_locked - Return an indication if the fence
+ * is signaled yet.
+ * @fence: [in] the fence to check
+ *
+ * Returns true if the fence was already signaled, false if not. Since this
+ * function doesn't enable signaling, it is not guaranteed to ever return
+ * true if dma_fence_add_callback, dma_fence_wait or
+ * dma_fence_enable_sw_signaling haven't been called before.
+ *
+ * This function requires fence->lock to be held.
+ */
+static inline bool
+dma_fence_is_signaled_locked(struct dma_fence *fence)
+{
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return true;
+
+ if (fence->ops->signaled && fence->ops->signaled(fence)) {
+ dma_fence_signal_locked(fence);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * dma_fence_is_signaled - Return an indication if the fence is signaled yet.
+ * @fence: [in] the fence to check
+ *
+ * Returns true if the fence was already signaled, false if not. Since this
+ * function doesn't enable signaling, it is not guaranteed to ever return
+ * true if dma_fence_add_callback, dma_fence_wait or
+ * dma_fence_enable_sw_signaling haven't been called before.
+ *
+ * It's recommended for seqno fences to call dma_fence_signal when the
+ * operation is complete; this makes it possible to prevent issues from
+ * wraparound between time of issue and time of use by checking the return
+ * value of this function before calling hardware-specific wait instructions.
+ */
+static inline bool
+dma_fence_is_signaled(struct dma_fence *fence)
+{
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return true;
+
+ if (fence->ops->signaled && fence->ops->signaled(fence)) {
+ dma_fence_signal(fence);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * dma_fence_is_later - return whether f1 is chronologically later than f2
+ * @f1: [in] the first fence from the same context
+ * @f2: [in] the second fence from the same context
+ *
+ * Returns true if f1 is chronologically later than f2. Both fences must be
+ * from the same context, since a seqno is not re-used across contexts.
+ */
+static inline bool dma_fence_is_later(struct dma_fence *f1,
+ struct dma_fence *f2)
+{
+ if (WARN_ON(f1->context != f2->context))
+ return false;
+
+ return (int)(f1->seqno - f2->seqno) > 0;
+}
+
+/**
+ * dma_fence_later - return the chronologically later fence
+ * @f1: [in] the first fence from the same context
+ * @f2: [in] the second fence from the same context
+ *
+ * Returns NULL if both fences are signaled, otherwise the fence that would be
+ * signaled last. Both fences must be from the same context, since a seqno is
+ * not re-used across contexts.
+ */
+static inline struct dma_fence *dma_fence_later(struct dma_fence *f1,
+ struct dma_fence *f2)
+{
+ if (WARN_ON(f1->context != f2->context))
+ return NULL;
+
+ /*
+ * Can't check just DMA_FENCE_FLAG_SIGNALED_BIT here, it may never
+ * have been set if enable_signaling wasn't called, and enabling that
+ * here is overkill.
+ */
+ if (dma_fence_is_later(f1, f2))
+ return dma_fence_is_signaled(f1) ? NULL : f1;
+ else
+ return dma_fence_is_signaled(f2) ? NULL : f2;
+}
+
+signed long dma_fence_wait_timeout(struct dma_fence *,
+ bool intr, signed long timeout);
+signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
+ uint32_t count,
+ bool intr, signed long timeout,
+ uint32_t *idx);
+
+/**
+ * dma_fence_wait - sleep until the fence gets signaled
+ * @fence: [in] the fence to wait on
+ * @intr: [in] if true, do an interruptible wait
+ *
+ * This function will return -ERESTARTSYS if interrupted by a signal,
+ * or 0 if the fence was signaled. Other error values may be
+ * returned on custom implementations.
+ *
+ * Performs a synchronous wait on this fence. It is assumed the caller
+ * directly or indirectly holds a reference to the fence, otherwise the
+ * fence might be freed before return, resulting in undefined behavior.
+ */
+static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
+{
+ signed long ret;
+
+ /* Since dma_fence_wait_timeout cannot timeout with
+ * MAX_SCHEDULE_TIMEOUT, only valid return values are
+ * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
+ */
+ ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
+
+ return ret < 0 ? ret : 0;
+}
+
+u64 dma_fence_context_alloc(unsigned num);
+
+#define DMA_FENCE_TRACE(f, fmt, args...) \
+ do { \
+ struct dma_fence *__ff = (f); \
+ if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \
+ pr_info("f %llu#%u: " fmt, \
+ __ff->context, __ff->seqno, ##args); \
+ } while (0)
+
+#define DMA_FENCE_WARN(f, fmt, args...) \
+ do { \
+ struct dma_fence *__ff = (f); \
+ pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
+ ##args); \
+ } while (0)
+
+#define DMA_FENCE_ERR(f, fmt, args...) \
+ do { \
+ struct dma_fence *__ff = (f); \
+ pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
+ ##args); \
+ } while (0)
+
+#endif /* __LINUX_DMA_FENCE_H */
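
To ground the ops table above, here is a minimal hypothetical provider: a heap-allocated fence on a shared lock, signaled by whoever completes the work. Everything named example_* is illustrative, not part of this patch:

static DEFINE_SPINLOCK(example_lock);

static const char *example_get_driver_name(struct dma_fence *f)
{
	return "example";
}

static const char *example_get_timeline_name(struct dma_fence *f)
{
	return "example-timeline";
}

static bool example_enable_signaling(struct dma_fence *f)
{
	/* Nothing to arm in hardware; completion always goes through
	 * dma_fence_signal(), so just confirm signaling is possible. */
	return true;
}

static const struct dma_fence_ops example_fence_ops = {
	.get_driver_name = example_get_driver_name,
	.get_timeline_name = example_get_timeline_name,
	.enable_signaling = example_enable_signaling,
	.wait = dma_fence_default_wait,	/* must not be NULL */
};

static struct dma_fence *example_fence_create(u64 context, unsigned seqno)
{
	struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	dma_fence_init(f, &example_fence_ops, &example_lock,
		       context, seqno);
	return f;	/* completer calls dma_fence_signal(f), then dma_fence_put(f) */
}
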
diff --git a/include/linux/drbd_genl.h b/include/linux/drbd_genl.h
index c934d3a96b5e..2896f93808ae 100644
--- a/include/linux/drbd_genl.h
+++ b/include/linux/drbd_genl.h
@@ -67,7 +67,7 @@
* genl_magic_func.h
* generates an entry in the static genl_ops array,
* and static register/unregister functions to
- * genl_register_family_with_ops().
+ * genl_register_family().
*
* flags and handler:
* GENL_op_init( .doit = x, .dumpit = y, .flags = something)
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 9e0d78966552..cb56dcba68c6 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -134,6 +134,7 @@ enum dev_type {
enum hw_event_mc_err_type {
HW_EVENT_ERR_CORRECTED,
HW_EVENT_ERR_UNCORRECTED,
+ HW_EVENT_ERR_DEFERRED,
HW_EVENT_ERR_FATAL,
HW_EVENT_ERR_INFO,
};
@@ -145,6 +146,8 @@ static inline char *mc_event_error_type(const unsigned int err_type)
return "Corrected";
case HW_EVENT_ERR_UNCORRECTED:
return "Uncorrected";
+ case HW_EVENT_ERR_DEFERRED:
+ return "Deferred";
case HW_EVENT_ERR_FATAL:
return "Fatal";
default:
@@ -192,10 +195,11 @@ static inline char *mc_event_error_type(const unsigned int err_type)
* @MEM_DDR3: DDR3 RAM
* @MEM_RDDR3: Registered DDR3 RAM
* This is a variant of the DDR3 memories.
- * @MEM_LRDDR3 Load-Reduced DDR3 memory.
+ * @MEM_LRDDR3: Load-Reduced DDR3 memory.
* @MEM_DDR4: Unbuffered DDR4 RAM
* @MEM_RDDR4: Registered DDR4 RAM
* This is a variant of the DDR4 memories.
+ * @MEM_LRDDR4: Load-Reduced DDR4 memory.
*/
enum mem_type {
MEM_EMPTY = 0,
@@ -218,6 +222,7 @@ enum mem_type {
MEM_LRDDR3,
MEM_DDR4,
MEM_RDDR4,
+ MEM_LRDDR4,
};
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
@@ -239,6 +244,7 @@ enum mem_type {
#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
#define MEM_FLAG_DDR4 BIT(MEM_DDR4)
#define MEM_FLAG_RDDR4 BIT(MEM_RDDR4)
+#define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4)
/**
* enum edac-type - Error Detection and Correction capabilities and mode
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 2d089487d2da..a07a476178cd 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -443,6 +443,22 @@ typedef struct {
#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO_16 0x20000
#define EFI_PCI_IO_ATTRIBUTE_VGA_IO_16 0x40000
+typedef struct {
+ u32 version;
+ u32 get;
+ u32 set;
+ u32 del;
+ u32 get_all;
+} apple_properties_protocol_32_t;
+
+typedef struct {
+ u64 version;
+ u64 get;
+ u64 set;
+ u64 del;
+ u64 get_all;
+} apple_properties_protocol_64_t;
+
/*
* Types and defines for EFI ResetSystem
*/
@@ -589,8 +605,10 @@ void efi_native_runtime_setup(void);
#define DEVICE_TREE_GUID EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0)
#define EFI_PROPERTIES_TABLE_GUID EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5)
#define EFI_RNG_PROTOCOL_GUID EFI_GUID(0x3152bca5, 0xeade, 0x433d, 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44)
+#define EFI_RNG_ALGORITHM_RAW EFI_GUID(0xe43176d7, 0xb6e8, 0x4827, 0xb7, 0x84, 0x7f, 0xfd, 0xc4, 0xb6, 0x85, 0x61)
#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
+#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0)
/*
* This GUID is used to pass to the kernel proper the struct screen_info
@@ -599,6 +617,7 @@ void efi_native_runtime_setup(void);
*/
#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
+#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
typedef struct {
efi_guid_t guid;
@@ -872,6 +891,7 @@ extern struct efi {
unsigned long esrt; /* ESRT table */
unsigned long properties_table; /* properties table */
unsigned long mem_attr_table; /* memory attributes table */
+ unsigned long rng_seed; /* UEFI firmware random seed */
efi_get_time_t *get_time;
efi_set_time_t *set_time;
efi_get_wakeup_time_t *get_wakeup_time;
@@ -1145,6 +1165,26 @@ struct efi_generic_dev_path {
u16 length;
} __attribute ((packed));
+struct efi_dev_path {
+ u8 type; /* can be replaced with unnamed */
+ u8 sub_type; /* struct efi_generic_dev_path; */
+ u16 length; /* once we've moved to -std=c11 */
+ union {
+ struct {
+ u32 hid;
+ u32 uid;
+ } acpi;
+ struct {
+ u8 fn;
+ u8 dev;
+ } pci;
+ };
+} __attribute ((packed));
+
+#if IS_ENABLED(CONFIG_EFI_DEV_PATH_PARSER)
+struct device *efi_get_device_by_path(struct efi_dev_path **node, size_t *len);
+#endif
+
static inline void memrange_efi_to_native(u64 *addr, u64 *npages)
{
*npages = PFN_UP(*addr + (*npages<<EFI_PAGE_SHIFT)) - PFN_DOWN(*addr);
@@ -1493,4 +1533,10 @@ efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table,
struct efi_boot_memmap *map,
void *priv,
efi_exit_boot_map_processing priv_func);
+
+struct linux_efi_random_seed {
+ u32 size;
+ u8 bits[];
+};
+
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/fddidevice.h b/include/linux/fddidevice.h
index 9a79f0106da1..32c22cfb238b 100644
--- a/include/linux/fddidevice.h
+++ b/include/linux/fddidevice.h
@@ -26,7 +26,6 @@
#ifdef __KERNEL__
__be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev);
-int fddi_change_mtu(struct net_device *dev, int new_mtu);
struct net_device *alloc_fddidev(int sizeof_priv);
#endif
diff --git a/include/linux/fence-array.h b/include/linux/fence-array.h
deleted file mode 100644
index a44794e508df..000000000000
--- a/include/linux/fence-array.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * fence-array: aggregates fences to be waited on together
- *
- * Copyright (C) 2016 Collabora Ltd
- * Copyright (C) 2016 Advanced Micro Devices, Inc.
- * Authors:
- * Gustavo Padovan <gustavo@padovan.org>
- * Christian König <christian.koenig@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __LINUX_FENCE_ARRAY_H
-#define __LINUX_FENCE_ARRAY_H
-
-#include <linux/fence.h>
-
-/**
- * struct fence_array_cb - callback helper for fence array
- * @cb: fence callback structure for signaling
- * @array: reference to the parent fence array object
- */
-struct fence_array_cb {
- struct fence_cb cb;
- struct fence_array *array;
-};
-
-/**
- * struct fence_array - fence to represent an array of fences
- * @base: fence base class
- * @lock: spinlock for fence handling
- * @num_fences: number of fences in the array
- * @num_pending: fences in the array still pending
- * @fences: array of the fences
- */
-struct fence_array {
- struct fence base;
-
- spinlock_t lock;
- unsigned num_fences;
- atomic_t num_pending;
- struct fence **fences;
-};
-
-extern const struct fence_ops fence_array_ops;
-
-/**
- * fence_is_array - check if a fence is from the array subclass
- *
- * Return true if it is a fence_array and false otherwise.
- */
-static inline bool fence_is_array(struct fence *fence)
-{
- return fence->ops == &fence_array_ops;
-}
-
-/**
- * to_fence_array - cast a fence to a fence_array
- * @fence: fence to cast to a fence_array
- *
- * Returns NULL if the fence is not a fence_array,
- * or the fence_array otherwise.
- */
-static inline struct fence_array *to_fence_array(struct fence *fence)
-{
- if (fence->ops != &fence_array_ops)
- return NULL;
-
- return container_of(fence, struct fence_array, base);
-}
-
-struct fence_array *fence_array_create(int num_fences, struct fence **fences,
- u64 context, unsigned seqno,
- bool signal_on_any);
-
-#endif /* __LINUX_FENCE_ARRAY_H */
diff --git a/include/linux/fence.h b/include/linux/fence.h
deleted file mode 100644
index 0d763053f97a..000000000000
--- a/include/linux/fence.h
+++ /dev/null
@@ -1,378 +0,0 @@
-/*
- * Fence mechanism for dma-buf to allow for asynchronous dma access
- *
- * Copyright (C) 2012 Canonical Ltd
- * Copyright (C) 2012 Texas Instruments
- *
- * Authors:
- * Rob Clark <robdclark@gmail.com>
- * Maarten Lankhorst <maarten.lankhorst@canonical.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __LINUX_FENCE_H
-#define __LINUX_FENCE_H
-
-#include <linux/err.h>
-#include <linux/wait.h>
-#include <linux/list.h>
-#include <linux/bitops.h>
-#include <linux/kref.h>
-#include <linux/sched.h>
-#include <linux/printk.h>
-#include <linux/rcupdate.h>
-
-struct fence;
-struct fence_ops;
-struct fence_cb;
-
-/**
- * struct fence - software synchronization primitive
- * @refcount: refcount for this fence
- * @ops: fence_ops associated with this fence
- * @rcu: used for releasing fence with kfree_rcu
- * @cb_list: list of all callbacks to call
- * @lock: spin_lock_irqsave used for locking
- * @context: execution context this fence belongs to, returned by
- * fence_context_alloc()
- * @seqno: the sequence number of this fence inside the execution context,
- * can be compared to decide which fence would be signaled later.
- * @flags: A mask of FENCE_FLAG_* defined below
- * @timestamp: Timestamp when the fence was signaled.
- * @status: Optional, only valid if < 0, must be set before calling
- * fence_signal, indicates that the fence has completed with an error.
- *
- * the flags member must be manipulated and read using the appropriate
- * atomic ops (bit_*), so taking the spinlock will not be needed most
- * of the time.
- *
- * FENCE_FLAG_SIGNALED_BIT - fence is already signaled
- * FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called*
- * FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
- * implementer of the fence for its own purposes. Can be used in different
- * ways by different fence implementers, so do not rely on this.
- *
- * Since atomic bitops are used, this is not guaranteed to be the case.
- * Particularly, if the bit was set, but fence_signal was called right
- * before this bit was set, it would have been able to set the
- * FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called.
- * Adding a check for FENCE_FLAG_SIGNALED_BIT after setting
- * FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that
- * after fence_signal was called, any enable_signaling call will have either
- * been completed, or never called at all.
- */
-struct fence {
- struct kref refcount;
- const struct fence_ops *ops;
- struct rcu_head rcu;
- struct list_head cb_list;
- spinlock_t *lock;
- u64 context;
- unsigned seqno;
- unsigned long flags;
- ktime_t timestamp;
- int status;
-};
-
-enum fence_flag_bits {
- FENCE_FLAG_SIGNALED_BIT,
- FENCE_FLAG_ENABLE_SIGNAL_BIT,
- FENCE_FLAG_USER_BITS, /* must always be last member */
-};
-
-typedef void (*fence_func_t)(struct fence *fence, struct fence_cb *cb);
-
-/**
- * struct fence_cb - callback for fence_add_callback
- * @node: used by fence_add_callback to append this struct to fence::cb_list
- * @func: fence_func_t to call
- *
- * This struct will be initialized by fence_add_callback, additional
- * data can be passed along by embedding fence_cb in another struct.
- */
-struct fence_cb {
- struct list_head node;
- fence_func_t func;
-};
-
-/**
- * struct fence_ops - operations implemented for fence
- * @get_driver_name: returns the driver name.
- * @get_timeline_name: return the name of the context this fence belongs to.
- * @enable_signaling: enable software signaling of fence.
- * @signaled: [optional] peek whether the fence is signaled, can be null.
- * @wait: custom wait implementation, or fence_default_wait.
- * @release: [optional] called on destruction of fence, can be null
- * @fill_driver_data: [optional] callback to fill in free-form debug info
- * Returns amount of bytes filled, or -errno.
- * @fence_value_str: [optional] fills in the value of the fence as a string
- * @timeline_value_str: [optional] fills in the current value of the timeline
- * as a string
- *
- * Notes on enable_signaling:
- * For fence implementations that have the capability for hw->hw
- * signaling, they can implement this op to enable the necessary
- * irqs, or insert commands into cmdstream, etc. This is called
- * in the first wait() or add_callback() path to let the fence
- * implementation know that there is another driver waiting on
- * the signal (ie. hw->sw case).
- *
- * This function can be called from atomic context, but not
- * from irq context, so normal spinlocks can be used.
- *
- * A return value of false indicates the fence already passed,
- * or some failure occurred that made it impossible to enable
- * signaling. True indicates successful enabling.
- *
- * fence->status may be set in enable_signaling, but only when false is
- * returned.
- *
- * Calling fence_signal before enable_signaling is called allows
- * for a tiny race window in which enable_signaling is called during,
- * before, or after fence_signal. To fight this, it is recommended
- * that before enable_signaling returns true an extra reference is
- * taken on the fence, to be released when the fence is signaled.
- * This will mean fence_signal will still be called twice, but
- * the second time will be a noop since it was already signaled.
- *
- * Notes on signaled:
- * May set fence->status if returning true.
- *
- * Notes on wait:
- * Must not be NULL, set to fence_default_wait for default implementation.
- * the fence_default_wait implementation should work for any fence, as long
- * as enable_signaling works correctly.
- *
- * Must return -ERESTARTSYS if the wait is intr = true and the wait was
- * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
- * timed out. Can also return other error values on custom implementations,
- * which should be treated as if the fence is signaled. For example a hardware
- * lockup could be reported like that.
- *
- * Notes on release:
- * Can be NULL, this function allows additional commands to run on
- * destruction of the fence. Can be called from irq context.
- * If pointer is set to NULL, kfree will get called instead.
- */
-
-struct fence_ops {
- const char * (*get_driver_name)(struct fence *fence);
- const char * (*get_timeline_name)(struct fence *fence);
- bool (*enable_signaling)(struct fence *fence);
- bool (*signaled)(struct fence *fence);
- signed long (*wait)(struct fence *fence, bool intr, signed long timeout);
- void (*release)(struct fence *fence);
-
- int (*fill_driver_data)(struct fence *fence, void *data, int size);
- void (*fence_value_str)(struct fence *fence, char *str, int size);
- void (*timeline_value_str)(struct fence *fence, char *str, int size);
-};
-
-void fence_init(struct fence *fence, const struct fence_ops *ops,
- spinlock_t *lock, u64 context, unsigned seqno);
-
-void fence_release(struct kref *kref);
-void fence_free(struct fence *fence);
-
-/**
- * fence_get - increases refcount of the fence
- * @fence: [in] fence to increase refcount of
- *
- * Returns the same fence, with refcount increased by 1.
- */
-static inline struct fence *fence_get(struct fence *fence)
-{
- if (fence)
- kref_get(&fence->refcount);
- return fence;
-}
-
-/**
- * fence_get_rcu - get a fence from a reservation_object_list with rcu read lock
- * @fence: [in] fence to increase refcount of
- *
- * Function returns NULL if no refcount could be obtained, or the fence.
- */
-static inline struct fence *fence_get_rcu(struct fence *fence)
-{
- if (kref_get_unless_zero(&fence->refcount))
- return fence;
- else
- return NULL;
-}
-
-/**
- * fence_put - decreases refcount of the fence
- * @fence: [in] fence to reduce refcount of
- */
-static inline void fence_put(struct fence *fence)
-{
- if (fence)
- kref_put(&fence->refcount, fence_release);
-}
-
-int fence_signal(struct fence *fence);
-int fence_signal_locked(struct fence *fence);
-signed long fence_default_wait(struct fence *fence, bool intr, signed long timeout);
-int fence_add_callback(struct fence *fence, struct fence_cb *cb,
- fence_func_t func);
-bool fence_remove_callback(struct fence *fence, struct fence_cb *cb);
-void fence_enable_sw_signaling(struct fence *fence);
-
-/**
- * fence_is_signaled_locked - Return an indication if the fence is signaled yet.
- * @fence: [in] the fence to check
- *
- * Returns true if the fence was already signaled, false if not. Since this
- * function doesn't enable signaling, it is not guaranteed to ever return
- * true if fence_add_callback, fence_wait or fence_enable_sw_signaling
- * haven't been called before.
- *
- * This function requires fence->lock to be held.
- */
-static inline bool
-fence_is_signaled_locked(struct fence *fence)
-{
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return true;
-
- if (fence->ops->signaled && fence->ops->signaled(fence)) {
- fence_signal_locked(fence);
- return true;
- }
-
- return false;
-}
-
-/**
- * fence_is_signaled - Return an indication if the fence is signaled yet.
- * @fence: [in] the fence to check
- *
- * Returns true if the fence was already signaled, false if not. Since this
- * function doesn't enable signaling, it is not guaranteed to ever return
- * true if fence_add_callback, fence_wait or fence_enable_sw_signaling
- * haven't been called before.
- *
- * It's recommended for seqno fences to call fence_signal when the
- * operation is complete, it makes it possible to prevent issues from
- * wraparound between time of issue and time of use by checking the return
- * value of this function before calling hardware-specific wait instructions.
- */
-static inline bool
-fence_is_signaled(struct fence *fence)
-{
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return true;
-
- if (fence->ops->signaled && fence->ops->signaled(fence)) {
- fence_signal(fence);
- return true;
- }
-
- return false;
-}
-
-/**
- * fence_is_later - return if f1 is chronologically later than f2
- * @f1: [in] the first fence from the same context
- * @f2: [in] the second fence from the same context
- *
- * Returns true if f1 is chronologically later than f2. Both fences must be
- * from the same context, since a seqno is not re-used across contexts.
- */
-static inline bool fence_is_later(struct fence *f1, struct fence *f2)
-{
- if (WARN_ON(f1->context != f2->context))
- return false;
-
- return (int)(f1->seqno - f2->seqno) > 0;
-}
-
-/**
- * fence_later - return the chronologically later fence
- * @f1: [in] the first fence from the same context
- * @f2: [in] the second fence from the same context
- *
- * Returns NULL if both fences are signaled, otherwise the fence that would be
- * signaled last. Both fences must be from the same context, since a seqno is
- * not re-used across contexts.
- */
-static inline struct fence *fence_later(struct fence *f1, struct fence *f2)
-{
- if (WARN_ON(f1->context != f2->context))
- return NULL;
-
- /*
- * can't check just FENCE_FLAG_SIGNALED_BIT here, it may never have been
- * set if enable_signaling wasn't called, and enabling that here is
- * overkill.
- */
- if (fence_is_later(f1, f2))
- return fence_is_signaled(f1) ? NULL : f1;
- else
- return fence_is_signaled(f2) ? NULL : f2;
-}
-
-signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout);
-signed long fence_wait_any_timeout(struct fence **fences, uint32_t count,
- bool intr, signed long timeout);
-
-/**
- * fence_wait - sleep until the fence gets signaled
- * @fence: [in] the fence to wait on
- * @intr: [in] if true, do an interruptible wait
- *
- * This function will return -ERESTARTSYS if interrupted by a signal,
- * or 0 if the fence was signaled. Other error values may be
- * returned on custom implementations.
- *
- * Performs a synchronous wait on this fence. It is assumed the caller
- * directly or indirectly holds a reference to the fence, otherwise the
- * fence might be freed before return, resulting in undefined behavior.
- */
-static inline signed long fence_wait(struct fence *fence, bool intr)
-{
- signed long ret;
-
- /* Since fence_wait_timeout cannot timeout with
- * MAX_SCHEDULE_TIMEOUT, only valid return values are
- * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
- */
- ret = fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
-
- return ret < 0 ? ret : 0;
-}
-
-u64 fence_context_alloc(unsigned num);
-
-#define FENCE_TRACE(f, fmt, args...) \
- do { \
- struct fence *__ff = (f); \
- if (IS_ENABLED(CONFIG_FENCE_TRACE)) \
- pr_info("f %llu#%u: " fmt, \
- __ff->context, __ff->seqno, ##args); \
- } while (0)
-
-#define FENCE_WARN(f, fmt, args...) \
- do { \
- struct fence *__ff = (f); \
- pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
- ##args); \
- } while (0)
-
-#define FENCE_ERR(f, fmt, args...) \
- do { \
- struct fence *__ff = (f); \
- pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
- ##args); \
- } while (0)
-
-#endif /* __LINUX_FENCE_H */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 1f09c521adfe..6a1658308612 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -14,6 +14,7 @@
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/capability.h>
+#include <linux/cryptohash.h>
#include <net/sch_generic.h>
@@ -56,6 +57,9 @@ struct bpf_prog_aux;
/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK 512
+/* Maximum BPF program size in bytes. */
+#define MAX_BPF_SIZE (BPF_MAXINSNS * sizeof(struct bpf_insn))
+
/* Helper macros for filter block array initializers. */
/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
@@ -402,14 +406,16 @@ struct bpf_prog {
u16 jited:1, /* Is our filter JIT'ed? */
gpl_compatible:1, /* Is filter GPL compatible? */
cb_access:1, /* Is control block accessed? */
- dst_needed:1; /* Do we need dst entry? */
+ dst_needed:1, /* Do we need dst entry? */
+ xdp_adjust_head:1; /* Adjusting pkt head? */
kmemcheck_bitfield_end(meta);
- u32 len; /* Number of filter blocks */
enum bpf_prog_type type; /* Type of BPF program */
+ u32 len; /* Number of filter blocks */
+ u32 digest[SHA_DIGEST_WORDS]; /* Program digest */
struct bpf_prog_aux *aux; /* Auxiliary fields */
struct sock_fprog_kern *orig_prog; /* Original BPF program */
- unsigned int (*bpf_func)(const struct sk_buff *skb,
- const struct bpf_insn *filter);
+ unsigned int (*bpf_func)(const void *ctx,
+ const struct bpf_insn *insn);
/* Instructions for interpreter */
union {
struct sock_filter insns[0];
@@ -435,10 +441,11 @@ struct bpf_skb_data_end {
struct xdp_buff {
void *data;
void *data_end;
+ void *data_hard_start;
};
/* compute the linear packet data range [data, data_end) which
- * will be accessed by cls_bpf and act_bpf programs
+ * will be accessed by cls_bpf, act_bpf and lwt programs
*/
static inline void bpf_compute_data_end(struct sk_buff *skb)
{
@@ -498,16 +505,16 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
return BPF_PROG_RUN(prog, skb);
}
-static inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
- struct xdp_buff *xdp)
+static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
+ struct xdp_buff *xdp)
{
- u32 ret;
-
- rcu_read_lock();
- ret = BPF_PROG_RUN(prog, (void *)xdp);
- rcu_read_unlock();
-
- return ret;
+ /* Caller needs to hold rcu_read_lock() (!), otherwise program
+ * can be released while still running, or map elements could be
+ * freed early while still having concurrent users. XDP fastpath
+ * already takes rcu_read_lock() when fetching the program, so
+ * it's not necessary here anymore.
+ */
+ return BPF_PROG_RUN(prog, xdp);
}
static inline unsigned int bpf_prog_size(unsigned int proglen)
@@ -590,7 +597,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
-bool bpf_helper_changes_skb_data(void *func);
+bool bpf_helper_changes_pkt_data(void *func);
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
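
Since bpf_prog_run_xdp() no longer takes rcu_read_lock() itself, a driver receive path must bracket both the program lookup and the run. A hedged sketch, with example_run_xdp and prog_slot as hypothetical names:

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/rcupdate.h>

static u32 example_run_xdp(struct bpf_prog __rcu **prog_slot,
			   struct xdp_buff *xdp)
{
	struct bpf_prog *prog;
	u32 act = XDP_PASS;

	/* Caller-side RCU, as the new comment in filter.h requires:
	 * the program and its maps stay alive until the unlock. */
	rcu_read_lock();
	prog = rcu_dereference(*prog_slot);
	if (prog)
		act = bpf_prog_run_xdp(prog, xdp);
	rcu_read_unlock();

	return act;
}
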
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
index c46d2aa16d81..1d18af034554 100644
--- a/include/linux/frontswap.h
+++ b/include/linux/frontswap.h
@@ -106,8 +106,9 @@ static inline void frontswap_invalidate_area(unsigned type)
static inline void frontswap_init(unsigned type, unsigned long *map)
{
- if (frontswap_enabled())
- __frontswap_init(type, map);
+#ifdef CONFIG_FRONTSWAP
+ __frontswap_init(type, map);
+#endif
}
#endif /* _LINUX_FRONTSWAP_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 8533e9d59c29..b84230e070be 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -269,6 +269,7 @@ struct writeback_control;
#define IOCB_HIPRI (1 << 3)
#define IOCB_DSYNC (1 << 4)
#define IOCB_SYNC (1 << 5)
+#define IOCB_WRITE (1 << 6)
struct kiocb {
struct file *ki_filp;
@@ -1657,7 +1658,6 @@ struct file_operations {
int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
int (*fsync) (struct file *, loff_t, loff_t, int datasync);
- int (*aio_fsync) (struct kiocb *, int datasync);
int (*fasync) (int, struct file *, int);
int (*lock) (struct file *, int, struct file_lock *);
ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h
index 649e9171a9b3..3efa3b861d44 100644
--- a/include/linux/fsl/guts.h
+++ b/include/linux/fsl/guts.h
@@ -29,83 +29,112 @@
* #ifdefs.
*/
struct ccsr_guts {
- __be32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */
- __be32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */
- __be32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */
- __be32 pordevsr; /* 0x.000c - POR I/O Device Status Register */
- __be32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */
- __be32 pordevsr2; /* 0x.0014 - POR device status register 2 */
+ u32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */
+ u32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */
+ u32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and
+ * Control Register
+ */
+ u32 pordevsr; /* 0x.000c - POR I/O Device Status Register */
+ u32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */
+ u32 pordevsr2; /* 0x.0014 - POR device status register 2 */
u8 res018[0x20 - 0x18];
- __be32 porcir; /* 0x.0020 - POR Configuration Information Register */
+ u32 porcir; /* 0x.0020 - POR Configuration Information
+ * Register
+ */
u8 res024[0x30 - 0x24];
- __be32 gpiocr; /* 0x.0030 - GPIO Control Register */
+ u32 gpiocr; /* 0x.0030 - GPIO Control Register */
u8 res034[0x40 - 0x34];
- __be32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */
+ u32 gpoutdr; /* 0x.0040 - General-Purpose Output Data
+ * Register
+ */
u8 res044[0x50 - 0x44];
- __be32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */
+ u32 gpindr; /* 0x.0050 - General-Purpose Input Data
+ * Register
+ */
u8 res054[0x60 - 0x54];
- __be32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */
- __be32 pmuxcr2; /* 0x.0064 - Alternate function signal multiplex control 2 */
- __be32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */
+ u32 pmuxcr; /* 0x.0060 - Alternate Function Signal
+ * Multiplex Control
+ */
+ u32 pmuxcr2; /* 0x.0064 - Alternate function signal
+ * multiplex control 2
+ */
+ u32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */
u8 res06c[0x70 - 0x6c];
- __be32 devdisr; /* 0x.0070 - Device Disable Control */
+ u32 devdisr; /* 0x.0070 - Device Disable Control */
#define CCSR_GUTS_DEVDISR_TB1 0x00001000
#define CCSR_GUTS_DEVDISR_TB0 0x00004000
- __be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */
+ u32 devdisr2; /* 0x.0074 - Device Disable Control 2 */
u8 res078[0x7c - 0x78];
- __be32 pmjcr; /* 0x.007c - 4 Power Management Jog Control Register */
- __be32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */
- __be32 pmrccr; /* 0x.0084 - Power Management Reset Counter Configuration Register */
- __be32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter Configuration Register */
- __be32 pmcdr; /* 0x.008c - 4Power management clock disable register */
- __be32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */
- __be32 rstrscr; /* 0x.0094 - Reset Request Status and Control Register */
- __be32 ectrstcr; /* 0x.0098 - Exception reset control register */
- __be32 autorstsr; /* 0x.009c - Automatic reset status register */
- __be32 pvr; /* 0x.00a0 - Processor Version Register */
- __be32 svr; /* 0x.00a4 - System Version Register */
+ u32 pmjcr; /* 0x.007c - 4 Power Management Jog Control
+ * Register
+ */
+ u32 powmgtcsr; /* 0x.0080 - Power Management Status and
+ * Control Register
+ */
+ u32 pmrccr; /* 0x.0084 - Power Management Reset Counter
+ * Configuration Register
+ */
+ u32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter
+ * Configuration Register
+ */
+ u32 pmcdr; /* 0x.008c - 4Power management clock disable
+ * register
+ */
+ u32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */
+ u32 rstrscr; /* 0x.0094 - Reset Request Status and
+ * Control Register
+ */
+ u32 ectrstcr; /* 0x.0098 - Exception reset control register */
+ u32 autorstsr; /* 0x.009c - Automatic reset status register */
+ u32 pvr; /* 0x.00a0 - Processor Version Register */
+ u32 svr; /* 0x.00a4 - System Version Register */
u8 res0a8[0xb0 - 0xa8];
- __be32 rstcr; /* 0x.00b0 - Reset Control Register */
+ u32 rstcr; /* 0x.00b0 - Reset Control Register */
u8 res0b4[0xc0 - 0xb4];
- __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register
+ u32 iovselsr; /* 0x.00c0 - I/O voltage select status register
Called 'elbcvselcr' on 86xx SOCs */
u8 res0c4[0x100 - 0xc4];
- __be32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers
+ u32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers
There are 16 registers */
u8 res140[0x224 - 0x140];
- __be32 iodelay1; /* 0x.0224 - IO delay control register 1 */
- __be32 iodelay2; /* 0x.0228 - IO delay control register 2 */
+ u32 iodelay1; /* 0x.0224 - IO delay control register 1 */
+ u32 iodelay2; /* 0x.0228 - IO delay control register 2 */
u8 res22c[0x604 - 0x22c];
- __be32 pamubypenr; /* 0x.604 - PAMU bypass enable register */
+ u32 pamubypenr; /* 0x.604 - PAMU bypass enable register */
u8 res608[0x800 - 0x608];
- __be32 clkdvdr; /* 0x.0800 - Clock Divide Register */
+ u32 clkdvdr; /* 0x.0800 - Clock Divide Register */
u8 res804[0x900 - 0x804];
- __be32 ircr; /* 0x.0900 - Infrared Control Register */
+ u32 ircr; /* 0x.0900 - Infrared Control Register */
u8 res904[0x908 - 0x904];
- __be32 dmacr; /* 0x.0908 - DMA Control Register */
+ u32 dmacr; /* 0x.0908 - DMA Control Register */
u8 res90c[0x914 - 0x90c];
- __be32 elbccr; /* 0x.0914 - eLBC Control Register */
+ u32 elbccr; /* 0x.0914 - eLBC Control Register */
u8 res918[0xb20 - 0x918];
- __be32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */
- __be32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */
- __be32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */
+ u32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */
+ u32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */
+ u32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */
u8 resb2c[0xe00 - 0xb2c];
- __be32 clkocr; /* 0x.0e00 - Clock Out Select Register */
+ u32 clkocr; /* 0x.0e00 - Clock Out Select Register */
u8 rese04[0xe10 - 0xe04];
- __be32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */
+ u32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */
u8 rese14[0xe20 - 0xe14];
- __be32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */
- __be32 cpfor; /* 0x.0e24 - L2 charge pump fuse override register */
+ u32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */
+ u32 cpfor; /* 0x.0e24 - L2 charge pump fuse override
+ * register
+ */
u8 rese28[0xf04 - 0xe28];
- __be32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */
- __be32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */
+ u32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */
+	u32	srds1cr1;	/* 0x.0f08 - SerDes1 Control Register 1 */
u8 resf0c[0xf2c - 0xf0c];
- __be32 itcr; /* 0x.0f2c - Internal transaction control register */
+ u32 itcr; /* 0x.0f2c - Internal transaction control
+ * register
+ */
u8 resf30[0xf40 - 0xf30];
- __be32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */
- __be32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */
+ u32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */
+	u32	srds2cr1;	/* 0x.0f44 - SerDes2 Control Register 1 */
} __attribute__ ((packed));
+u32 fsl_guts_get_svr(void);
/* Alternate function signal multiplex control */
#define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x))
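
With the registers declared plain u32, byte order becomes the guts driver's problem and other code is expected to go through accessors such as the fsl_guts_get_svr() prototype added above. A hedged sketch of a caller; the field decode in the comment is illustrative, not taken from a datasheet:

#include <linux/fsl/guts.h>
#include <linux/printk.h>

static void example_print_svr(void)
{
	u32 svr = fsl_guts_get_svr();

	/* High bits identify the SoC family, low bits the revision
	 * (illustrative decode for the sketch). */
	pr_info("SVR %#010x (soc %#x rev %u.%u)\n",
		svr, svr >> 8, (svr >> 4) & 0xf, svr & 0xf);
}
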
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index b3d34d3e0e7e..d4a884db16a3 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -398,6 +398,7 @@ int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
+void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);
@@ -645,6 +646,7 @@ static inline unsigned long ftrace_location(unsigned long ip)
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
+#define ftrace_ops_set_global_filter(ops) do { } while (0)
static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
size_t cnt, loff_t *ppos) { return -ENODEV; }
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
index 667c31101b8b..377257d8f7e3 100644
--- a/include/linux/genl_magic_func.h
+++ b/include/linux/genl_magic_func.h
@@ -259,16 +259,7 @@ static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
* {{{2
*/
#define ZZZ_genl_family CONCAT_(GENL_MAGIC_FAMILY, _genl_family)
-static struct genl_family ZZZ_genl_family __read_mostly = {
- .id = GENL_ID_GENERATE,
- .name = __stringify(GENL_MAGIC_FAMILY),
- .version = GENL_MAGIC_VERSION,
-#ifdef GENL_MAGIC_FAMILY_HDRSZ
- .hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ),
-#endif
- .maxattr = ARRAY_SIZE(drbd_tla_nl_policy)-1,
-};
-
+static struct genl_family ZZZ_genl_family;
/*
* Magic: define multicast groups
* Magic: define multicast group registration helper
@@ -302,11 +293,23 @@ static int CONCAT_(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \
#undef GENL_mc_group
#define GENL_mc_group(group)
+static struct genl_family ZZZ_genl_family __ro_after_init = {
+ .name = __stringify(GENL_MAGIC_FAMILY),
+ .version = GENL_MAGIC_VERSION,
+#ifdef GENL_MAGIC_FAMILY_HDRSZ
+ .hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ),
+#endif
+ .maxattr = ARRAY_SIZE(drbd_tla_nl_policy)-1,
+ .ops = ZZZ_genl_ops,
+ .n_ops = ARRAY_SIZE(ZZZ_genl_ops),
+ .mcgrps = ZZZ_genl_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(ZZZ_genl_mcgrps),
+ .module = THIS_MODULE,
+};
+
int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void)
{
- return genl_register_family_with_ops_groups(&ZZZ_genl_family, \
- ZZZ_genl_ops, \
- ZZZ_genl_mcgrps);
+ return genl_register_family(&ZZZ_genl_family);
}
void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void)
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 24e2cc56beb1..c2748accea71 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -82,8 +82,6 @@ enum single_ended_mode {
* implies that if the chip supports IRQs, these IRQs need to be threaded
* as the chip access may sleep when e.g. reading out the IRQ status
* registers.
- * @irq_not_threaded: flag must be set if @can_sleep is set but the
- * IRQs don't need to be threaded
* @read_reg: reader function for generic GPIO
* @write_reg: writer function for generic GPIO
* @pin2mask: some generic GPIO controllers work with the big-endian bits
@@ -91,7 +89,7 @@ enum single_ended_mode {
* bit. This callback assigns the right bit mask.
* @reg_dat: data (in) register for generic GPIO
* @reg_set: output set register (out=high) for generic GPIO
- * @reg_clk: output clear register (out=low) for generic GPIO
+ * @reg_clr: output clear register (out=low) for generic GPIO
* @reg_dir: direction setting register for generic GPIO
* @bgpio_bits: number of register bits used for a generic GPIO i.e.
* <register width> * 8
@@ -109,8 +107,10 @@ enum single_ended_mode {
* for GPIO IRQs, provided by GPIO driver
* @irq_default_type: default IRQ triggering type applied during GPIO driver
* initialization, provided by GPIO driver
- * @irq_parent: GPIO IRQ chip parent/bank linux irq number,
- * provided by GPIO driver
+ * @irq_chained_parent: GPIO IRQ chip parent/bank linux irq number,
+ * provided by GPIO driver for chained interrupt (not for nested
+ * interrupts).
+ *	@irq_nested: True if the interrupt handling is nested.
* @irq_need_valid_mask: If set core allocates @irq_valid_mask with all
* bits set to one
* @irq_valid_mask: If not %NULL holds bitmask of GPIOs which are valid to
@@ -166,7 +166,6 @@ struct gpio_chip {
u16 ngpio;
const char *const *names;
bool can_sleep;
- bool irq_not_threaded;
#if IS_ENABLED(CONFIG_GPIO_GENERIC)
unsigned long (*read_reg)(void __iomem *reg);
@@ -192,7 +191,8 @@ struct gpio_chip {
unsigned int irq_base;
irq_flow_handler_t irq_handler;
unsigned int irq_default_type;
- int irq_parent;
+ int irq_chained_parent;
+ bool irq_nested;
bool irq_need_valid_mask;
unsigned long *irq_valid_mask;
struct lock_class_key *lock_key;
@@ -270,24 +270,40 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
int parent_irq,
irq_flow_handler_t parent_handler);
+void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ int parent_irq);
+
int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
unsigned int type,
+ bool nested,
struct lock_class_key *lock_key);
+/* FIXME: I assume threaded IRQchips do not have the lockdep problem */
+static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ unsigned int first_irq,
+ irq_flow_handler_t handler,
+ unsigned int type)
+{
+ return _gpiochip_irqchip_add(gpiochip, irqchip, first_irq,
+ handler, type, true, NULL);
+}
+
#ifdef CONFIG_LOCKDEP
#define gpiochip_irqchip_add(...) \
( \
({ \
static struct lock_class_key _key; \
- _gpiochip_irqchip_add(__VA_ARGS__, &_key); \
+ _gpiochip_irqchip_add(__VA_ARGS__, false, &_key); \
}) \
)
#else
#define gpiochip_irqchip_add(...) \
- _gpiochip_irqchip_add(__VA_ARGS__, NULL)
+ _gpiochip_irqchip_add(__VA_ARGS__, false, NULL)
#endif
#endif /* CONFIG_GPIOLIB_IRQCHIP */
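
The nested variant replaces the removed irq_not_threaded flag: a sleeping (say, I2C-attached) GPIO expander registers its irqchip as nested and then records the parent interrupt instead of installing a chained flow handler. A minimal sketch under those assumptions, with my_chip, my_irqchip and parent_irq as placeholders:

#include <linux/gpio/driver.h>
#include <linux/irq.h>

static int example_setup_nested_irqs(struct gpio_chip *my_chip,
				     struct irq_chip *my_irqchip,
				     int parent_irq)
{
	int ret;

	ret = gpiochip_irqchip_add_nested(my_chip, my_irqchip, 0,
					  handle_simple_irq,
					  IRQ_TYPE_NONE);
	if (ret)
		return ret;

	/* Nested chips record the parent irq; no chained handler. */
	gpiochip_set_nested_irqchip(my_chip, my_irqchip, parent_irq);
	return 0;
}
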
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h
index e31bcd4c7859..97585d9679f3 100644
--- a/include/linux/hdlc.h
+++ b/include/linux/hdlc.h
@@ -93,8 +93,6 @@ static __inline__ void debug_frame(const struct sk_buff *skb)
int hdlc_open(struct net_device *dev);
/* Must be called by hardware driver when HDLC device is being closed */
void hdlc_close(struct net_device *dev);
-/* May be used by hardware driver */
-int hdlc_change_mtu(struct net_device *dev, int new_mtu);
/* Must be pointed to by hw driver's dev->netdev_ops->ndo_start_xmit */
netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev);
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index e9744202fa29..edbb4fc674ed 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -78,6 +78,8 @@ enum hdmi_picture_aspect {
HDMI_PICTURE_ASPECT_NONE,
HDMI_PICTURE_ASPECT_4_3,
HDMI_PICTURE_ASPECT_16_9,
+ HDMI_PICTURE_ASPECT_64_27,
+ HDMI_PICTURE_ASPECT_256_135,
HDMI_PICTURE_ASPECT_RESERVED,
};
diff --git a/include/linux/hippidevice.h b/include/linux/hippidevice.h
index 8ec23fb0b412..402f99e328d4 100644
--- a/include/linux/hippidevice.h
+++ b/include/linux/hippidevice.h
@@ -32,7 +32,6 @@ struct hippi_cb {
};
__be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
-int hippi_change_mtu(struct net_device *dev, int new_mtu);
int hippi_mac_addr(struct net_device *dev, void *p);
int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p);
struct net_device *alloc_hippi_dev(int sizeof_priv);
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 9b9f65d99873..1f782aa1d8e6 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -22,7 +22,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, unsigned long old_end,
- pmd_t *old_pmd, pmd_t *new_pmd);
+ pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, pgprot_t newprot,
int prot_numa);
@@ -189,6 +189,8 @@ static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address) \
do { } while (0)
+static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+ unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
unsigned long address, bool freeze, struct page *page) {}
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 6824556d37ed..cd184bdca58f 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1169,13 +1169,6 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
-static inline const char *vmbus_dev_name(const struct hv_device *device_obj)
-{
- const struct kobject *kobj = &device_obj->device.kobj;
-
- return kobj->name;
-}
-
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index a80516fd65c8..fe849329511a 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1576,6 +1576,9 @@ struct ieee80211_vht_operation {
#define WLAN_AUTH_SHARED_KEY 1
#define WLAN_AUTH_FT 2
#define WLAN_AUTH_SAE 3
+#define WLAN_AUTH_FILS_SK 4
+#define WLAN_AUTH_FILS_SK_PFS 5
+#define WLAN_AUTH_FILS_PK 6
#define WLAN_AUTH_LEAP 128
#define WLAN_AUTH_CHALLENGE_LEN 128
@@ -1960,6 +1963,26 @@ enum ieee80211_eid {
WLAN_EID_VENDOR_SPECIFIC = 221,
WLAN_EID_QOS_PARAMETER = 222,
+ WLAN_EID_CAG_NUMBER = 237,
+ WLAN_EID_AP_CSN = 239,
+ WLAN_EID_FILS_INDICATION = 240,
+ WLAN_EID_DILS = 241,
+ WLAN_EID_FRAGMENT = 242,
+ WLAN_EID_EXTENSION = 255
+};
+
+/* Element ID Extensions for Element ID 255 */
+enum ieee80211_eid_ext {
+ WLAN_EID_EXT_ASSOC_DELAY_INFO = 1,
+ WLAN_EID_EXT_FILS_REQ_PARAMS = 2,
+ WLAN_EID_EXT_FILS_KEY_CONFIRM = 3,
+ WLAN_EID_EXT_FILS_SESSION = 4,
+ WLAN_EID_EXT_FILS_HLP_CONTAINER = 5,
+ WLAN_EID_EXT_FILS_IP_ADDR_ASSIGN = 6,
+ WLAN_EID_EXT_KEY_DELIVERY = 7,
+ WLAN_EID_EXT_FILS_WRAPPED_DATA = 8,
+ WLAN_EID_EXT_FILS_PUBLIC_KEY = 12,
+ WLAN_EID_EXT_FILS_NONCE = 13,
};
/* Action category code */
@@ -2073,6 +2096,9 @@ enum ieee80211_key_len {
#define IEEE80211_GCMP_MIC_LEN 16
#define IEEE80211_GCMP_PN_LEN 6
+#define FILS_NONCE_LEN 16
+#define FILS_MAX_KEK_LEN 64
+
/* Public action codes */
enum ieee80211_pub_actioncode {
WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4,
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index f563907ed776..3355efc89781 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -44,4 +44,20 @@ static inline int arp_hdr_len(struct net_device *dev)
return sizeof(struct arphdr) + (dev->addr_len + sizeof(u32)) * 2;
}
}
+
+static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
+{
+ switch (dev->type) {
+ case ARPHRD_TUNNEL:
+ case ARPHRD_TUNNEL6:
+ case ARPHRD_SIT:
+ case ARPHRD_IPGRE:
+ case ARPHRD_VOID:
+ case ARPHRD_NONE:
+ return false;
+ default:
+ return true;
+ }
+}
+
#endif /* _LINUX_IF_ARP_H */
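
dev_is_mac_header_xmit() answers whether frames handed to the device for transmit start at the MAC header, which redirect-style consumers need before deciding how to frame a packet. A hedged sketch of such a consumer (example_choose_framing is hypothetical):

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/printk.h>

static void example_choose_framing(const struct net_device *dev)
{
	if (dev_is_mac_header_xmit(dev))
		pr_debug("%s: transmit starts at the MAC header\n",
			 dev->name);
	else
		pr_debug("%s: L3 device, no MAC header on xmit\n",
			 dev->name);
}
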
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 3319d97d789d..8d5fcd6284ce 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -399,22 +399,6 @@ static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
skb->vlan_tci = 0;
return skb;
}
-/*
- * vlan_hwaccel_push_inside - pushes vlan tag to the payload
- * @skb: skbuff to tag
- *
- * Checks if tag is present in @skb->vlan_tci and if it is, it pushes the
- * VLAN tag from @skb->vlan_tci inside to the payload.
- *
- * Following the skb_unshare() example, in case of error, the calling function
- * doesn't have to worry about freeing the original skb.
- */
-static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb)
-{
- if (skb_vlan_tag_present(skb))
- skb = __vlan_hwaccel_push_inside(skb);
- return skb;
-}
/**
* __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 2d9b650047a5..d49e26c6cdc7 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -429,6 +429,7 @@ struct intel_iommu {
struct page_req_dsc *prq;
unsigned char prq_name[16]; /* Name for PRQ interrupt */
struct idr pasid_idr;
+ u32 pasid_max;
#endif
struct q_inval *qi; /* Queued invalidation info */
u32 *iommu_state; /* Store iommu states between suspend and resume.*/
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 72f0721f75e7..53144e78a369 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -232,6 +232,18 @@ struct irq_affinity_notify {
void (*release)(struct kref *ref);
};
+/**
+ * struct irq_affinity - Description for automatic irq affinity assignments
+ * @pre_vectors: Don't apply affinity to @pre_vectors at beginning of
+ * the MSI(-X) vector space
+ * @post_vectors: Don't apply affinity to @post_vectors at end of
+ * the MSI(-X) vector space
+ */
+struct irq_affinity {
+ int pre_vectors;
+ int post_vectors;
+};
+
#if defined(CONFIG_SMP)
extern cpumask_var_t irq_default_affinity;
@@ -278,8 +290,8 @@ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
-struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity, int nvec);
-int irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec);
+struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
+int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd);
#else /* CONFIG_SMP */
@@ -313,13 +325,13 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
}
static inline struct cpumask *
-irq_create_affinity_masks(const struct cpumask *affinity, int nvec)
+irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
{
return NULL;
}
static inline int
-irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec)
+irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd)
{
return maxvec;
}
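
The struct irq_affinity parameter lets a driver exclude housekeeping vectors from automatic spreading. A hedged sketch for a device whose first vector is a slow-path/admin interrupt (the layout is illustrative):

#include <linux/interrupt.h>

static struct cpumask *example_spread_vectors(int nvec)
{
	/* Spread vectors 1..nvec-1 across CPUs; vector 0 is left
	 * for the driver to place, and nothing is reserved at the
	 * tail of the MSI-X space. */
	static const struct irq_affinity affd = {
		.pre_vectors	= 1,
		.post_vectors	= 0,
	};

	return irq_create_affinity_masks(nvec, &affd);
}
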
diff --git a/include/linux/io.h b/include/linux/io.h
index e2c8419278c1..82ef36eac8a1 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -141,4 +141,26 @@ enum {
void *memremap(resource_size_t offset, size_t size, unsigned long flags);
void memunmap(void *addr);
+/*
+ * On x86 PAT systems we have memory tracking that keeps track of
+ * the allowed mappings on memory ranges. This tracking works for
+ * all the in-kernel mapping APIs (ioremap*), but where the user
+ * wishes to map a range from a physical device into user memory
+ * the tracking won't be updated. This API is to be used by
+ * drivers which remap physical device pages into userspace,
+ * and want to make sure they are mapped WC and not UC.
+ */
+#ifndef arch_io_reserve_memtype_wc
+static inline int arch_io_reserve_memtype_wc(resource_size_t base,
+ resource_size_t size)
+{
+ return 0;
+}
+
+static inline void arch_io_free_memtype_wc(resource_size_t base,
+ resource_size_t size)
+{
+}
+#endif
+
#endif /* _LINUX_IO_H */
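
A hedged sketch of the intended pairing in a driver that wants device memory mapped write-combined; the example_ wrapper and its error handling are assumptions of the sketch:

#include <linux/io.h>

static void __iomem *example_map_vram_wc(resource_size_t base,
					 resource_size_t size)
{
	void __iomem *vram;

	/* Record the WC intent with the PAT tracking first, so later
	 * userspace mappings of the same range agree with it. */
	if (arch_io_reserve_memtype_wc(base, size))
		return NULL;

	vram = ioremap_wc(base, size);
	if (!vram)
		arch_io_free_memtype_wc(base, size);

	return vram;
}
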
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index e63e288dee83..7892f55a1866 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -19,11 +19,15 @@ struct vm_fault;
#define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */
/*
- * Flags for iomap mappings:
+ * Flags for all iomap mappings:
*/
-#define IOMAP_F_MERGED 0x01 /* contains multiple blocks/extents */
-#define IOMAP_F_SHARED 0x02 /* block shared with another file */
-#define IOMAP_F_NEW 0x04 /* blocks have been newly allocated */
+#define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */
+
+/*
+ * Flags that only need to be reported for IOMAP_REPORT requests:
+ */
+#define IOMAP_F_MERGED 0x10 /* contains multiple blocks/extents */
+#define IOMAP_F_SHARED 0x20 /* block shared with another file */
/*
* Magic value for blkno:
@@ -42,8 +46,9 @@ struct iomap {
/*
* Flags for iomap_begin / iomap_end. No flag implies a read.
*/
-#define IOMAP_WRITE (1 << 0)
-#define IOMAP_ZERO (1 << 1)
+#define IOMAP_WRITE (1 << 0) /* writing, must allocate blocks */
+#define IOMAP_ZERO (1 << 1) /* zeroing operation, may skip holes */
+#define IOMAP_REPORT (1 << 2) /* report extent status, e.g. FIEMAP */
struct iomap_ops {
/*
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 7e9a789be5e0..671d014e6429 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -64,6 +64,11 @@ struct ipv6_devconf {
} stable_secret;
__s32 use_oif_addrs_only;
__s32 keep_addr_on_down;
+ __s32 seg6_enabled;
+#ifdef CONFIG_IPV6_SEG6_HMAC
+ __s32 seg6_require_hmac;
+#endif
+ __u32 enhanced_dad;
struct ctl_table_header *sysctl_header;
};
@@ -123,12 +128,12 @@ struct inet6_skb_parm {
};
#if defined(CONFIG_NET_L3_MASTER_DEV)
-static inline bool skb_l3mdev_slave(__u16 flags)
+static inline bool ipv6_l3mdev_skb(__u16 flags)
{
return flags & IP6SKB_L3SLAVE;
}
#else
-static inline bool skb_l3mdev_slave(__u16 flags)
+static inline bool ipv6_l3mdev_skb(__u16 flags)
{
return false;
}
@@ -139,11 +144,22 @@ static inline bool skb_l3mdev_slave(__u16 flags)
static inline int inet6_iif(const struct sk_buff *skb)
{
- bool l3_slave = skb_l3mdev_slave(IP6CB(skb)->flags);
+ bool l3_slave = ipv6_l3mdev_skb(IP6CB(skb)->flags);
return l3_slave ? skb->skb_iif : IP6CB(skb)->iif;
}
+/* can not be used in TCP layer after tcp_v6_fill_cb */
+static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
+{
+#if defined(CONFIG_NET_L3_MASTER_DEV)
+ if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
+ skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
+ return true;
+#endif
+ return false;
+}
+
struct tcp6_request_sock {
struct tcp_request_sock tcp6rsk_tcp;
};
@@ -218,8 +234,9 @@ struct ipv6_pinfo {
rxflow:1,
rxtclass:1,
rxpmtu:1,
- rxorigdstaddr:1;
- /* 2 bits hole */
+ rxorigdstaddr:1,
+ recvfragsize:1;
+				/* 1 bit hole */
} bits;
__u16 all;
} rxopt;
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 8361c8d3edd1..5118d3a0c9ca 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -239,7 +239,7 @@
#define GITS_TYPER_PTA (1UL << 19)
#define GITS_TYPER_HWCOLLCNT_SHIFT 24
-#define GITS_CBASER_VALID (1UL << 63)
+#define GITS_CBASER_VALID (1ULL << 63)
#define GITS_CBASER_SHAREABILITY_SHIFT (10)
#define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59)
#define GITS_CBASER_OUTER_CACHEABILITY_SHIFT (53)
@@ -265,7 +265,7 @@
#define GITS_BASER_NR_REGS 8
-#define GITS_BASER_VALID (1UL << 63)
+#define GITS_BASER_VALID (1ULL << 63)
#define GITS_BASER_INDIRECT (1ULL << 62)
#define GITS_BASER_INNER_CACHEABILITY_SHIFT (59)
@@ -290,7 +290,7 @@
#define GITS_BASER_TYPE_SHIFT (56)
#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
-#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)
+#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
#define GITS_BASER_SHAREABILITY_SHIFT (10)
#define GITS_BASER_InnerShareable \
GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index d600303306eb..820c0ad54a01 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -44,6 +44,7 @@ static inline void kasan_disable_current(void)
void kasan_unpoison_shadow(const void *address, size_t size);
void kasan_unpoison_task_stack(struct task_struct *task);
+void kasan_unpoison_stack_above_sp_to(const void *watermark);
void kasan_alloc_pages(struct page *page, unsigned int order);
void kasan_free_pages(struct page *page, unsigned int order);
@@ -85,6 +86,7 @@ size_t kasan_metadata_size(struct kmem_cache *cache);
static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
+static inline void kasan_unpoison_stack_above_sp_to(const void *watermark) {}
static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index 15ec117ec537..8f2e059e4d45 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -31,7 +31,6 @@
* When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when
* the last step cherry picks the 2nd arg, we get a zero.
*/
-#define config_enabled(cfg) ___is_defined(cfg)
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
@@ -41,13 +40,13 @@
* otherwise. For boolean options, this is equivalent to
* IS_ENABLED(CONFIG_FOO).
*/
-#define IS_BUILTIN(option) config_enabled(option)
+#define IS_BUILTIN(option) __is_defined(option)
/*
* IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
* otherwise.
*/
-#define IS_MODULE(option) config_enabled(option##_MODULE)
+#define IS_MODULE(option) __is_defined(option##_MODULE)
/*
* IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 44fda64ad434..00f776816aa3 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -78,8 +78,8 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
return kstat_cpu(cpu).irqs_sum;
}
-extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
-extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
+extern void account_user_time(struct task_struct *, cputime_t);
+extern void account_system_time(struct task_struct *, int, cputime_t);
extern void account_steal_time(cputime_t);
extern void account_idle_time(cputime_t);
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index a6e82a69c363..4fec8b775895 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -48,6 +48,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
__k; \
})
+void free_kthread_struct(struct task_struct *k);
void kthread_bind(struct task_struct *k, unsigned int cpu);
void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
int kthread_stop(struct task_struct *k);
@@ -174,7 +175,7 @@ __printf(2, 3)
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...);
-struct kthread_worker *
+__printf(3, 4) struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
const char namefmt[], ...);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 01c0b9cc3915..81ba3ba641ba 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -224,7 +224,6 @@ struct kvm_vcpu {
int fpu_active;
int guest_fpu_loaded, guest_xcr0_loaded;
- unsigned char fpu_counter;
struct swait_queue_head wq;
struct pid *pid;
int sigset_active;
@@ -645,6 +644,8 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len);
+int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, int offset, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
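
kvm_write_guest_offset_cached() writes a sub-range of an already-initialized gfn_to_hva_cache without re-translating the whole object. A hedged sketch updating one field; struct my_shared_page and example_bump_version are hypothetical:

#include <linux/kvm_host.h>
#include <linux/stddef.h>

struct my_shared_page {			/* hypothetical guest-shared layout */
	u32 version;
	u32 flags;
};

static int example_bump_version(struct kvm *kvm,
				struct gfn_to_hva_cache *ghc, u32 version)
{
	/* Touch only the 'version' field inside the cached mapping. */
	return kvm_write_guest_offset_cached(kvm, ghc, &version,
			offsetof(struct my_shared_page, version),
			sizeof(version));
}
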
diff --git a/include/linux/leds.h b/include/linux/leds.h
index ddfcb2df3656..569cb531094c 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -42,16 +42,20 @@ struct led_classdev {
#define LED_UNREGISTERING (1 << 1)
/* Upper 16 bits reflect control information */
#define LED_CORE_SUSPENDRESUME (1 << 16)
-#define LED_BLINK_SW (1 << 17)
-#define LED_BLINK_ONESHOT (1 << 18)
-#define LED_BLINK_ONESHOT_STOP (1 << 19)
-#define LED_BLINK_INVERT (1 << 20)
-#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 21)
-#define LED_BLINK_DISABLE (1 << 22)
-#define LED_SYSFS_DISABLE (1 << 23)
-#define LED_DEV_CAP_FLASH (1 << 24)
-#define LED_HW_PLUGGABLE (1 << 25)
-#define LED_PANIC_INDICATOR (1 << 26)
+#define LED_SYSFS_DISABLE (1 << 17)
+#define LED_DEV_CAP_FLASH (1 << 18)
+#define LED_HW_PLUGGABLE (1 << 19)
+#define LED_PANIC_INDICATOR (1 << 20)
+
+ /* set_brightness_work / blink_timer flags, atomic, private. */
+ unsigned long work_flags;
+
+#define LED_BLINK_SW 0
+#define LED_BLINK_ONESHOT 1
+#define LED_BLINK_ONESHOT_STOP 2
+#define LED_BLINK_INVERT 3
+#define LED_BLINK_BRIGHTNESS_CHANGE 4
+#define LED_BLINK_DISABLE 5
/* Set LED brightness level
* Must not sleep. Use brightness_set_blocking for drivers
@@ -89,6 +93,7 @@ struct led_classdev {
unsigned long blink_delay_on, blink_delay_off;
struct timer_list blink_timer;
int blink_brightness;
+ int new_blink_brightness;
void (*flash_resume)(struct led_classdev *led_cdev);
struct work_struct set_brightness_work;
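
Note the change of representation in the hunk above: the blink flags moved from (1 << n) masks in ->flags to plain bit numbers so they can drive the atomic bitops on the new ->work_flags word. A short sketch of the resulting usage (example_mark_oneshot is hypothetical):

#include <linux/leds.h>
#include <linux/bitops.h>

static void example_mark_oneshot(struct led_classdev *led_cdev)
{
	/* Bit numbers, not masks: set_bit()/test_bit() are atomic,
	 * so timer and workqueue contexts need no extra locking. */
	set_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags);

	if (test_bit(LED_BLINK_SW, &led_cdev->work_flags))
		clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags);
}
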
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index f4947fda11e7..8458c5351e56 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -143,7 +143,7 @@ u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
const struct nd_cmd_desc *desc, int idx, void *buf);
u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
const struct nd_cmd_desc *desc, int idx, const u32 *in_field,
- const u32 *out_field);
+ const u32 *out_field, unsigned long remainder);
int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count);
struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
struct nd_region_desc *ndr_desc);
diff --git a/include/linux/list.h b/include/linux/list.h
index 5809e9a2de5b..d1039ecaf94f 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -28,27 +28,42 @@ static inline void INIT_LIST_HEAD(struct list_head *list)
list->prev = list;
}
+#ifdef CONFIG_DEBUG_LIST
+extern bool __list_add_valid(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next);
+extern bool __list_del_entry_valid(struct list_head *entry);
+#else
+static inline bool __list_add_valid(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ return true;
+}
+static inline bool __list_del_entry_valid(struct list_head *entry)
+{
+ return true;
+}
+#endif
+
/*
* Insert a new entry between two known consecutive entries.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
-#ifndef CONFIG_DEBUG_LIST
static inline void __list_add(struct list_head *new,
struct list_head *prev,
struct list_head *next)
{
+ if (!__list_add_valid(new, prev, next))
+ return;
+
next->prev = new;
new->next = next;
new->prev = prev;
WRITE_ONCE(prev->next, new);
}
-#else
-extern void __list_add(struct list_head *new,
- struct list_head *prev,
- struct list_head *next);
-#endif
/**
* list_add - add a new entry
@@ -96,22 +111,20 @@ static inline void __list_del(struct list_head * prev, struct list_head * next)
* Note: list_empty() on entry does not return true after this, the entry is
* in an undefined state.
*/
-#ifndef CONFIG_DEBUG_LIST
static inline void __list_del_entry(struct list_head *entry)
{
+ if (!__list_del_entry_valid(entry))
+ return;
+
__list_del(entry->prev, entry->next);
}
static inline void list_del(struct list_head *entry)
{
- __list_del(entry->prev, entry->next);
+ __list_del_entry(entry);
entry->next = LIST_POISON1;
entry->prev = LIST_POISON2;
}
-#else
-extern void __list_del_entry(struct list_head *entry);
-extern void list_del(struct list_head *entry);
-#endif
/**
* list_replace - replace old entry by new one
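
With the checks hoisted into __list_add_valid()/__list_del_entry_valid(), the inline fast paths above stay trivial unless CONFIG_DEBUG_LIST is set, in which case a corrupt operation is vetoed instead of performed. A hedged sketch of what the out-of-line add-side check can look like (the in-tree version lives in lib/list_debug.c and differs in its reporting):

#include <linux/list.h>
#include <linux/bug.h>

bool __list_add_valid(struct list_head *new, struct list_head *prev,
		      struct list_head *next)
{
	/* Reject adds into a corrupted neighborhood or double-adds. */
	if (unlikely(next->prev != prev || prev->next != next ||
		     new == prev || new == next)) {
		WARN(1, "list_add corruption: prev=%p next=%p new=%p\n",
		     prev, next, new);
		return false;
	}
	return true;
}
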
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index 2931aa43dab1..0d3f14fd2621 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -82,6 +82,7 @@ static inline int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size,
}
#endif
+#ifdef CONFIG_MVEBU_MBUS
int mvebu_mbus_save_cpu_target(u32 __iomem *store_addr);
void mvebu_mbus_get_pcie_mem_aperture(struct resource *res);
void mvebu_mbus_get_pcie_io_aperture(struct resource *res);
@@ -97,5 +98,12 @@ int mvebu_mbus_init(const char *soc, phys_addr_t mbus_phys_base,
size_t mbus_size, phys_addr_t sdram_phys_base,
size_t sdram_size);
int mvebu_mbus_dt_init(bool is_coherent);
+#else
+static inline int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target,
+ u8 *attr)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_MVEBU_MBUS */
#endif /* __LINUX_MBUS_H */
diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h
index a585b4b5fa0e..0661af17a758 100644
--- a/include/linux/mc146818rtc.h
+++ b/include/linux/mc146818rtc.h
@@ -16,6 +16,7 @@
#include <asm/mc146818rtc.h> /* register access macros */
#include <linux/bcd.h>
#include <linux/delay.h>
+#include <linux/pm-trace.h>
#ifdef __KERNEL__
#include <linux/spinlock.h> /* spinlock_t */
diff --git a/include/linux/mdev.h b/include/linux/mdev.h
new file mode 100644
index 000000000000..ec819e9a115a
--- /dev/null
+++ b/include/linux/mdev.h
@@ -0,0 +1,168 @@
+/*
+ * Mediated device definition
+ *
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Author: Neo Jia <cjia@nvidia.com>
+ * Kirti Wankhede <kwankhede@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef MDEV_H
+#define MDEV_H
+
+/* Parent device */
+struct parent_device {
+ struct device *dev;
+ const struct parent_ops *ops;
+
+ /* internal */
+ struct kref ref;
+ struct mutex lock;
+ struct list_head next;
+ struct kset *mdev_types_kset;
+ struct list_head type_list;
+};
+
+/* Mediated device */
+struct mdev_device {
+ struct device dev;
+ struct parent_device *parent;
+ uuid_le uuid;
+ void *driver_data;
+
+ /* internal */
+ struct kref ref;
+ struct list_head next;
+ struct kobject *type_kobj;
+};
+
+/**
+ * struct parent_ops - Structure to be registered for each parent device to
+ * register the device with the mdev module.
+ *
+ * @owner: The module owner.
+ * @dev_attr_groups: Attributes of the parent device.
+ * @mdev_attr_groups: Attributes of the mediated device.
+ * @supported_type_groups: Attributes to define supported types. It is mandatory
+ * to provide supported types.
+ * @create: Called to allocate basic resources in parent device's
+ * driver for a particular mediated device. It is
+ * mandatory to provide create ops.
+ * @kobj: kobject of type for which 'create' is called.
+ * @mdev:		mdev_device structure of the mediated device
+ *			that is being created
+ * Returns integer: success (0) or error (< 0)
+ * @remove:		Called to free resources in parent device's driver for
+ *			a mediated device. It is mandatory to provide 'remove'
+ *			ops.
+ * @mdev: mdev_device device structure which is being
+ * destroyed
+ * Returns integer: success (0) or error (< 0)
+ * @open: Open mediated device.
+ * @mdev: mediated device.
+ * Returns integer: success (0) or error (< 0)
+ * @release: release mediated device
+ * @mdev: mediated device.
+ * @read: Read emulation callback
+ * @mdev: mediated device structure
+ * @buf: read buffer
+ * @count: number of bytes to read
+ * @ppos: address.
+ *			Returns number of bytes read on success or error.
+ * @write: Write emulation callback
+ * @mdev: mediated device structure
+ * @buf: write buffer
+ * @count: number of bytes to be written
+ * @ppos: address.
+ *			Returns number of bytes written on success or error.
+ * @ioctl: IOCTL callback
+ * @mdev: mediated device structure
+ * @cmd: ioctl command
+ * @arg: arguments to ioctl
+ * @mmap: mmap callback
+ * @mdev: mediated device structure
+ * @vma: vma structure
+ * A parent device that supports mediated devices should be registered with
+ * the mdev module along with a parent_ops structure.
+ **/
+
+struct parent_ops {
+ struct module *owner;
+ const struct attribute_group **dev_attr_groups;
+ const struct attribute_group **mdev_attr_groups;
+ struct attribute_group **supported_type_groups;
+
+ int (*create)(struct kobject *kobj, struct mdev_device *mdev);
+ int (*remove)(struct mdev_device *mdev);
+ int (*open)(struct mdev_device *mdev);
+ void (*release)(struct mdev_device *mdev);
+ ssize_t (*read)(struct mdev_device *mdev, char __user *buf,
+ size_t count, loff_t *ppos);
+ ssize_t (*write)(struct mdev_device *mdev, const char __user *buf,
+ size_t count, loff_t *ppos);
+ ssize_t (*ioctl)(struct mdev_device *mdev, unsigned int cmd,
+ unsigned long arg);
+ int (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma);
+};
+
+/* interface for exporting mdev supported type attributes */
+struct mdev_type_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj, struct device *dev, char *buf);
+ ssize_t (*store)(struct kobject *kobj, struct device *dev,
+ const char *buf, size_t count);
+};
+
+#define MDEV_TYPE_ATTR(_name, _mode, _show, _store) \
+struct mdev_type_attribute mdev_type_attr_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+#define MDEV_TYPE_ATTR_RW(_name) \
+ struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RW(_name)
+#define MDEV_TYPE_ATTR_RO(_name) \
+ struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RO(_name)
+#define MDEV_TYPE_ATTR_WO(_name) \
+ struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_WO(_name)
+
+/**
+ * struct mdev_driver - Mediated device driver
+ * @name: driver name
+ * @probe: called when new device created
+ * @remove: called when device removed
+ * @driver: device driver structure
+ *
+ **/
+struct mdev_driver {
+ const char *name;
+ int (*probe)(struct device *dev);
+ void (*remove)(struct device *dev);
+ struct device_driver driver;
+};
+
+#define to_mdev_driver(drv) container_of(drv, struct mdev_driver, driver)
+#define to_mdev_device(dev) container_of(dev, struct mdev_device, dev)
+
+static inline void *mdev_get_drvdata(struct mdev_device *mdev)
+{
+ return mdev->driver_data;
+}
+
+static inline void mdev_set_drvdata(struct mdev_device *mdev, void *data)
+{
+ mdev->driver_data = data;
+}
+
+extern struct bus_type mdev_bus_type;
+
+#define dev_is_mdev(d) ((d)->bus == &mdev_bus_type)
+
+extern int mdev_register_device(struct device *dev,
+ const struct parent_ops *ops);
+extern void mdev_unregister_device(struct device *dev);
+
+extern int mdev_register_driver(struct mdev_driver *drv, struct module *owner);
+extern void mdev_unregister_driver(struct mdev_driver *drv);
+
+#endif /* MDEV_H */
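
A rough sketch of how a vendor driver would consume this interface: provide the mandatory create/remove callbacks in a parent_ops and register the physical device. Everything prefixed my_ is hypothetical (my_type_groups is assumed defined elsewhere); only the mdev_* and parent_ops symbols come from this header.

    static int my_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
    {
            void *state = kzalloc(64, GFP_KERNEL); /* per-instance state */

            if (!state)
                    return -ENOMEM;
            mdev_set_drvdata(mdev, state);
            return 0;
    }

    static int my_mdev_remove(struct mdev_device *mdev)
    {
            kfree(mdev_get_drvdata(mdev));
            return 0;
    }

    static const struct parent_ops my_parent_ops = {
            .owner                  = THIS_MODULE,
            .supported_type_groups  = my_type_groups,
            .create                 = my_mdev_create,
            .remove                 = my_mdev_remove,
    };

    /* in the physical device's probe path: */
    err = mdev_register_device(&pdev->dev, &my_parent_ops);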
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 5e5b2969d931..5f4d8281832b 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -7,6 +7,7 @@
#include <linux/mmzone.h>
+#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
@@ -177,6 +178,13 @@ static inline bool vma_migratable(struct vm_area_struct *vma)
if (vma->vm_flags & (VM_IO | VM_PFNMAP))
return false;
+ /*
+ * DAX device mappings require predictable access latency, so avoid
+ * incurring periodic faults.
+ */
+ if (vma_is_dax(vma))
+ return false;
+
#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
if (vma->vm_flags & VM_HUGETLB)
return false;
diff --git a/include/linux/mfd/max77620.h b/include/linux/mfd/max77620.h
index 3ca0af07fc78..ad2a9a852aea 100644
--- a/include/linux/mfd/max77620.h
+++ b/include/linux/mfd/max77620.h
@@ -180,6 +180,7 @@
#define MAX77620_SD_CFG1_FPWM_SD_MASK BIT(2)
#define MAX77620_SD_CFG1_FPWM_SD_SKIP 0
#define MAX77620_SD_CFG1_FPWM_SD_FPWM BIT(2)
+#define MAX20024_SD_CFG1_MPOK_MASK BIT(1)
#define MAX77620_SD_CFG1_FSRADE_SD_MASK BIT(0)
#define MAX77620_SD_CFG1_FSRADE_SD_DISABLE 0
#define MAX77620_SD_CFG1_FSRADE_SD_ENABLE BIT(0)
@@ -187,6 +188,7 @@
/* LDO_CNFG2 */
#define MAX77620_LDO_POWER_MODE_MASK 0xC0
#define MAX77620_LDO_POWER_MODE_SHIFT 6
+#define MAX20024_LDO_CFG2_MPOK_MASK BIT(2)
#define MAX77620_LDO_CFG2_ADE_MASK BIT(1)
#define MAX77620_LDO_CFG2_ADE_DISABLE 0
#define MAX77620_LDO_CFG2_ADE_ENABLE BIT(1)
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 7a26286db895..fba44abd05ba 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -100,6 +100,11 @@
#define TMIO_MMC_SDIO_STATUS_QUIRK (1 << 8)
/*
+ * Some controllers have a 32-bit wide data port register
+ */
+#define TMIO_MMC_32BIT_DATA_PORT (1 << 9)
+
+/*
 * Some controllers allow setting the SDx actual clock
*/
#define TMIO_MMC_CLK_ACTUAL (1 << 10)
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 47492c9631b3..1629a0c32679 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -31,7 +31,11 @@ struct mii_if_info {
extern int mii_link_ok (struct mii_if_info *mii);
extern int mii_nway_restart (struct mii_if_info *mii);
extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
+extern int mii_ethtool_get_link_ksettings(
+ struct mii_if_info *mii, struct ethtool_link_ksettings *cmd);
extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
+extern int mii_ethtool_set_link_ksettings(
+ struct mii_if_info *mii, const struct ethtool_link_ksettings *cmd);
extern int mii_check_gmii_support(struct mii_if_info *mii);
extern void mii_check_link (struct mii_if_info *mii);
extern unsigned int mii_check_media (struct mii_if_info *mii,
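
These two helpers let mii-based drivers implement the newer ethtool link_ksettings operations by straight delegation. A sketch, assuming the usual embedding of mii_if_info in the driver's private struct (demo_* names are illustrative):

    static int demo_get_link_ksettings(struct net_device *dev,
                                       struct ethtool_link_ksettings *cmd)
    {
            struct demo_priv *priv = netdev_priv(dev);

            return mii_ethtool_get_link_ksettings(&priv->mii, cmd);
    }

    static const struct ethtool_ops demo_ethtool_ops = {
            .get_link_ksettings = demo_get_link_ksettings,
            /* .set_link_ksettings would delegate the same way */
    };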
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index f6a164297358..c9f379689dd0 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -476,7 +476,6 @@ enum {
enum {
MLX4_INTERFACE_STATE_UP = 1 << 0,
MLX4_INTERFACE_STATE_DELETION = 1 << 1,
- MLX4_INTERFACE_STATE_SHUTDOWN = 1 << 2,
};
#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
@@ -1399,7 +1398,8 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
u32 *lkey, u32 *rkey);
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
-int mlx4_test_interrupts(struct mlx4_dev *dev);
+int mlx4_test_interrupt(struct mlx4_dev *dev, int vector);
+int mlx4_test_async(struct mlx4_dev *dev);
int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
const u32 offset[], u32 value[],
size_t array_len, u8 port);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 58276144ba81..9f489365b3d3 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -277,6 +277,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08,
MLX5_EVENT_TYPE_PORT_CHANGE = 0x09,
MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
+ MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16,
MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
@@ -552,6 +553,15 @@ struct mlx5_eqe_vport_change {
__be32 rsvd1[6];
} __packed;
+struct mlx5_eqe_port_module {
+ u8 reserved_at_0[1];
+ u8 module;
+ u8 reserved_at_2[1];
+ u8 module_status;
+ u8 reserved_at_4[2];
+ u8 error_type;
+} __packed;
+
union ev_data {
__be32 raw[7];
struct mlx5_eqe_cmd cmd;
@@ -565,6 +575,7 @@ union ev_data {
struct mlx5_eqe_page_req req_pages;
struct mlx5_eqe_page_fault page_fault;
struct mlx5_eqe_vport_change vport_change;
+ struct mlx5_eqe_port_module port_module;
} __packed;
struct mlx5_eqe {
@@ -1060,6 +1071,11 @@ enum {
MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
};
+enum {
+ MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0,
+ MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2,
+};
+
static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
{
if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
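
A sketch of how an EQ handler could decode the new PORT_MODULE_EVENT payload and feed the pme_stats counters added in the driver.h hunk below. The status/error masks appear in the mlx5/port.h hunk further down; the 1-based-to-0-based mapping into status_counters is an assumption.

    static void demo_port_module_event(struct mlx5_core_dev *dev,
                                       struct mlx5_eqe *eqe)
    {
            struct mlx5_eqe_port_module *m = &eqe->data.port_module;
            u8 status = m->module_status & PORT_MODULE_EVENT_MODULE_STATUS_MASK;
            u8 error  = m->error_type & PORT_MODULE_EVENT_ERROR_TYPE_MASK;

            if (status >= MLX5_MODULE_STATUS_PLUGGED &&
                status <= MLX5_MODULE_STATUS_NUM)
                    dev->priv.pme_stats.status_counters[status - 1]++;

            if (status == MLX5_MODULE_STATUS_ERROR &&
                error < MLX5_MODULE_EVENT_ERROR_NUM)
                    dev->priv.pme_stats.error_counters[error]++;
    }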
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 85c4786427e4..0ae55361e674 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -104,6 +104,8 @@ enum {
enum {
MLX5_REG_QETCR = 0x4005,
MLX5_REG_QTCT = 0x400a,
+ MLX5_REG_DCBX_PARAM = 0x4020,
+ MLX5_REG_DCBX_APP = 0x4021,
MLX5_REG_PCAP = 0x5001,
MLX5_REG_PMTU = 0x5003,
MLX5_REG_PTYS = 0x5004,
@@ -121,6 +123,12 @@ enum {
MLX5_REG_HOST_ENDIANNESS = 0x7004,
MLX5_REG_MCIA = 0x9014,
MLX5_REG_MLCR = 0x902b,
+ MLX5_REG_MPCNT = 0x9051,
+};
+
+enum mlx5_dcbx_oper_mode {
+ MLX5E_DCBX_PARAM_VER_OPER_HOST = 0x0,
+ MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
};
enum {
@@ -208,7 +216,7 @@ struct mlx5_cmd_first {
struct mlx5_cmd_msg {
struct list_head list;
- struct cache_ent *cache;
+ struct cmd_msg_cache *parent;
u32 len;
struct mlx5_cmd_first first;
struct mlx5_cmd_mailbox *next;
@@ -228,17 +236,17 @@ struct mlx5_cmd_debug {
u16 outlen;
};
-struct cache_ent {
+struct cmd_msg_cache {
/* protect block chain allocations
*/
spinlock_t lock;
struct list_head head;
+ unsigned int max_inbox_size;
+ unsigned int num_ent;
};
-struct cmd_msg_cache {
- struct cache_ent large;
- struct cache_ent med;
-
+enum {
+ MLX5_NUM_COMMAND_CACHES = 5,
};
struct mlx5_cmd_stats {
@@ -281,7 +289,7 @@ struct mlx5_cmd {
struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
struct pci_pool *pool;
struct mlx5_cmd_debug dbg;
- struct cmd_msg_cache cache;
+ struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
int checksum_disabled;
struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};
@@ -310,6 +318,13 @@ struct mlx5_buf {
u8 page_shift;
};
+struct mlx5_frag_buf {
+ struct mlx5_buf_list *frags;
+ int npages;
+ int size;
+ u8 page_shift;
+};
+
struct mlx5_eq_tasklet {
struct list_head list;
struct list_head process_list;
@@ -418,8 +433,12 @@ struct mlx5_core_health {
u32 prev;
int miss_counter;
bool sick;
+ /* wq spinlock to synchronize draining */
+ spinlock_t wq_lock;
struct workqueue_struct *wq;
+ unsigned long flags;
struct work_struct work;
+ struct delayed_work recover_work;
};
struct mlx5_cq_table {
@@ -494,6 +513,31 @@ struct mlx5_rl_table {
struct mlx5_rl_entry *rl_entry;
};
+enum port_module_event_status_type {
+ MLX5_MODULE_STATUS_PLUGGED = 0x1,
+ MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
+ MLX5_MODULE_STATUS_ERROR = 0x3,
+ MLX5_MODULE_STATUS_NUM = 0x3,
+};
+
+enum port_module_event_error_type {
+ MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
+ MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
+ MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
+ MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
+ MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
+ MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
+ MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
+ MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
+ MLX5_MODULE_EVENT_ERROR_UNKNOWN,
+ MLX5_MODULE_EVENT_ERROR_NUM,
+};
+
+struct mlx5_port_module_event_stats {
+ u64 status_counters[MLX5_MODULE_STATUS_NUM];
+ u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
+};
+
struct mlx5_priv {
char name[MLX5_MAX_NAME_LEN];
struct mlx5_eq_table eq_table;
@@ -555,6 +599,8 @@ struct mlx5_priv {
unsigned long pci_dev_data;
struct mlx5_fc_stats fc_stats;
struct mlx5_rl_table rl_table;
+
+ struct mlx5_port_module_event_stats pme_stats;
};
enum mlx5_device_state {
@@ -626,10 +672,6 @@ struct mlx5_db {
};
enum {
- MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
-};
-
-enum {
MLX5_COMP_EQ_SIZE = 1024,
};
@@ -638,13 +680,6 @@ enum {
MLX5_PTYS_EN = 1 << 2,
};
-struct mlx5_db_pgdir {
- struct list_head list;
- DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
- __be32 *db_page;
- dma_addr_t db_dma;
-};
-
typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
struct mlx5_cmd_work_ent {
@@ -789,10 +824,14 @@ void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
+int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+ struct mlx5_frag_buf *buf, int node);
+void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
@@ -837,6 +876,7 @@ void mlx5_unregister_debugfs(void);
int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
+void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
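
A sketch of the intended flow for the new fragmented-buffer helpers, mirroring the existing mlx5_buf usage: allocate near the device's NUMA node, publish the per-page addresses into a command's PAS array, and free on teardown. Sizing of pas and the surrounding firmware command are elided.

    static int demo_frag_buf_setup(struct mlx5_core_dev *dev, __be64 *pas)
    {
            struct mlx5_frag_buf fbuf;
            int err;

            err = mlx5_frag_buf_alloc_node(dev, 8 * PAGE_SIZE, &fbuf,
                                           dev->priv.numa_node);
            if (err)
                    return err;

            /* one big-endian physical address per allocated page */
            mlx5_fill_page_frag_array(&fbuf, pas);

            /* hand pas to the CREATE_* command; then on teardown: */
            mlx5_frag_buf_free(dev, &fbuf);
            return 0;
    }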
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 93ebc5e21334..949b24b6c479 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -42,6 +42,10 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16,
};
+enum {
+ MLX5_FLOW_TABLE_TUNNEL_EN = BIT(0),
+};
+
#define LEFTOVERS_RULE_NUM 2
static inline void build_leftovers_ft_param(int *priority,
int *n_ent,
@@ -69,8 +73,8 @@ enum mlx5_flow_namespace_type {
struct mlx5_flow_table;
struct mlx5_flow_group;
-struct mlx5_flow_rule;
struct mlx5_flow_namespace;
+struct mlx5_flow_handle;
struct mlx5_flow_spec {
u8 match_criteria_enable;
@@ -97,13 +101,15 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
int prio,
int num_flow_table_entries,
int max_num_groups,
- u32 level);
+ u32 level,
+ u32 flags);
struct mlx5_flow_table *
mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
int prio,
int num_flow_table_entries,
- u32 level);
+ u32 level,
+ u32 flags);
struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
int prio,
@@ -124,21 +130,28 @@ struct mlx5_flow_group *
mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
+struct mlx5_flow_act {
+ u32 action;
+ u32 flow_tag;
+ u32 encap_id;
+};
+
/* Single destination per rule.
* Group ID is implied by the match criteria.
*/
-struct mlx5_flow_rule *
-mlx5_add_flow_rule(struct mlx5_flow_table *ft,
- struct mlx5_flow_spec *spec,
- u32 action,
- u32 flow_tag,
- struct mlx5_flow_destination *dest);
-void mlx5_del_flow_rule(struct mlx5_flow_rule *fr);
-
-int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
- struct mlx5_flow_destination *dest);
-
-struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule);
+struct mlx5_flow_handle *
+mlx5_add_flow_rules(struct mlx5_flow_table *ft,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest,
+ int dest_num);
+void mlx5_del_flow_rules(struct mlx5_flow_handle *fr);
+
+int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
+ struct mlx5_flow_destination *new_dest,
+ struct mlx5_flow_destination *old_dest);
+
+struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handler);
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
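
The rule API now takes an action descriptor plus an array of destinations, and the opaque handle type changes. A sketch of converting an old single-destination mlx5_add_flow_rule() call; MLX5_FS_DEFAULT_FLOW_TAG is assumed to be the pre-existing default tag in this header.

    static struct mlx5_flow_handle *
    demo_add_fwd_rule(struct mlx5_flow_table *ft, struct mlx5_flow_spec *spec,
                      struct mlx5_flow_destination *dest)
    {
            struct mlx5_flow_act flow_act = {
                    .action   = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                    .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
                    .encap_id = 0,
            };

            /* was: mlx5_add_flow_rule(ft, spec, action, flow_tag, dest) */
            return mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
    }

    /* teardown uses mlx5_del_flow_rules(handle) instead of
     * mlx5_del_flow_rule(rule) */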
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 6045d4d58065..a5f0fbedf1e7 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -83,6 +83,7 @@ enum {
MLX5_CMD_OP_SET_HCA_CAP = 0x109,
MLX5_CMD_OP_QUERY_ISSI = 0x10a,
MLX5_CMD_OP_SET_ISSI = 0x10b,
+ MLX5_CMD_OP_SET_DRIVER_VERSION = 0x10d,
MLX5_CMD_OP_CREATE_MKEY = 0x200,
MLX5_CMD_OP_QUERY_MKEY = 0x201,
MLX5_CMD_OP_DESTROY_MKEY = 0x202,
@@ -145,6 +146,12 @@ enum {
MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773,
MLX5_CMD_OP_SET_RATE_LIMIT = 0x780,
MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781,
+ MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782,
+ MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT = 0x783,
+ MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT = 0x784,
+ MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT = 0x785,
+ MLX5_CMD_OP_CREATE_QOS_PARA_VPORT = 0x786,
+ MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT = 0x787,
MLX5_CMD_OP_ALLOC_PD = 0x800,
MLX5_CMD_OP_DEALLOC_PD = 0x801,
MLX5_CMD_OP_ALLOC_UAR = 0x802,
@@ -537,13 +544,27 @@ struct mlx5_ifc_e_switch_cap_bits {
struct mlx5_ifc_qos_cap_bits {
u8 packet_pacing[0x1];
- u8 reserved_0[0x1f];
- u8 reserved_1[0x20];
+ u8 esw_scheduling[0x1];
+ u8 reserved_at_2[0x1e];
+
+ u8 reserved_at_20[0x20];
+
u8 packet_pacing_max_rate[0x20];
+
u8 packet_pacing_min_rate[0x20];
- u8 reserved_2[0x10];
+
+ u8 reserved_at_80[0x10];
u8 packet_pacing_rate_table_size[0x10];
- u8 reserved_3[0x760];
+
+ u8 esw_element_type[0x10];
+ u8 esw_tsar_type[0x10];
+
+ u8 reserved_at_c0[0x10];
+ u8 max_qos_para_vport[0x10];
+
+ u8 max_tsar_bw_share[0x20];
+
+ u8 reserved_at_100[0x700];
};
struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
@@ -804,7 +825,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 early_vf_enable[0x1];
u8 reserved_at_1a9[0x2];
u8 local_ca_ack_delay[0x5];
- u8 reserved_at_1af[0x2];
+ u8 port_module_event[0x1];
+ u8 reserved_at_1b0[0x1];
u8 ports_check[0x1];
u8 reserved_at_1b2[0x1];
u8 disable_link_up[0x1];
@@ -888,7 +910,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_pg_sz[0x8];
u8 bf[0x1];
- u8 reserved_at_261[0x1];
+ u8 driver_version[0x1];
u8 pad_tx_eth_packet[0x1];
u8 reserved_at_263[0x8];
u8 log_bf_reg_size[0x5];
@@ -1735,6 +1757,80 @@ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
u8 reserved_at_4c0[0x300];
};
+struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits {
+ u8 life_time_counter_high[0x20];
+
+ u8 life_time_counter_low[0x20];
+
+ u8 rx_errors[0x20];
+
+ u8 tx_errors[0x20];
+
+ u8 l0_to_recovery_eieos[0x20];
+
+ u8 l0_to_recovery_ts[0x20];
+
+ u8 l0_to_recovery_framing[0x20];
+
+ u8 l0_to_recovery_retrain[0x20];
+
+ u8 crc_error_dllp[0x20];
+
+ u8 crc_error_tlp[0x20];
+
+ u8 reserved_at_140[0x680];
+};
+
+struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits {
+ u8 life_time_counter_high[0x20];
+
+ u8 life_time_counter_low[0x20];
+
+ u8 time_to_boot_image_start[0x20];
+
+ u8 time_to_link_image[0x20];
+
+ u8 calibration_time[0x20];
+
+ u8 time_to_first_perst[0x20];
+
+ u8 time_to_detect_state[0x20];
+
+ u8 time_to_l0[0x20];
+
+ u8 time_to_crs_en[0x20];
+
+ u8 time_to_plastic_image_start[0x20];
+
+ u8 time_to_iron_image_start[0x20];
+
+ u8 perst_handler[0x20];
+
+ u8 times_in_l1[0x20];
+
+ u8 times_in_l23[0x20];
+
+ u8 dl_down[0x20];
+
+ u8 config_cycle1usec[0x20];
+
+ u8 config_cycle2to7usec[0x20];
+
+ u8 config_cycle_8to15usec[0x20];
+
+ u8 config_cycle_16_to_63usec[0x20];
+
+ u8 config_cycle_64usec[0x20];
+
+ u8 correctable_err_msg_sent[0x20];
+
+ u8 non_fatal_err_msg_sent[0x20];
+
+ u8 fatal_err_msg_sent[0x20];
+
+ u8 reserved_at_2e0[0x4e0];
+};
+
struct mlx5_ifc_cmd_inter_comp_event_bits {
u8 command_completion_vector[0x20];
@@ -2333,6 +2429,30 @@ struct mlx5_ifc_sqc_bits {
struct mlx5_ifc_wq_bits wq;
};
+enum {
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR = 0x0,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT = 0x1,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3,
+};
+
+struct mlx5_ifc_scheduling_context_bits {
+ u8 element_type[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 element_attributes[0x20];
+
+ u8 parent_element_id[0x20];
+
+ u8 reserved_at_60[0x40];
+
+ u8 bw_share[0x20];
+
+ u8 max_average_bw[0x20];
+
+ u8 reserved_at_e0[0x120];
+};
+
struct mlx5_ifc_rqtc_bits {
u8 reserved_at_0[0xa0];
@@ -2844,7 +2964,7 @@ struct mlx5_ifc_xrqc_bits {
struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context;
- u8 reserved_at_180[0x200];
+ u8 reserved_at_180[0x880];
struct mlx5_ifc_wq_bits wq;
};
@@ -2875,6 +2995,12 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
u8 reserved_at_0[0x7c0];
};
+union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits {
+ struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits pcie_perf_cntrs_grp_data_layout;
+ struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits pcie_tas_cntrs_grp_data_layout;
+ u8 reserved_at_0[0x7c0];
+};
+
union mlx5_ifc_event_auto_bits {
struct mlx5_ifc_comp_event_bits comp_event;
struct mlx5_ifc_dct_events_bits dct_events;
@@ -2920,6 +3046,29 @@ struct mlx5_ifc_register_loopback_control_bits {
u8 reserved_at_20[0x60];
};
+struct mlx5_ifc_vport_tc_element_bits {
+ u8 traffic_class[0x4];
+ u8 reserved_at_4[0xc];
+ u8 vport_number[0x10];
+};
+
+struct mlx5_ifc_vport_element_bits {
+ u8 reserved_at_0[0x10];
+ u8 vport_number[0x10];
+};
+
+enum {
+ TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0,
+ TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1,
+ TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2,
+};
+
+struct mlx5_ifc_tsar_element_bits {
+ u8 reserved_at_0[0x8];
+ u8 tsar_type[0x8];
+ u8 reserved_at_10[0x10];
+};
+
struct mlx5_ifc_teardown_hca_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -3540,6 +3689,39 @@ struct mlx5_ifc_query_special_contexts_in_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_query_scheduling_element_out_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0xc0];
+
+ struct mlx5_ifc_scheduling_context_bits scheduling_context;
+
+ u8 reserved_at_300[0x100];
+};
+
+enum {
+ SCHEDULING_HIERARCHY_E_SWITCH = 0x2,
+};
+
+struct mlx5_ifc_query_scheduling_element_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 scheduling_hierarchy[0x8];
+ u8 reserved_at_48[0x18];
+
+ u8 scheduling_element_id[0x20];
+
+ u8 reserved_at_80[0x180];
+};
+
struct mlx5_ifc_query_rqt_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -3904,6 +4086,25 @@ struct mlx5_ifc_query_issi_in_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_set_driver_version_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_driver_version_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+ u8 driver_version[64][0x8];
+};
+
struct mlx5_ifc_query_hca_vport_pkey_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -4725,6 +4926,43 @@ struct mlx5_ifc_modify_sq_in_bits {
struct mlx5_ifc_sqc_bits ctx;
};
+struct mlx5_ifc_modify_scheduling_element_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x1c0];
+};
+
+enum {
+ MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE = 0x1,
+ MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW = 0x2,
+};
+
+struct mlx5_ifc_modify_scheduling_element_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 scheduling_hierarchy[0x8];
+ u8 reserved_at_48[0x18];
+
+ u8 scheduling_element_id[0x20];
+
+ u8 reserved_at_80[0x20];
+
+ u8 modify_bitmask[0x20];
+
+ u8 reserved_at_c0[0x40];
+
+ struct mlx5_ifc_scheduling_context_bits scheduling_context;
+
+ u8 reserved_at_300[0x100];
+};
+
struct mlx5_ifc_modify_rqt_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -5390,6 +5628,30 @@ struct mlx5_ifc_destroy_sq_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_destroy_scheduling_element_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x1c0];
+};
+
+struct mlx5_ifc_destroy_scheduling_element_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 scheduling_hierarchy[0x8];
+ u8 reserved_at_48[0x18];
+
+ u8 scheduling_element_id[0x20];
+
+ u8 reserved_at_80[0x180];
+};
+
struct mlx5_ifc_destroy_rqt_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -6017,6 +6279,36 @@ struct mlx5_ifc_create_sq_in_bits {
struct mlx5_ifc_sqc_bits ctx;
};
+struct mlx5_ifc_create_scheduling_element_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ u8 scheduling_element_id[0x20];
+
+ u8 reserved_at_a0[0x160];
+};
+
+struct mlx5_ifc_create_scheduling_element_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 scheduling_hierarchy[0x8];
+ u8 reserved_at_48[0x18];
+
+ u8 reserved_at_60[0xa0];
+
+ struct mlx5_ifc_scheduling_context_bits scheduling_context;
+
+ u8 reserved_at_300[0x100];
+};
+
struct mlx5_ifc_create_rqt_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -7028,6 +7320,18 @@ struct mlx5_ifc_ppcnt_reg_bits {
union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
};
+struct mlx5_ifc_mpcnt_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 pcie_index[0x8];
+ u8 reserved_at_10[0xa];
+ u8 grp[0x6];
+
+ u8 clr[0x1];
+ u8 reserved_at_21[0x1f];
+
+ union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits counter_set;
+};
+
struct mlx5_ifc_ppad_reg_bits {
u8 reserved_at_0[0x3];
u8 single_mac[0x1];
@@ -7633,6 +7937,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
struct mlx5_ifc_ppad_reg_bits ppad_reg;
struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
+ struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg;
struct mlx5_ifc_pplm_reg_bits pplm_reg;
struct mlx5_ifc_pplr_reg_bits pplr_reg;
struct mlx5_ifc_ppsc_reg_bits ppsc_reg;
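
A sketch of pulling one counter out of the new MPCNT register via the existing mlx5_core_access_reg() helper and the MLX5_SET/MLX5_GET accessors; the exact query path here is illustrative, not taken from a driver.

    static int demo_query_pcie_rx_errors(struct mlx5_core_dev *dev, u32 *val)
    {
            u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
            u32 out[MLX5_ST_SZ_DW(mpcnt_reg)];
            void *cnt;
            int err;

            MLX5_SET(mpcnt_reg, in, grp,
                     MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
            err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
                                       MLX5_REG_MPCNT, 0, 0);
            if (err)
                    return err;

            cnt = MLX5_ADDR_OF(mpcnt_reg, out, counter_set);
            *val = MLX5_GET(pcie_perf_cntrs_grp_data_layout, cnt, rx_errors);
            return 0;
    }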
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index b3065acd20b4..e527732fb31b 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -94,6 +94,9 @@ enum mlx5e_link_mode {
#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
+#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF
+
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port);
@@ -138,8 +141,12 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
int mlx5_max_tc(struct mlx5_core_dev *mdev);
int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
+int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
+ u8 prio, u8 *tc);
int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
+int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
+ u8 tc, u8 *bw_pct);
int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
u8 *max_bw_value,
u8 *max_bw_unit);
@@ -155,4 +162,6 @@ void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
u16 offset, u16 size, u8 *data);
+int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
+int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
#endif /* __MLX5_PORT_H__ */
diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h
index 33c97dc900f8..1cde0fd53f90 100644
--- a/include/linux/mlx5/srq.h
+++ b/include/linux/mlx5/srq.h
@@ -55,7 +55,7 @@ struct mlx5_srq_attr {
u32 lwm;
u32 user_index;
u64 db_record;
- u64 *pas;
+ __be64 *pas;
};
struct mlx5_core_dev;
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 451b0bde9083..ec35157ea725 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -36,6 +36,12 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
+enum {
+ MLX5_CAP_INLINE_MODE_L2,
+ MLX5_CAP_INLINE_MODE_VPORT_CONTEXT,
+ MLX5_CAP_INLINE_MODE_NOT_REQUIRED,
+};
+
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport);
@@ -43,8 +49,8 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u8 state);
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr);
-void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
- u8 *min_inline);
+int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+ u16 vport, u8 *min_inline);
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 min_inline);
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e9caec6a51e9..a92c8d73aeaf 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1266,29 +1266,25 @@ static inline int fixup_user_fault(struct task_struct *tsk,
}
#endif
-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
+ unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
- void *buf, int len, int write);
+ void *buf, int len, unsigned int gup_flags);
-long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- unsigned int foll_flags, struct page **pages,
- struct vm_area_struct **vmas, int *nonblocking);
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
+ unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas);
long get_user_pages(unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
+ unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas);
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages, int *locked);
+ unsigned int gup_flags, struct page **pages, int *locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
- unsigned int gup_flags);
+ struct page **pages, unsigned int gup_flags);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages);
+ struct page **pages, unsigned int gup_flags);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
@@ -1306,7 +1302,7 @@ struct frame_vector {
struct frame_vector *frame_vector_create(unsigned int nr_frames);
void frame_vector_destroy(struct frame_vector *vec);
int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
- bool write, bool force, struct frame_vector *vec);
+ unsigned int gup_flags, struct frame_vector *vec);
void put_vaddr_frames(struct frame_vector *vec);
int frame_vector_to_pages(struct frame_vector *vec);
void frame_vector_to_pfns(struct frame_vector *vec);
@@ -1391,7 +1387,7 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
!vma_growsup(vma->vm_next, addr);
}
-int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);
+int vma_is_stack_for_current(struct vm_area_struct *vma);
extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
@@ -2232,6 +2228,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
#define FOLL_MLOCK 0x1000 /* lock present pages */
#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
+#define FOLL_COW 0x4000 /* internal GUP flag */
typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
void *data);
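
The write/force flag pair collapses into a single gup_flags argument throughout, so the conversion callers need is mechanical. A sketch using the FOLL_* bits already defined in this header:

    static long demo_pin_pages(unsigned long start, unsigned long nr,
                               struct page **pages)
    {
            /* was: get_user_pages(start, nr, 1, 0, pages, NULL),
             * i.e. write=1, force=0 */
            return get_user_pages(start, nr, FOLL_WRITE, pages, NULL);
    }

    /* a ptrace-style forced read passes FOLL_FORCE instead of force=1 */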
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 73fad83acbcb..95d69d498296 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -89,6 +89,8 @@ struct mmc_ext_csd {
unsigned int boot_ro_lock; /* ro lock support */
bool boot_ro_lockable;
bool ffu_capable; /* Firmware upgrade support */
+ bool cmdq_support; /* Command Queue supported */
+ unsigned int cmdq_depth; /* Command Queue depth */
#define MMC_FIRMWARE_LEN 8
u8 fwrev[MMC_FIRMWARE_LEN]; /* FW version */
u8 raw_exception_status; /* 54 */
@@ -207,18 +209,6 @@ struct sdio_func_tuple;
#define SDIO_MAX_FUNCS 7
-enum mmc_blk_status {
- MMC_BLK_SUCCESS = 0,
- MMC_BLK_PARTIAL,
- MMC_BLK_CMD_ERR,
- MMC_BLK_RETRY,
- MMC_BLK_ABORT,
- MMC_BLK_DATA_ERR,
- MMC_BLK_ECC_ERR,
- MMC_BLK_NOMEDIUM,
- MMC_BLK_NEW_REQUEST,
-};
-
/* The number of MMC physical partitions. These consist of:
* boot partitions (2), general purpose partitions (4) and
* RPMB partition (1) in MMC v4.4.
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 2b953eb8ceae..e33cc748dcfe 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -15,6 +15,18 @@ struct request;
struct mmc_data;
struct mmc_request;
+enum mmc_blk_status {
+ MMC_BLK_SUCCESS = 0,
+ MMC_BLK_PARTIAL,
+ MMC_BLK_CMD_ERR,
+ MMC_BLK_RETRY,
+ MMC_BLK_ABORT,
+ MMC_BLK_DATA_ERR,
+ MMC_BLK_ECC_ERR,
+ MMC_BLK_NOMEDIUM,
+ MMC_BLK_NEW_REQUEST,
+};
+
struct mmc_command {
u32 opcode;
u32 arg;
@@ -150,7 +162,8 @@ struct mmc_async_req;
extern int mmc_stop_bkops(struct mmc_card *);
extern int mmc_read_bkops_status(struct mmc_card *);
extern struct mmc_async_req *mmc_start_req(struct mmc_host *,
- struct mmc_async_req *, int *);
+ struct mmc_async_req *,
+ enum mmc_blk_status *);
extern int mmc_interrupt_hpi(struct mmc_card *);
extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *);
extern void mmc_wait_for_req_done(struct mmc_host *host,
@@ -163,6 +176,7 @@ extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
extern void mmc_start_bkops(struct mmc_card *card, bool from_exception);
extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
+extern int mmc_abort_tuning(struct mmc_host *host, u32 opcode);
extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
#define MMC_ERASE_ARG 0x00000000
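
With enum mmc_blk_status now living here, mmc_start_req() reports completion through the typed enum rather than a bare int. A sketch of an updated caller, inside a request-issuing loop:

    enum mmc_blk_status status;
    struct mmc_async_req *prev;

    prev = mmc_start_req(host, next_areq, &status);
    if (prev && status != MMC_BLK_SUCCESS) {
            /* dispatch on MMC_BLK_RETRY, MMC_BLK_DATA_ERR, ... */
    }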
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index f5af2bd35e7f..15db6f83f53f 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -39,6 +39,12 @@ enum {
EVENT_DATA_ERROR,
};
+enum dw_mci_cookie {
+ COOKIE_UNMAPPED,
+ COOKIE_PRE_MAPPED, /* mapped by pre_req() of dwmmc */
+ COOKIE_MAPPED, /* mapped by prepare_data() of dwmmc */
+};
+
struct mmc_data;
enum {
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 0b2439441cc8..8bc884121465 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -93,8 +93,7 @@ struct mmc_host_ops {
*/
void (*post_req)(struct mmc_host *host, struct mmc_request *req,
int err);
- void (*pre_req)(struct mmc_host *host, struct mmc_request *req,
- bool is_first_req);
+ void (*pre_req)(struct mmc_host *host, struct mmc_request *req);
void (*request)(struct mmc_host *host, struct mmc_request *req);
/*
@@ -173,7 +172,7 @@ struct mmc_async_req {
* Check error status of completed mmc request.
* Returns 0 if success otherwise non zero.
*/
- int (*err_check) (struct mmc_card *, struct mmc_async_req *);
+ enum mmc_blk_status (*err_check)(struct mmc_card *, struct mmc_async_req *);
};
/**
@@ -198,14 +197,12 @@ struct mmc_slot {
* @is_new_req wake up reason was new request
* @is_waiting_last_req mmc context waiting for single running request
* @wait wait queue
- * @lock lock to protect data fields
*/
struct mmc_context_info {
bool is_done_rcv;
bool is_new_req;
bool is_waiting_last_req;
wait_queue_head_t wait;
- spinlock_t lock;
};
struct regulator;
@@ -495,11 +492,6 @@ static inline int mmc_host_uhs(struct mmc_host *host)
MMC_CAP_UHS_DDR50);
}
-static inline int mmc_host_packed_wr(struct mmc_host *host)
-{
- return host->caps2 & MMC_CAP2_PACKED_WR;
-}
-
static inline int mmc_card_hs(struct mmc_card *card)
{
return card->host->ios.timing == MMC_TIMING_SD_HS ||
@@ -546,6 +538,11 @@ static inline void mmc_retune_recheck(struct mmc_host *host)
host->retune_now = 1;
}
+static inline bool mmc_can_retune(struct mmc_host *host)
+{
+ return host->can_retune == 1;
+}
+
void mmc_retune_pause(struct mmc_host *host);
void mmc_retune_unpause(struct mmc_host *host);
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index c376209c70ef..672730acc705 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -84,6 +84,13 @@
#define MMC_APP_CMD 55 /* ac [31:16] RCA R1 */
#define MMC_GEN_CMD 56 /* adtc [0] RD/WR R1 */
+ /* class 11 */
+#define MMC_QUE_TASK_PARAMS 44 /* ac [20:16] task id R1 */
+#define MMC_QUE_TASK_ADDR 45 /* ac [31:0] data addr R1 */
+#define MMC_EXECUTE_READ_TASK 46 /* adtc [20:16] task id R1 */
+#define MMC_EXECUTE_WRITE_TASK 47 /* adtc [20:16] task id R1 */
+#define MMC_CMDQ_TASK_MGMT 48 /* ac [20:16] task id R1b */
+
static inline bool mmc_op_multi(u32 opcode)
{
return opcode == MMC_WRITE_MULTIPLE_BLOCK ||
@@ -272,6 +279,7 @@ struct _mmc_csd {
* EXT_CSD fields
*/
+#define EXT_CSD_CMDQ_MODE_EN 15 /* R/W */
#define EXT_CSD_FLUSH_CACHE 32 /* W */
#define EXT_CSD_CACHE_CTRL 33 /* R/W */
#define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */
@@ -331,6 +339,8 @@ struct _mmc_csd {
#define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */
#define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */
#define EXT_CSD_FIRMWARE_VERSION 254 /* RO, 8 bytes */
+#define EXT_CSD_CMDQ_DEPTH 307 /* RO */
+#define EXT_CSD_CMDQ_SUPPORT 308 /* RO */
#define EXT_CSD_SUPPORTED_MODE 493 /* RO */
#define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */
#define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */
@@ -438,6 +448,13 @@ struct _mmc_csd {
#define EXT_CSD_MANUAL_BKOPS_MASK 0x01
/*
+ * Command Queue
+ */
+#define EXT_CSD_CMDQ_MODE_ENABLED BIT(0)
+#define EXT_CSD_CMDQ_DEPTH_MASK GENMASK(4, 0)
+#define EXT_CSD_CMDQ_SUPPORTED BIT(0)
+
+/*
* MMC_SWITCH access modes
*/
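
A sketch of how the new EXT_CSD fields pair with the cmdq_support/cmdq_depth members added to struct mmc_ext_csd in card.h. The +1 reflects the JEDEC N-1 depth encoding, which is an assumption worth verifying against the spec.

    static void demo_decode_cmdq(struct mmc_card *card, const u8 *ext_csd)
    {
            if (ext_csd[EXT_CSD_CMDQ_SUPPORT] & EXT_CSD_CMDQ_SUPPORTED) {
                    card->ext_csd.cmdq_support = true;
                    card->ext_csd.cmdq_depth =
                            (ext_csd[EXT_CSD_CMDQ_DEPTH] &
                             EXT_CSD_CMDQ_DEPTH_MASK) + 1;
            }
    }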
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index 3945a8c9d3cb..a7972cd3bc14 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -29,5 +29,6 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
void mmc_gpio_set_cd_isr(struct mmc_host *host,
irqreturn_t (*isr)(int irq, void *dev_id));
void mmc_gpiod_request_cd_irq(struct mmc_host *host);
+bool mmc_can_gpio_cd(struct mmc_host *host);
#endif
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7f2ae99e5daf..0f088f3a2fed 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -440,33 +440,7 @@ struct zone {
seqlock_t span_seqlock;
#endif
- /*
- * wait_table -- the array holding the hash table
- * wait_table_hash_nr_entries -- the size of the hash table array
- * wait_table_bits -- wait_table_size == (1 << wait_table_bits)
- *
- * The purpose of all these is to keep track of the people
- * waiting for a page to become available and make them
- * runnable again when possible. The trouble is that this
- * consumes a lot of space, especially when so few things
- * wait on pages at a given time. So instead of using
- * per-page waitqueues, we use a waitqueue hash table.
- *
- * The bucket discipline is to sleep on the same queue when
- * colliding and wake all in that wait queue when removing.
- * When something wakes, it must check to be sure its page is
- * truly available, a la thundering herd. The cost of a
- * collision is great, but given the expected load of the
- * table, they should be so rare as to be outweighed by the
- * benefits from the saved space.
- *
- * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
- * primary users of these fields, and in mm/page_alloc.c
- * free_area_init_core() performs the initialization of them.
- */
- wait_queue_head_t *wait_table;
- unsigned long wait_table_hash_nr_entries;
- unsigned long wait_table_bits;
+ int initialized;
/* Write-intensive fields used from the page allocator */
ZONE_PADDING(_pad1_)
@@ -546,7 +520,7 @@ static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
static inline bool zone_is_initialized(struct zone *zone)
{
- return !!zone->wait_table;
+ return zone->initialized;
}
static inline bool zone_is_empty(struct zone *zone)
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index c5d3d5024fc8..d8905a229f34 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -1184,7 +1184,7 @@ int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
int page);
/* Reset and initialize a NAND device */
-int nand_reset(struct nand_chip *chip);
+int nand_reset(struct nand_chip *chip, int chipnr);
/* Free resources held by the NAND device */
void nand_cleanup(struct nand_chip *chip);
diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h
deleted file mode 100644
index 4ac8b1977b73..000000000000
--- a/include/linux/mutex-debug.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef __LINUX_MUTEX_DEBUG_H
-#define __LINUX_MUTEX_DEBUG_H
-
-#include <linux/linkage.h>
-#include <linux/lockdep.h>
-#include <linux/debug_locks.h>
-
-/*
- * Mutexes - debugging helpers:
- */
-
-#define __DEBUG_MUTEX_INITIALIZER(lockname) \
- , .magic = &lockname
-
-#define mutex_init(mutex) \
-do { \
- static struct lock_class_key __key; \
- \
- __mutex_init((mutex), #mutex, &__key); \
-} while (0)
-
-extern void mutex_destroy(struct mutex *lock);
-
-#endif
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 2cb7531e7d7a..b97870f2debd 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -18,6 +18,7 @@
#include <linux/atomic.h>
#include <asm/processor.h>
#include <linux/osq_lock.h>
+#include <linux/debug_locks.h>
/*
* Simple, straightforward mutexes with strict semantics:
@@ -48,16 +49,12 @@
* locks and tasks (and only those tasks)
*/
struct mutex {
- /* 1: unlocked, 0: locked, negative: locked, possible waiters */
- atomic_t count;
+ atomic_long_t owner;
spinlock_t wait_lock;
- struct list_head wait_list;
-#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
- struct task_struct *owner;
-#endif
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
struct optimistic_spin_queue osq; /* Spinner MCS lock */
#endif
+ struct list_head wait_list;
#ifdef CONFIG_DEBUG_MUTEXES
void *magic;
#endif
@@ -66,6 +63,11 @@ struct mutex {
#endif
};
+static inline struct task_struct *__mutex_owner(struct mutex *lock)
+{
+ return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x03);
+}
+
/*
* This is the control structure for tasks blocked on mutex,
* which resides on the blocked task's kernel stack:
@@ -79,9 +81,20 @@ struct mutex_waiter {
};
#ifdef CONFIG_DEBUG_MUTEXES
-# include <linux/mutex-debug.h>
+
+#define __DEBUG_MUTEX_INITIALIZER(lockname) \
+ , .magic = &lockname
+
+extern void mutex_destroy(struct mutex *lock);
+
#else
+
# define __DEBUG_MUTEX_INITIALIZER(lockname)
+
+static inline void mutex_destroy(struct mutex *lock) {}
+
+#endif
+
/**
* mutex_init - initialize the mutex
* @mutex: the mutex to be initialized
@@ -90,14 +103,12 @@ struct mutex_waiter {
*
* It is not allowed to initialize an already locked mutex.
*/
-# define mutex_init(mutex) \
-do { \
- static struct lock_class_key __key; \
- \
- __mutex_init((mutex), #mutex, &__key); \
+#define mutex_init(mutex) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __mutex_init((mutex), #mutex, &__key); \
} while (0)
-static inline void mutex_destroy(struct mutex *lock) {}
-#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
@@ -107,7 +118,7 @@ static inline void mutex_destroy(struct mutex *lock) {}
#endif
#define __MUTEX_INITIALIZER(lockname) \
- { .count = ATOMIC_INIT(1) \
+ { .owner = ATOMIC_LONG_INIT(0) \
, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
__DEBUG_MUTEX_INITIALIZER(lockname) \
@@ -127,7 +138,10 @@ extern void __mutex_init(struct mutex *lock, const char *name,
*/
static inline int mutex_is_locked(struct mutex *lock)
{
- return atomic_read(&lock->count) != 1;
+ /*
+ * XXX think about spin_is_locked
+ */
+ return __mutex_owner(lock) != NULL;
}
/*
@@ -175,4 +189,35 @@ extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
+/*
+ * These values are chosen such that FAIL and SUCCESS match the
+ * values of the regular mutex_trylock().
+ */
+enum mutex_trylock_recursive_enum {
+ MUTEX_TRYLOCK_FAILED = 0,
+ MUTEX_TRYLOCK_SUCCESS = 1,
+ MUTEX_TRYLOCK_RECURSIVE,
+};
+
+/**
+ * mutex_trylock_recursive - trylock variant that allows recursive locking
+ * @lock: mutex to be locked
+ *
+ * This function should not be used, _ever_. It is purely for hysterical GEM
+ * raisins, and once those are gone this will be removed.
+ *
+ * Returns:
+ * MUTEX_TRYLOCK_FAILED - trylock failed,
+ * MUTEX_TRYLOCK_SUCCESS - lock acquired,
+ * MUTEX_TRYLOCK_RECURSIVE - we already owned the lock.
+ */
+static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
+mutex_trylock_recursive(struct mutex *lock)
+{
+ if (unlikely(__mutex_owner(lock) == current))
+ return MUTEX_TRYLOCK_RECURSIVE;
+
+ return mutex_trylock(lock);
+}
+
#endif /* __LINUX_MUTEX_H */
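
Since ->owner now packs the owning task_struct pointer together with flag bits in its two low bits (hence the ~0x03 mask in __mutex_owner()), ownership checks become a pointer compare. A sketch of the sanctioned mutex_trylock_recursive() pattern described in the kernel-doc above:

    static int demo_touch(struct mutex *lock)
    {
            switch (mutex_trylock_recursive(lock)) {
            case MUTEX_TRYLOCK_FAILED:
                    return -EBUSY;
            case MUTEX_TRYLOCK_RECURSIVE:
                    /* already held by us: do the work, don't unlock */
                    return 0;
            case MUTEX_TRYLOCK_SUCCESS:
                    /* do the work */
                    mutex_unlock(lock);
                    return 0;
            }
            return -EINVAL;
    }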
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 136ae6bbe81e..994f7423a74b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -192,6 +192,7 @@ struct net_device_stats {
#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key rps_needed;
+extern struct static_key rfs_needed;
#endif
struct neighbour;
@@ -316,7 +317,6 @@ struct napi_struct {
unsigned int gro_count;
int (*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
- spinlock_t poll_lock;
int poll_owner;
#endif
struct net_device *dev;
@@ -334,6 +334,16 @@ enum {
NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */
NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
+ NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */
+};
+
+enum {
+ NAPIF_STATE_SCHED = (1UL << NAPI_STATE_SCHED),
+ NAPIF_STATE_DISABLE = (1UL << NAPI_STATE_DISABLE),
+ NAPIF_STATE_NPSVC = (1UL << NAPI_STATE_NPSVC),
+ NAPIF_STATE_HASHED = (1UL << NAPI_STATE_HASHED),
+ NAPIF_STATE_NO_BUSY_POLL = (1UL << NAPI_STATE_NO_BUSY_POLL),
+ NAPIF_STATE_IN_BUSY_POLL = (1UL << NAPI_STATE_IN_BUSY_POLL),
};
enum gro_result {
@@ -453,32 +463,22 @@ static inline bool napi_reschedule(struct napi_struct *napi)
return false;
}
-void __napi_complete(struct napi_struct *n);
-void napi_complete_done(struct napi_struct *n, int work_done);
+bool __napi_complete(struct napi_struct *n);
+bool napi_complete_done(struct napi_struct *n, int work_done);
/**
* napi_complete - NAPI processing complete
* @n: NAPI context
*
* Mark NAPI processing as complete.
* Consider using napi_complete_done() instead.
+ * Return false if device should avoid rearming interrupts.
*/
-static inline void napi_complete(struct napi_struct *n)
+static inline bool napi_complete(struct napi_struct *n)
{
return napi_complete_done(n, 0);
}
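
Because napi_complete_done() now reports whether the NAPI actually left the scheduled state (it stays owned while busy polling is in progress), drivers can gate interrupt re-arming on the return value. A sketch of the intended poll-routine shape (the demo_* helpers are hypothetical):

    static int demo_poll(struct napi_struct *napi, int budget)
    {
            int work = demo_clean_rx(napi, budget);   /* driver RX loop */

            if (work < budget && napi_complete_done(napi, work))
                    demo_enable_rx_irq(napi);  /* rearm only if truly done */

            return work;
    }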
/**
- * napi_hash_add - add a NAPI to global hashtable
- * @napi: NAPI context
- *
- * Generate a new napi_id and store a @napi under it in napi_hash.
- * Used for busy polling (CONFIG_NET_RX_BUSY_POLL).
- * Note: This is normally automatically done from netif_napi_add(),
- * so might disappear in a future Linux version.
- */
-void napi_hash_add(struct napi_struct *napi);
-
-/**
* napi_hash_del - remove a NAPI from global table
* @napi: NAPI context
*
@@ -732,8 +732,8 @@ struct xps_dev_maps {
struct rcu_head rcu;
struct xps_map __rcu *cpu_map[0];
};
-#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
- (nr_cpu_ids * sizeof(struct xps_map *)))
+#define XPS_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \
+ (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */
#define TC_MAX_QUEUE 16
@@ -803,6 +803,7 @@ struct tc_to_netdev {
struct tc_cls_matchall_offload *cls_mall;
struct tc_cls_bpf_offload *cls_bpf;
};
+ bool egress_dev;
};
/* These structures hold the attributes of xdp state that are being passed
@@ -926,7 +927,7 @@ struct netdev_xdp {
* 3. Update dev->stats asynchronously and atomically, and define
* neither operation.
*
- * bool (*ndo_has_offload_stats)(int attr_id)
+ * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
* Return true if this device supports offload stats of this attr_id.
*
* int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
@@ -1166,7 +1167,7 @@ struct net_device_ops {
struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
struct rtnl_link_stats64 *storage);
- bool (*ndo_has_offload_stats)(int attr_id);
+ bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
int (*ndo_get_offload_stats)(int attr_id,
const struct net_device *dev,
void *attr_data);
@@ -1456,7 +1457,6 @@ enum netdev_priv_flags {
* @ptype_specific: Device-specific, protocol-specific packet handlers
*
* @adj_list: Directly linked devices, like slaves for bonding
- * @all_adj_list: All linked devices, *including* neighbours
* @features: Currently active device features
* @hw_features: User-changeable features
*
@@ -1506,6 +1506,8 @@ enum netdev_priv_flags {
* @if_port: Selectable AUI, TP, ...
* @dma: DMA channel
* @mtu: Interface MTU value
+ * @min_mtu: Interface Minimum MTU value
+ * @max_mtu: Interface Maximum MTU value
* @type: Interface hardware type
* @hard_header_len: Maximum hardware header length.
*
@@ -1619,7 +1621,7 @@ enum netdev_priv_flags {
* @dcbnl_ops: Data Center Bridging netlink ops
* @num_tc: Number of traffic classes in the net device
* @tc_to_txq: XXX: need comments on this one
- * @prio_tc_map XXX: need comments on this one
+ * @prio_tc_map: XXX: need comments on this one
*
* @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp
*
@@ -1673,11 +1675,6 @@ struct net_device {
struct list_head lower;
} adj_list;
- struct {
- struct list_head upper;
- struct list_head lower;
- } all_adj_list;
-
netdev_features_t features;
netdev_features_t hw_features;
netdev_features_t wanted_features;
@@ -1726,6 +1723,8 @@ struct net_device {
unsigned char dma;
unsigned int mtu;
+ unsigned int min_mtu;
+ unsigned int max_mtu;
unsigned short type;
unsigned short hard_header_len;
@@ -1922,34 +1921,10 @@ int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
return 0;
}
-static inline
-void netdev_reset_tc(struct net_device *dev)
-{
- dev->num_tc = 0;
- memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
- memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
-}
-
-static inline
-int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
-{
- if (tc >= dev->num_tc)
- return -EINVAL;
-
- dev->tc_to_txq[tc].count = count;
- dev->tc_to_txq[tc].offset = offset;
- return 0;
-}
-
-static inline
-int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
-{
- if (num_tc > TC_MAX_QUEUE)
- return -EINVAL;
-
- dev->num_tc = num_tc;
- return 0;
-}
+int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
+void netdev_reset_tc(struct net_device *dev);
+int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
+int netdev_set_num_tc(struct net_device *dev, u8 num_tc);
static inline
int netdev_get_num_tc(struct net_device *dev)
@@ -2169,7 +2144,10 @@ struct napi_gro_cb {
/* Used to determine if flush_id can be ignored */
u8 is_atomic:1;
- /* 5 bit hole */
+ /* Number of gro_receive callbacks this packet already went through */
+ u8 recursion_counter:4;
+
+ /* 1 bit hole */
/* used to support CHECKSUM_COMPLETE for tunneling protocols */
__wsum csum;
@@ -2180,6 +2158,40 @@ struct napi_gro_cb {
#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+ return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
+static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
+ struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ if (unlikely(gro_recursion_inc_test(skb))) {
+ NAPI_GRO_CB(skb)->flush |= 1;
+ return NULL;
+ }
+
+ return cb(head, skb);
+}
+
+typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
+ struct sk_buff *);
+static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
+ struct sock *sk,
+ struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ if (unlikely(gro_recursion_inc_test(skb))) {
+ NAPI_GRO_CB(skb)->flush |= 1;
+ return NULL;
+ }
+
+ return cb(sk, head, skb);
+}
+
struct packet_type {
__be16 type; /* This is really htons(ether_type). */
struct net_device *dev; /* NULL is wildcarded here */
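
Encapsulation gro_receive handlers are expected to recurse through call_gro_receive() (or call_gro_receive_sk()) so the per-skb counter can cut GRO off at GRO_RECURSION_LIMIT nested headers. A sketch for a tunnel handler; inner_gro_receive is hypothetical:

    static struct sk_buff **demo_tunnel_gro_receive(struct sk_buff **head,
                                                    struct sk_buff *skb)
    {
            /* parse and pull the tunnel header, then recurse safely: */
            return call_gro_receive(inner_gro_receive, head, skb);
    }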
@@ -2649,71 +2661,6 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
remcsum_unadjust((__sum16 *)ptr, grc->delta);
}
-struct skb_csum_offl_spec {
- __u16 ipv4_okay:1,
- ipv6_okay:1,
- encap_okay:1,
- ip_options_okay:1,
- ext_hdrs_okay:1,
- tcp_okay:1,
- udp_okay:1,
- sctp_okay:1,
- vlan_okay:1,
- no_encapped_ipv6:1,
- no_not_encapped:1;
-};
-
-bool __skb_csum_offload_chk(struct sk_buff *skb,
- const struct skb_csum_offl_spec *spec,
- bool *csum_encapped,
- bool csum_help);
-
-static inline bool skb_csum_offload_chk(struct sk_buff *skb,
- const struct skb_csum_offl_spec *spec,
- bool *csum_encapped,
- bool csum_help)
-{
- if (skb->ip_summed != CHECKSUM_PARTIAL)
- return false;
-
- return __skb_csum_offload_chk(skb, spec, csum_encapped, csum_help);
-}
-
-static inline bool skb_csum_offload_chk_help(struct sk_buff *skb,
- const struct skb_csum_offl_spec *spec)
-{
- bool csum_encapped;
-
- return skb_csum_offload_chk(skb, spec, &csum_encapped, true);
-}
-
-static inline bool skb_csum_off_chk_help_cmn(struct sk_buff *skb)
-{
- static const struct skb_csum_offl_spec csum_offl_spec = {
- .ipv4_okay = 1,
- .ip_options_okay = 1,
- .ipv6_okay = 1,
- .vlan_okay = 1,
- .tcp_okay = 1,
- .udp_okay = 1,
- };
-
- return skb_csum_offload_chk_help(skb, &csum_offl_spec);
-}
-
-static inline bool skb_csum_off_chk_help_cmn_v4_only(struct sk_buff *skb)
-{
- static const struct skb_csum_offl_spec csum_offl_spec = {
- .ipv4_okay = 1,
- .ip_options_okay = 1,
- .tcp_okay = 1,
- .udp_okay = 1,
- .vlan_okay = 1,
- };
-
- return skb_csum_offload_chk_help(skb, &csum_offl_spec);
-}
-
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr,
@@ -3308,7 +3255,7 @@ int dev_get_phys_port_id(struct net_device *dev,
int dev_get_phys_port_name(struct net_device *dev,
char *name, size_t len);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
-int dev_change_xdp_fd(struct net_device *dev, int fd);
+int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
@@ -3317,6 +3264,21 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
bool is_skb_forwardable(const struct net_device *dev,
const struct sk_buff *skb);
+static __always_inline int ____dev_forward_skb(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ if (skb_orphan_frags(skb, GFP_ATOMIC) ||
+ unlikely(!is_skb_forwardable(dev, skb))) {
+ atomic_long_inc(&dev->rx_dropped);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+
+ skb_scrub_packet(skb, true);
+ skb->priority = 0;
+ return 0;
+}
+
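
A minimal sketch, with a hypothetical function name, of how a forwarding path builds on the new helper; upstream's dev_forward_skb() follows essentially this shape, setting the protocol and queueing the skb for receive once the checks pass:

    static int example_forward(struct net_device *dev, struct sk_buff *skb)
    {
            int ret = ____dev_forward_skb(dev, skb);

            if (ret)
                    return ret;     /* NET_RX_DROP; skb was already freed */

            skb->protocol = eth_type_trans(skb, dev);
            return netif_rx(skb);
    }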
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
extern int netdev_budget;
@@ -3502,6 +3464,17 @@ static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
txq->xmit_lock_owner = cpu;
}
+static inline bool __netif_tx_acquire(struct netdev_queue *txq)
+{
+ __acquire(&txq->_xmit_lock);
+ return true;
+}
+
+static inline void __netif_tx_release(struct netdev_queue *txq)
+{
+ __release(&txq->_xmit_lock);
+}
+
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
spin_lock_bh(&txq->_xmit_lock);
@@ -3603,17 +3576,21 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
#define HARD_TX_LOCK(dev, txq, cpu) { \
if ((dev->features & NETIF_F_LLTX) == 0) { \
__netif_tx_lock(txq, cpu); \
+ } else { \
+ __netif_tx_acquire(txq); \
} \
}
#define HARD_TX_TRYLOCK(dev, txq) \
(((dev->features & NETIF_F_LLTX) == 0) ? \
__netif_tx_trylock(txq) : \
- true )
+ __netif_tx_acquire(txq))
#define HARD_TX_UNLOCK(dev, txq) { \
if ((dev->features & NETIF_F_LLTX) == 0) { \
__netif_tx_unlock(txq); \
+ } else { \
+ __netif_tx_release(txq); \
} \
}
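
An illustrative transmit path (names hypothetical) showing the lock pair in use; for NETIF_F_LLTX devices the macros now route through __netif_tx_acquire()/__netif_tx_release(), which are no-ops at run time but keep sparse's context tracking balanced:

    static netdev_tx_t example_xmit(struct sk_buff *skb,
                                    struct net_device *dev)
    {
            /* assume bottom halves are disabled, so the CPU id is stable */
            struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
            netdev_tx_t ret = NETDEV_TX_BUSY;

            HARD_TX_LOCK(dev, txq, smp_processor_id());
            if (!netif_xmit_frozen_or_stopped(txq))
                    ret = netdev_start_xmit(skb, dev, txq, false);
            HARD_TX_UNLOCK(dev, txq);

            return ret;
    }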
@@ -3832,12 +3809,13 @@ struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
updev; \
updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
-/* iterate through upper list, must be called under RCU read lock */
-#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
- for (iter = &(dev)->all_adj_list.upper, \
- updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
- updev; \
- updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))
+int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
+ int (*fn)(struct net_device *upper_dev,
+ void *data),
+ void *data);
+
+bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
+ struct net_device *upper_dev);
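
A minimal sketch of converting a former netdev_for_each_all_upper_dev_rcu() user to the callback walk; the callback and counter are illustrative:

    static int example_count_uppers(struct net_device *upper_dev, void *data)
    {
            unsigned int *count = data;

            (*count)++;
            return 0;       /* a non-zero return stops the walk early */
    }

    /* caller, under rcu_read_lock():
     *      unsigned int n = 0;
     *
     *      netdev_walk_all_upper_dev_rcu(dev, example_count_uppers, &n);
     */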
void *netdev_lower_get_next_private(struct net_device *dev,
struct list_head **iter);
@@ -3870,17 +3848,14 @@ struct net_device *netdev_all_lower_get_next(struct net_device *dev,
struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
struct list_head **iter);
-#define netdev_for_each_all_lower_dev(dev, ldev, iter) \
- for (iter = (dev)->all_adj_list.lower.next, \
- ldev = netdev_all_lower_get_next(dev, &(iter)); \
- ldev; \
- ldev = netdev_all_lower_get_next(dev, &(iter)))
-
-#define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \
- for (iter = (dev)->all_adj_list.lower.next, \
- ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \
- ldev; \
- ldev = netdev_all_lower_get_next_rcu(dev, &(iter)))
+int netdev_walk_all_lower_dev(struct net_device *dev,
+ int (*fn)(struct net_device *lower_dev,
+ void *data),
+ void *data);
+int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
+ int (*fn)(struct net_device *lower_dev,
+ void *data),
+ void *data);
void *netdev_adjacent_get_private(struct list_head *adj_list);
void *netdev_lower_get_first_private_rcu(struct net_device *dev);
@@ -3957,19 +3932,6 @@ static inline bool can_checksum_protocol(netdev_features_t features,
}
}
-/* Map an ethertype into IP protocol if possible */
-static inline int eproto_to_ipproto(int eproto)
-{
- switch (eproto) {
- case htons(ETH_P_IP):
- return IPPROTO_IP;
- case htons(ETH_P_IPV6):
- return IPPROTO_IPV6;
- default:
- return -1;
- }
-}
-
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev);
#else
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index abc7fdcb9eb1..a4b97be30b28 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -49,13 +49,11 @@ struct sock;
struct nf_hook_state {
unsigned int hook;
- int thresh;
u_int8_t pf;
struct net_device *in;
struct net_device *out;
struct sock *sk;
struct net *net;
- struct nf_hook_entry __rcu *hook_entries;
int (*okfn)(struct net *, struct sock *, struct sk_buff *);
};
@@ -77,14 +75,42 @@ struct nf_hook_ops {
struct nf_hook_entry {
struct nf_hook_entry __rcu *next;
- struct nf_hook_ops ops;
+ nf_hookfn *hook;
+ void *priv;
const struct nf_hook_ops *orig_ops;
};
+static inline void
+nf_hook_entry_init(struct nf_hook_entry *entry, const struct nf_hook_ops *ops)
+{
+ entry->next = NULL;
+ entry->hook = ops->hook;
+ entry->priv = ops->priv;
+ entry->orig_ops = ops;
+}
+
+static inline int
+nf_hook_entry_priority(const struct nf_hook_entry *entry)
+{
+ return entry->orig_ops->priority;
+}
+
+static inline int
+nf_hook_entry_hookfn(const struct nf_hook_entry *entry, struct sk_buff *skb,
+ struct nf_hook_state *state)
+{
+ return entry->hook(entry->priv, skb, state);
+}
+
+static inline const struct nf_hook_ops *
+nf_hook_entry_ops(const struct nf_hook_entry *entry)
+{
+ return entry->orig_ops;
+}
+
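
Illustrative only: how a hook chain could be walked with the accessors above. The real traversal lives in nf_hook_slow(); this sketch just shows the intended use of nf_hook_entry_hookfn() on the singly linked list:

    static unsigned int example_run_hooks(struct nf_hook_entry *entry,
                                          struct sk_buff *skb,
                                          struct nf_hook_state *state)
    {
            unsigned int verdict = NF_ACCEPT;

            while (entry && verdict == NF_ACCEPT) {
                    verdict = nf_hook_entry_hookfn(entry, skb, state);
                    entry = rcu_dereference(entry->next);
            }

            return verdict;
    }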
static inline void nf_hook_state_init(struct nf_hook_state *p,
- struct nf_hook_entry *hook_entry,
unsigned int hook,
- int thresh, u_int8_t pf,
+ u_int8_t pf,
struct net_device *indev,
struct net_device *outdev,
struct sock *sk,
@@ -92,13 +118,11 @@ static inline void nf_hook_state_init(struct nf_hook_state *p,
int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
p->hook = hook;
- p->thresh = thresh;
p->pf = pf;
p->in = indev;
p->out = outdev;
p->sk = sk;
p->net = net;
- RCU_INIT_POINTER(p->hook_entries, hook_entry);
p->okfn = okfn;
}
@@ -152,23 +176,20 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
#endif
-int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
+int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
+ struct nf_hook_entry *entry);
/**
- * nf_hook_thresh - call a netfilter hook
+ * nf_hook - call a netfilter hook
*
* Returns 1 if the hook has allowed the packet to pass. The function
* okfn must be invoked by the caller in this case. Any other return
* value indicates the packet has been consumed by the hook.
*/
-static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
- struct net *net,
- struct sock *sk,
- struct sk_buff *skb,
- struct net_device *indev,
- struct net_device *outdev,
- int (*okfn)(struct net *, struct sock *, struct sk_buff *),
- int thresh)
+static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
+ struct sock *sk, struct sk_buff *skb,
+ struct net_device *indev, struct net_device *outdev,
+ int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
struct nf_hook_entry *hook_head;
int ret = 1;
@@ -185,24 +206,16 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
if (hook_head) {
struct nf_hook_state state;
- nf_hook_state_init(&state, hook_head, hook, thresh,
- pf, indev, outdev, sk, net, okfn);
+ nf_hook_state_init(&state, hook, pf, indev, outdev,
+ sk, net, okfn);
- ret = nf_hook_slow(skb, &state);
+ ret = nf_hook_slow(skb, &state, hook_head);
}
rcu_read_unlock();
return ret;
}
-static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
- struct sock *sk, struct sk_buff *skb,
- struct net_device *indev, struct net_device *outdev,
- int (*okfn)(struct net *, struct sock *, struct sk_buff *))
-{
- return nf_hook_thresh(pf, hook, net, sk, skb, indev, outdev, okfn, INT_MIN);
-}
-
/* Activate hook; either okfn or kfree_skb called, unless a hook
returns NF_STOLEN (in which case, it's up to the hook to deal with
the consequences).
@@ -221,19 +234,6 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
*/
static inline int
-NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
- struct sk_buff *skb, struct net_device *in,
- struct net_device *out,
- int (*okfn)(struct net *, struct sock *, struct sk_buff *),
- int thresh)
-{
- int ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, thresh);
- if (ret == 1)
- ret = okfn(net, sk, skb);
- return ret;
-}
-
-static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
struct sk_buff *skb, struct net_device *in, struct net_device *out,
int (*okfn)(struct net *, struct sock *, struct sk_buff *),
@@ -242,7 +242,7 @@ NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
int ret;
if (!cond ||
- ((ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, INT_MIN)) == 1))
+ ((ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn)) == 1))
ret = okfn(net, sk, skb);
return ret;
}
@@ -252,7 +252,10 @@ NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct
struct net_device *in, struct net_device *out,
int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
- return NF_HOOK_THRESH(pf, hook, net, sk, skb, in, out, okfn, INT_MIN);
+ int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn);
+ if (ret == 1)
+ ret = okfn(net, sk, skb);
+ return ret;
}
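
Callers are unchanged by the refactor; for instance, IPv4 input keeps the familiar pattern (paraphrased from ip_rcv()), with okfn run only on an "accept" verdict:

    return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
                   net, NULL, skb, dev, NULL,
                   ip_rcv_finish);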
/* Call setsockopt() */
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 83b9a2e0d8d4..8e42253e5d4d 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -79,10 +79,12 @@ enum ip_set_ext_id {
IPSET_EXT_ID_MAX,
};
+struct ip_set;
+
/* Extension type */
struct ip_set_ext_type {
/* Destroy extension private data (can be NULL) */
- void (*destroy)(void *ext);
+ void (*destroy)(struct ip_set *set, void *ext);
enum ip_set_extension type;
enum ipset_cadt_flags flag;
/* Size and minimal alignment */
@@ -92,17 +94,6 @@ struct ip_set_ext_type {
extern const struct ip_set_ext_type ip_set_extensions[];
-struct ip_set_ext {
- u64 packets;
- u64 bytes;
- u32 timeout;
- u32 skbmark;
- u32 skbmarkmask;
- u32 skbprio;
- u16 skbqueue;
- char *comment;
-};
-
struct ip_set_counter {
atomic64_t bytes;
atomic64_t packets;
@@ -122,6 +113,15 @@ struct ip_set_skbinfo {
u32 skbmarkmask;
u32 skbprio;
u16 skbqueue;
+ u16 __pad;
+};
+
+struct ip_set_ext {
+ struct ip_set_skbinfo skbinfo;
+ u64 packets;
+ u64 bytes;
+ char *comment;
+ u32 timeout;
};
struct ip_set;
@@ -252,6 +252,10 @@ struct ip_set {
u8 flags;
/* Default timeout value, if enabled */
u32 timeout;
+ /* Number of elements (vs timeout) */
+ u32 elements;
+ /* Size of the dynamic extensions (vs timeout) */
+ size_t ext_size;
/* Element data size */
size_t dsize;
/* Offsets to extensions in elements */
@@ -268,7 +272,7 @@ ip_set_ext_destroy(struct ip_set *set, void *data)
*/
if (SET_WITH_COMMENT(set))
ip_set_extensions[IPSET_EXT_ID_COMMENT].destroy(
- ext_comment(data, set));
+ set, ext_comment(data, set));
}
static inline int
@@ -294,104 +298,6 @@ ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags));
}
-static inline void
-ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
-{
- atomic64_add((long long)bytes, &(counter)->bytes);
-}
-
-static inline void
-ip_set_add_packets(u64 packets, struct ip_set_counter *counter)
-{
- atomic64_add((long long)packets, &(counter)->packets);
-}
-
-static inline u64
-ip_set_get_bytes(const struct ip_set_counter *counter)
-{
- return (u64)atomic64_read(&(counter)->bytes);
-}
-
-static inline u64
-ip_set_get_packets(const struct ip_set_counter *counter)
-{
- return (u64)atomic64_read(&(counter)->packets);
-}
-
-static inline void
-ip_set_update_counter(struct ip_set_counter *counter,
- const struct ip_set_ext *ext,
- struct ip_set_ext *mext, u32 flags)
-{
- if (ext->packets != ULLONG_MAX &&
- !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) {
- ip_set_add_bytes(ext->bytes, counter);
- ip_set_add_packets(ext->packets, counter);
- }
- if (flags & IPSET_FLAG_MATCH_COUNTERS) {
- mext->packets = ip_set_get_packets(counter);
- mext->bytes = ip_set_get_bytes(counter);
- }
-}
-
-static inline void
-ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo,
- const struct ip_set_ext *ext,
- struct ip_set_ext *mext, u32 flags)
-{
- mext->skbmark = skbinfo->skbmark;
- mext->skbmarkmask = skbinfo->skbmarkmask;
- mext->skbprio = skbinfo->skbprio;
- mext->skbqueue = skbinfo->skbqueue;
-}
-static inline bool
-ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo)
-{
- /* Send nonzero parameters only */
- return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
- nla_put_net64(skb, IPSET_ATTR_SKBMARK,
- cpu_to_be64((u64)skbinfo->skbmark << 32 |
- skbinfo->skbmarkmask),
- IPSET_ATTR_PAD)) ||
- (skbinfo->skbprio &&
- nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
- cpu_to_be32(skbinfo->skbprio))) ||
- (skbinfo->skbqueue &&
- nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
- cpu_to_be16(skbinfo->skbqueue)));
-}
-
-static inline void
-ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
- const struct ip_set_ext *ext)
-{
- skbinfo->skbmark = ext->skbmark;
- skbinfo->skbmarkmask = ext->skbmarkmask;
- skbinfo->skbprio = ext->skbprio;
- skbinfo->skbqueue = ext->skbqueue;
-}
-
-static inline bool
-ip_set_put_counter(struct sk_buff *skb, struct ip_set_counter *counter)
-{
- return nla_put_net64(skb, IPSET_ATTR_BYTES,
- cpu_to_be64(ip_set_get_bytes(counter)),
- IPSET_ATTR_PAD) ||
- nla_put_net64(skb, IPSET_ATTR_PACKETS,
- cpu_to_be64(ip_set_get_packets(counter)),
- IPSET_ATTR_PAD);
-}
-
-static inline void
-ip_set_init_counter(struct ip_set_counter *counter,
- const struct ip_set_ext *ext)
-{
- if (ext->bytes != ULLONG_MAX)
- atomic64_set(&(counter)->bytes, (long long)(ext->bytes));
- if (ext->packets != ULLONG_MAX)
- atomic64_set(&(counter)->packets, (long long)(ext->packets));
-}
-
/* Netlink CB args */
enum {
IPSET_CB_NET = 0, /* net namespace */
@@ -431,6 +337,8 @@ extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[],
size_t len, size_t align);
extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
struct ip_set_ext *ext);
+extern int ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
+ const void *e, bool active);
static inline int
ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr)
@@ -546,10 +454,8 @@ bitmap_bytes(u32 a, u32 b)
#include <linux/netfilter/ipset/ip_set_timeout.h>
#include <linux/netfilter/ipset/ip_set_comment.h>
-
-int
-ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
- const void *e, bool active);
+#include <linux/netfilter/ipset/ip_set_counter.h>
+#include <linux/netfilter/ipset/ip_set_skbinfo.h>
#define IP_SET_INIT_KEXT(skb, opt, set) \
{ .bytes = (skb)->len, .packets = 1, \
diff --git a/include/linux/netfilter/ipset/ip_set_bitmap.h b/include/linux/netfilter/ipset/ip_set_bitmap.h
index 5e4662a71e01..366d6c0ea04f 100644
--- a/include/linux/netfilter/ipset/ip_set_bitmap.h
+++ b/include/linux/netfilter/ipset/ip_set_bitmap.h
@@ -6,8 +6,8 @@
#define IPSET_BITMAP_MAX_RANGE 0x0000FFFF
enum {
+ IPSET_ADD_STORE_PLAIN_TIMEOUT = -1,
IPSET_ADD_FAILED = 1,
- IPSET_ADD_STORE_PLAIN_TIMEOUT,
IPSET_ADD_START_STORED_TIMEOUT,
};
diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
index 8d0248525957..8e2bab1e8e90 100644
--- a/include/linux/netfilter/ipset/ip_set_comment.h
+++ b/include/linux/netfilter/ipset/ip_set_comment.h
@@ -20,13 +20,14 @@ ip_set_comment_uget(struct nlattr *tb)
* The kadt functions don't use the comment extensions in any way.
*/
static inline void
-ip_set_init_comment(struct ip_set_comment *comment,
+ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
const struct ip_set_ext *ext)
{
struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1);
size_t len = ext->comment ? strlen(ext->comment) : 0;
if (unlikely(c)) {
+ set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
kfree_rcu(c, rcu);
rcu_assign_pointer(comment->c, NULL);
}
@@ -34,16 +35,17 @@ ip_set_init_comment(struct ip_set_comment *comment,
return;
if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
len = IPSET_MAX_COMMENT_SIZE;
- c = kzalloc(sizeof(*c) + len + 1, GFP_ATOMIC);
+ c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC);
if (unlikely(!c))
return;
strlcpy(c->str, ext->comment, len + 1);
+ set->ext_size += sizeof(*c) + strlen(c->str) + 1;
rcu_assign_pointer(comment->c, c);
}
/* Used only when dumping a set, protected by rcu_read_lock_bh() */
static inline int
-ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment)
+ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
{
struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);
@@ -58,13 +60,14 @@ ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment)
* of the set data anymore.
*/
static inline void
-ip_set_comment_free(struct ip_set_comment *comment)
+ip_set_comment_free(struct ip_set *set, struct ip_set_comment *comment)
{
struct ip_set_comment_rcu *c;
c = rcu_dereference_protected(comment->c, 1);
if (unlikely(!c))
return;
+ set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
kfree_rcu(c, rcu);
rcu_assign_pointer(comment->c, NULL);
}
diff --git a/include/linux/netfilter/ipset/ip_set_counter.h b/include/linux/netfilter/ipset/ip_set_counter.h
new file mode 100644
index 000000000000..bb6fba480118
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_counter.h
@@ -0,0 +1,75 @@
+#ifndef _IP_SET_COUNTER_H
+#define _IP_SET_COUNTER_H
+
+/* Copyright (C) 2015 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef __KERNEL__
+
+static inline void
+ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
+{
+ atomic64_add((long long)bytes, &(counter)->bytes);
+}
+
+static inline void
+ip_set_add_packets(u64 packets, struct ip_set_counter *counter)
+{
+ atomic64_add((long long)packets, &(counter)->packets);
+}
+
+static inline u64
+ip_set_get_bytes(const struct ip_set_counter *counter)
+{
+ return (u64)atomic64_read(&(counter)->bytes);
+}
+
+static inline u64
+ip_set_get_packets(const struct ip_set_counter *counter)
+{
+ return (u64)atomic64_read(&(counter)->packets);
+}
+
+static inline void
+ip_set_update_counter(struct ip_set_counter *counter,
+ const struct ip_set_ext *ext,
+ struct ip_set_ext *mext, u32 flags)
+{
+ if (ext->packets != ULLONG_MAX &&
+ !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) {
+ ip_set_add_bytes(ext->bytes, counter);
+ ip_set_add_packets(ext->packets, counter);
+ }
+ if (flags & IPSET_FLAG_MATCH_COUNTERS) {
+ mext->packets = ip_set_get_packets(counter);
+ mext->bytes = ip_set_get_bytes(counter);
+ }
+}
+
+static inline bool
+ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter)
+{
+ return nla_put_net64(skb, IPSET_ATTR_BYTES,
+ cpu_to_be64(ip_set_get_bytes(counter)),
+ IPSET_ATTR_PAD) ||
+ nla_put_net64(skb, IPSET_ATTR_PACKETS,
+ cpu_to_be64(ip_set_get_packets(counter)),
+ IPSET_ATTR_PAD);
+}
+
+static inline void
+ip_set_init_counter(struct ip_set_counter *counter,
+ const struct ip_set_ext *ext)
+{
+ if (ext->bytes != ULLONG_MAX)
+ atomic64_set(&(counter)->bytes, (long long)(ext->bytes));
+ if (ext->packets != ULLONG_MAX)
+ atomic64_set(&(counter)->packets, (long long)(ext->packets));
+}
+
+#endif /* __KERNEL__ */
+#endif /* _IP_SET_COUNTER_H */
diff --git a/include/linux/netfilter/ipset/ip_set_skbinfo.h b/include/linux/netfilter/ipset/ip_set_skbinfo.h
new file mode 100644
index 000000000000..29d7ef2bc3fa
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_skbinfo.h
@@ -0,0 +1,46 @@
+#ifndef _IP_SET_SKBINFO_H
+#define _IP_SET_SKBINFO_H
+
+/* Copyright (C) 2015 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef __KERNEL__
+
+static inline void
+ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo,
+ const struct ip_set_ext *ext,
+ struct ip_set_ext *mext, u32 flags)
+{
+ mext->skbinfo = *skbinfo;
+}
+
+static inline bool
+ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo)
+{
+ /* Send nonzero parameters only */
+ return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
+ nla_put_net64(skb, IPSET_ATTR_SKBMARK,
+ cpu_to_be64((u64)skbinfo->skbmark << 32 |
+ skbinfo->skbmarkmask),
+ IPSET_ATTR_PAD)) ||
+ (skbinfo->skbprio &&
+ nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
+ cpu_to_be32(skbinfo->skbprio))) ||
+ (skbinfo->skbqueue &&
+ nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
+ cpu_to_be16(skbinfo->skbqueue)));
+}
+
+static inline void
+ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
+ const struct ip_set_ext *ext)
+{
+ *skbinfo = ext->skbinfo;
+}
+
+#endif /* __KERNEL__ */
+#endif /* _IP_SET_SKBINFO_H */
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
index 1d6a935c1ac5..bfb3531fd88a 100644
--- a/include/linux/netfilter/ipset/ip_set_timeout.h
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -40,7 +40,7 @@ ip_set_timeout_uget(struct nlattr *tb)
}
static inline bool
-ip_set_timeout_expired(unsigned long *t)
+ip_set_timeout_expired(const unsigned long *t)
{
return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t);
}
@@ -63,7 +63,7 @@ ip_set_timeout_set(unsigned long *timeout, u32 value)
}
static inline u32
-ip_set_timeout_get(unsigned long *timeout)
+ip_set_timeout_get(const unsigned long *timeout)
{
return *timeout == IPSET_ELEM_PERMANENT ? 0 :
jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
diff --git a/include/linux/netfilter/nf_conntrack_dccp.h b/include/linux/netfilter/nf_conntrack_dccp.h
index 40dcc82058d1..ff721d7325cf 100644
--- a/include/linux/netfilter/nf_conntrack_dccp.h
+++ b/include/linux/netfilter/nf_conntrack_dccp.h
@@ -25,7 +25,7 @@ enum ct_dccp_roles {
#define CT_DCCP_ROLE_MAX (__CT_DCCP_ROLE_MAX - 1)
#ifdef __KERNEL__
-#include <net/netfilter/nf_conntrack_tuple.h>
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
struct nf_ct_dccp {
u_int8_t role[IP_CT_DIR_MAX];
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 2ad1a2b289b5..5117e4d2ddfa 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -4,6 +4,7 @@
#include <linux/netdevice.h>
#include <linux/static_key.h>
+#include <linux/netfilter.h>
#include <uapi/linux/netfilter/x_tables.h>
/* Test a struct->invflags and a boolean for inequality */
@@ -17,14 +18,9 @@
* @target: the target extension
* @matchinfo: per-match data
* @targetinfo: per-target data
- * @net network namespace through which the action was invoked
- * @in: input netdevice
- * @out: output netdevice
+ * @state: pointer to hook state this packet came from
* @fragoff: packet is a fragment, this is the data offset
* @thoff: position of transport header relative to skb->data
- * @hook: hook number given packet came from
- * @family: Actual NFPROTO_* through which the function is invoked
- * (helpful when match->family == NFPROTO_UNSPEC)
*
* Fields written to by extensions:
*
@@ -38,15 +34,47 @@ struct xt_action_param {
union {
const void *matchinfo, *targinfo;
};
- struct net *net;
- const struct net_device *in, *out;
+ const struct nf_hook_state *state;
int fragoff;
unsigned int thoff;
- unsigned int hooknum;
- u_int8_t family;
bool hotdrop;
};
+static inline struct net *xt_net(const struct xt_action_param *par)
+{
+ return par->state->net;
+}
+
+static inline struct net_device *xt_in(const struct xt_action_param *par)
+{
+ return par->state->in;
+}
+
+static inline const char *xt_inname(const struct xt_action_param *par)
+{
+ return par->state->in->name;
+}
+
+static inline struct net_device *xt_out(const struct xt_action_param *par)
+{
+ return par->state->out;
+}
+
+static inline const char *xt_outname(const struct xt_action_param *par)
+{
+ return par->state->out->name;
+}
+
+static inline unsigned int xt_hooknum(const struct xt_action_param *par)
+{
+ return par->state->hook;
+}
+
+static inline u_int8_t xt_family(const struct xt_action_param *par)
+{
+ return par->state->pf;
+}
+
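
A minimal sketch of a match extension ported to the accessors; the match itself is hypothetical, but xt_net()/xt_in() replace the removed par->net and par->in fields one for one:

    static bool example_mt(const struct sk_buff *skb,
                           struct xt_action_param *par)
    {
            struct net *net = xt_net(par);
            const struct net_device *in = xt_in(par);

            /* e.g. match only loopback traffic in the initial netns */
            return net_eq(net, &init_net) &&
                   in && (in->flags & IFF_LOOPBACK);
    }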
/**
* struct xt_mtchk_param - parameters for match extensions'
* checkentry functions
@@ -375,38 +403,14 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
return ret;
}
+struct xt_percpu_counter_alloc_state {
+ unsigned int off;
+ const char __percpu *mem;
+};
-/* On SMP, ip(6)t_entry->counters.pcnt holds address of the
- * real (percpu) counter. On !SMP, its just the packet count,
- * so nothing needs to be done there.
- *
- * xt_percpu_counter_alloc returns the address of the percpu
- * counter, or 0 on !SMP. We force an alignment of 16 bytes
- * so that bytes/packets share a common cache line.
- *
- * Hence caller must use IS_ERR_VALUE to check for error, this
- * allows us to return 0 for single core systems without forcing
- * callers to deal with SMP vs. NONSMP issues.
- */
-static inline unsigned long xt_percpu_counter_alloc(void)
-{
- if (nr_cpu_ids > 1) {
- void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
- sizeof(struct xt_counters));
-
- if (res == NULL)
- return -ENOMEM;
-
- return (__force unsigned long) res;
- }
-
- return 0;
-}
-static inline void xt_percpu_counter_free(u64 pcnt)
-{
- if (nr_cpu_ids > 1)
- free_percpu((void __percpu *) (unsigned long) pcnt);
-}
+bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
+ struct xt_counters *counter);
+void xt_percpu_counter_free(struct xt_counters *cnt);
static inline struct xt_counters *
xt_get_this_cpu_counter(struct xt_counters *cnt)
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
index 33e37fb41d5d..59476061de86 100644
--- a/include/linux/netfilter_ingress.h
+++ b/include/linux/netfilter_ingress.h
@@ -19,6 +19,7 @@ static inline int nf_hook_ingress(struct sk_buff *skb)
{
struct nf_hook_entry *e = rcu_dereference(skb->dev->nf_hooks_ingress);
struct nf_hook_state state;
+ int ret;
/* Must recheck the ingress hook head, in the event it became NULL
* after the check in nf_hook_ingress_active evaluated to true.
@@ -26,10 +27,14 @@ static inline int nf_hook_ingress(struct sk_buff *skb)
if (unlikely(!e))
return 0;
- nf_hook_state_init(&state, e, NF_NETDEV_INGRESS, INT_MIN,
+ nf_hook_state_init(&state, NF_NETDEV_INGRESS,
NFPROTO_NETDEV, skb->dev, NULL, NULL,
dev_net(skb->dev), NULL);
- return nf_hook_slow(skb, &state);
+ ret = nf_hook_slow(skb, &state, e);
+ if (ret == 0)
+ return -1;
+
+ return ret;
}
static inline void nf_hook_ingress_init(struct net_device *dev)
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index b25ee9ffdbe6..1828900c9411 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -78,8 +78,11 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi)
struct net_device *dev = napi->dev;
if (dev && dev->npinfo) {
- spin_lock(&napi->poll_lock);
- napi->poll_owner = smp_processor_id();
+ int owner = smp_processor_id();
+
+ while (cmpxchg(&napi->poll_owner, -1, owner) != -1)
+ cpu_relax();
+
return napi;
}
return NULL;
@@ -89,10 +92,8 @@ static inline void netpoll_poll_unlock(void *have)
{
struct napi_struct *napi = have;
- if (napi) {
- napi->poll_owner = -1;
- spin_unlock(&napi->poll_lock);
- }
+ if (napi)
+ smp_store_release(&napi->poll_owner, -1);
}
static inline bool netpoll_tx_running(struct net_device *dev)
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 5ac1f57226f4..3d1c6f1b15c9 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -16,7 +16,6 @@
#define _LINUX_NVME_H
#include <linux/types.h>
-#include <linux/uuid.h>
/* NQN names in commands fields specified one size */
#define NVMF_NQN_FIELD_LEN 256
@@ -182,7 +181,7 @@ struct nvme_id_ctrl {
char fr[8];
__u8 rab;
__u8 ieee[3];
- __u8 mic;
+ __u8 cmic;
__u8 mdts;
__le16 cntlid;
__le32 ver;
@@ -202,7 +201,13 @@ struct nvme_id_ctrl {
__u8 apsta;
__le16 wctemp;
__le16 cctemp;
- __u8 rsvd270[50];
+ __le16 mtfa;
+ __le32 hmpre;
+ __le32 hmmin;
+ __u8 tnvmcap[16];
+ __u8 unvmcap[16];
+ __le32 rpmbs;
+ __u8 rsvd316[4];
__le16 kas;
__u8 rsvd322[190];
__u8 sqes;
@@ -268,7 +273,7 @@ struct nvme_id_ns {
__le16 nabo;
__le16 nabspf;
__u16 rsvd46;
- __le64 nvmcap[2];
+ __u8 nvmcap[16];
__u8 rsvd64[40];
__u8 nguid[16];
__u8 eui64[8];
@@ -278,6 +283,16 @@ struct nvme_id_ns {
};
enum {
+ NVME_ID_CNS_NS = 0x00,
+ NVME_ID_CNS_CTRL = 0x01,
+ NVME_ID_CNS_NS_ACTIVE_LIST = 0x02,
+ NVME_ID_CNS_NS_PRESENT_LIST = 0x10,
+ NVME_ID_CNS_NS_PRESENT = 0x11,
+ NVME_ID_CNS_CTRL_NS_LIST = 0x12,
+ NVME_ID_CNS_CTRL_LIST = 0x13,
+};
+
+enum {
NVME_NS_FEAT_THIN = 1 << 0,
NVME_NS_FLBAS_LBA_MASK = 0xf,
NVME_NS_FLBAS_META_EXT = 0x10,
@@ -574,8 +589,10 @@ enum nvme_admin_opcode {
nvme_admin_set_features = 0x09,
nvme_admin_get_features = 0x0a,
nvme_admin_async_event = 0x0c,
+ nvme_admin_ns_mgmt = 0x0d,
nvme_admin_activate_fw = 0x10,
nvme_admin_download_fw = 0x11,
+ nvme_admin_ns_attach = 0x15,
nvme_admin_keep_alive = 0x18,
nvme_admin_format_nvm = 0x80,
nvme_admin_security_send = 0x81,
@@ -601,6 +618,7 @@ enum {
NVME_FEAT_WRITE_ATOMIC = 0x0a,
NVME_FEAT_ASYNC_EVENT = 0x0b,
NVME_FEAT_AUTO_PST = 0x0c,
+ NVME_FEAT_HOST_MEM_BUF = 0x0d,
NVME_FEAT_KATO = 0x0f,
NVME_FEAT_SW_PROGRESS = 0x80,
NVME_FEAT_HOST_ID = 0x81,
@@ -763,7 +781,7 @@ struct nvmf_common_command {
struct nvmf_disc_rsp_page_entry {
__u8 trtype;
__u8 adrfam;
- __u8 nqntype;
+ __u8 subtype;
__u8 treq;
__le16 portid;
__le16 cntlid;
@@ -812,7 +830,7 @@ struct nvmf_connect_command {
};
struct nvmf_connect_data {
- uuid_be hostid;
+ __u8 hostid[16];
__le16 cntlid;
char resv4[238];
char subsysnqn[NVMF_NQN_FIELD_LEN];
@@ -924,12 +942,23 @@ enum {
NVME_SC_INVALID_VECTOR = 0x108,
NVME_SC_INVALID_LOG_PAGE = 0x109,
NVME_SC_INVALID_FORMAT = 0x10a,
- NVME_SC_FIRMWARE_NEEDS_RESET = 0x10b,
+ NVME_SC_FW_NEEDS_CONV_RESET = 0x10b,
NVME_SC_INVALID_QUEUE = 0x10c,
NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d,
NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e,
NVME_SC_FEATURE_NOT_PER_NS = 0x10f,
- NVME_SC_FW_NEEDS_RESET_SUBSYS = 0x110,
+ NVME_SC_FW_NEEDS_SUBSYS_RESET = 0x110,
+ NVME_SC_FW_NEEDS_RESET = 0x111,
+ NVME_SC_FW_NEEDS_MAX_TIME = 0x112,
+ NVME_SC_FW_ACIVATE_PROHIBITED = 0x113,
+ NVME_SC_OVERLAPPING_RANGE = 0x114,
+ NVME_SC_NS_INSUFFICENT_CAP = 0x115,
+ NVME_SC_NS_ID_UNAVAILABLE = 0x116,
+ NVME_SC_NS_ALREADY_ATTACHED = 0x118,
+ NVME_SC_NS_IS_PRIVATE = 0x119,
+ NVME_SC_NS_NOT_ATTACHED = 0x11a,
+ NVME_SC_THIN_PROV_NOT_SUPP = 0x11b,
+ NVME_SC_CTRL_LIST_INVALID = 0x11c,
/*
* I/O Command Set Specific - NVM commands:
@@ -961,6 +990,7 @@ enum {
NVME_SC_REFTAG_CHECK = 0x284,
NVME_SC_COMPARE_FAILED = 0x285,
NVME_SC_ACCESS_DENIED = 0x286,
+ NVME_SC_UNWRITTEN_BLOCK = 0x287,
NVME_SC_DNR = 0x4000,
@@ -993,6 +1023,7 @@ struct nvme_completion {
__le16 status; /* did the command fail, and if so, why? */
};
-#define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8))
+#define NVME_VS(major, minor, tertiary) \
+ (((major) << 16) | ((minor) << 8) | (tertiary))
#endif /* _LINUX_NVME_H */
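
With the tertiary field the macro expands as, for example:

    NVME_VS(1, 2, 1)        /* 0x010201, i.e. NVMe 1.2.1 */
    NVME_VS(1, 2, 0)        /* 0x010200, i.e. NVMe 1.2.0 */

so callers that gate features on the controller's reported version compare against three-part constants rather than the old two-part ones.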
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 4341f32516d8..271b3fdf0070 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -71,6 +71,7 @@ extern int early_init_dt_scan_chosen_stdout(void);
extern void early_init_fdt_scan_reserved_mem(void);
extern void early_init_fdt_reserve_self(void);
extern void early_init_dt_add_memory_arch(u64 base, u64 size);
+extern int early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size);
extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
bool no_map);
extern void * early_init_dt_alloc_memory_arch(u64 size, u64 align);
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index 2ab233661ae5..a58cca8bcb29 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -29,6 +29,7 @@ struct phy_device *of_phy_attach(struct net_device *dev,
extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np);
extern int of_phy_register_fixed_link(struct device_node *np);
+extern void of_phy_deregister_fixed_link(struct device_node *np);
extern bool of_phy_is_fixed_link(struct device_node *np);
#else /* CONFIG_OF */
@@ -83,6 +84,9 @@ static inline int of_phy_register_fixed_link(struct device_node *np)
{
return -ENOSYS;
}
+static inline void of_phy_deregister_fixed_link(struct device_node *np)
+{
+}
static inline bool of_phy_is_fixed_link(struct device_node *np)
{
return false;
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index dd15d39e1985..7dbe9148b2f8 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -374,16 +374,13 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
}
/*
- * Get the offset in PAGE_SIZE.
- * (TODO: hugepage should have ->index in PAGE_SIZE)
+ * Get index of the page within the radix-tree
+ * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE)
*/
-static inline pgoff_t page_to_pgoff(struct page *page)
+static inline pgoff_t page_to_index(struct page *page)
{
pgoff_t pgoff;
- if (unlikely(PageHeadHuge(page)))
- return page->index << compound_order(page);
-
if (likely(!PageTransTail(page)))
return page->index;
@@ -397,6 +394,18 @@ static inline pgoff_t page_to_pgoff(struct page *page)
}
/*
+ * Get the offset in PAGE_SIZE.
+ * (TODO: hugepage should have ->index in PAGE_SIZE)
+ */
+static inline pgoff_t page_to_pgoff(struct page *page)
+{
+ if (unlikely(PageHeadHuge(page)))
+ return page->index << compound_order(page);
+
+ return page_to_index(page);
+}
+
+/*
* Return byte-offset into filesystem object for page.
*/
static inline loff_t page_offset(struct page *page)
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 0e49f70dbd9b..30d6c162e053 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -244,6 +244,7 @@ struct pci_cap_saved_state {
struct pci_cap_saved_data cap;
};
+struct irq_affinity;
struct pcie_link_state;
struct pci_vpd;
struct pci_sriov;
@@ -332,7 +333,6 @@ struct pci_dev {
* directly, use the values stored here. They might be different!
*/
unsigned int irq;
- struct cpumask *irq_affinity;
struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
bool match_driver; /* Skip attaching driver */
@@ -1310,8 +1310,10 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev,
return rc;
return 0;
}
-int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
- unsigned int max_vecs, unsigned int flags);
+int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags,
+ const struct irq_affinity *affd);
+
void pci_free_irq_vectors(struct pci_dev *dev);
int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
@@ -1339,14 +1341,17 @@ static inline int pci_enable_msix_range(struct pci_dev *dev,
static inline int pci_enable_msix_exact(struct pci_dev *dev,
struct msix_entry *entries, int nvec)
{ return -ENOSYS; }
-static inline int pci_alloc_irq_vectors(struct pci_dev *dev,
- unsigned int min_vecs, unsigned int max_vecs,
- unsigned int flags)
+
+static inline int
+pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags,
+ const struct irq_affinity *aff_desc)
{
if (min_vecs > 1)
return -EINVAL;
return 1;
}
+
static inline void pci_free_irq_vectors(struct pci_dev *dev)
{
}
@@ -1364,6 +1369,14 @@ static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
}
#endif
+static inline int
+pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags)
+{
+ return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags,
+ NULL);
+}
+
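
Existing callers are unaffected: a driver with no affinity requirements keeps the familiar call (flags are the usual PCI_IRQ_* constants), and the NULL affinity descriptor is supplied by the wrapper:

    nvec = pci_alloc_irq_vectors(pdev, 1, 32,
                                 PCI_IRQ_LEGACY | PCI_IRQ_MSI |
                                 PCI_IRQ_MSIX);
    if (nvec < 0)
            return nvec;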
#ifdef CONFIG_PCIEPORTBUS
extern bool pcie_ports_disabled;
extern bool pcie_ports_auto;
@@ -1928,6 +1941,20 @@ static inline int pci_pcie_type(const struct pci_dev *dev)
return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
}
+static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
+{
+ while (1) {
+ if (!pci_is_pcie(dev))
+ break;
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+ return dev;
+ if (!dev->bus->self)
+ break;
+ dev = dev->bus->self;
+ }
+ return NULL;
+}
+
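
A hedged usage sketch (function name hypothetical): walk from an endpoint up to its Root Port, e.g. to check a Root Port capability before enabling a dependent feature on the child:

    static bool example_root_port_has_ltr(struct pci_dev *dev)
    {
            struct pci_dev *rp = pcie_find_root_port(dev);
            u32 cap = 0;

            if (!rp)
                    return false;

            pcie_capability_read_dword(rp, PCI_EXP_DEVCAP2, &cap);
            return cap & PCI_EXP_DEVCAP2_LTR;
    }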
void pci_request_acs(void);
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
bool pci_acs_path_enabled(struct pci_dev *start,
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 060d0ede88df..4741ecdb9817 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1257,6 +1257,7 @@ extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_disable_local(struct perf_event *event);
+extern void perf_event_disable_inatomic(struct perf_event *event);
extern void perf_event_task_tick(void);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
diff --git a/include/linux/phy.h b/include/linux/phy.h
index e25f1830fbcf..f7d95f644eed 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -25,6 +25,7 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mod_devicetable.h>
+#include <linux/phy_led_triggers.h>
#include <linux/atomic.h>
@@ -85,6 +86,21 @@ typedef enum {
} phy_interface_t;
/**
+ * phy_supported_speeds - return all speeds currently supported by a phy device
+ * @phy: The phy device to return supported speeds of.
+ * @speeds: buffer to store supported speeds in.
+ * @size: size of speeds buffer.
+ *
+ * Description: Returns the number of supported speeds, and
+ * fills the speeds buffer with the supported speeds. If the speeds buffer
+ * is too small to contain all currently supported speeds, it will return
+ * as many speeds as can fit.
+ */
+unsigned int phy_supported_speeds(struct phy_device *phy,
+ unsigned int *speeds,
+ unsigned int size);
+
+/**
* It maps 'enum phy_interface_t' found in include/linux/phy.h
* into the device tree binding of 'phy-mode', so that Ethernet
* device driver can get phy interface from device tree.
@@ -343,7 +359,7 @@ struct phy_c45_device_ids {
* giving up on the current attempt at acquiring a link
* irq: IRQ number of the PHY's interrupt (-1 if none)
* phy_timer: The timer for handling the state machine
- * phy_queue: A work_queue for the interrupt
+ * phy_queue: A work_queue for the phy_mac_interrupt
* attached_dev: The attached enet driver's device instance ptr
* adjust_link: Callback for the enet controller to respond to
* changes in the link state.
@@ -401,10 +417,19 @@ struct phy_device {
u32 advertising;
u32 lp_advertising;
+ /* Energy efficient ethernet modes which should be prohibited */
+ u32 eee_broken_modes;
+
int autoneg;
int link_timeout;
+#ifdef CONFIG_LED_TRIGGER_PHY
+ struct phy_led_trigger *phy_led_triggers;
+ unsigned int phy_num_led_triggers;
+ struct phy_led_trigger *last_triggered;
+#endif
+
/*
* Interrupt number for this PHY
* -1 means no interrupt
@@ -425,6 +450,7 @@ struct phy_device {
struct net_device *attached_dev;
u8 mdix;
+ u8 mdix_ctrl;
void (*adjust_link)(struct net_device *dev);
};
@@ -589,6 +615,13 @@ struct phy_driver {
void (*get_strings)(struct phy_device *dev, u8 *data);
void (*get_stats)(struct phy_device *dev,
struct ethtool_stats *stats, u64 *data);
+
+ /* Get and Set PHY tunables */
+ int (*get_tunable)(struct phy_device *dev,
+ struct ethtool_tunable *tuna, void *data);
+ int (*set_tunable)(struct phy_device *dev,
+ struct ethtool_tunable *tuna,
+ const void *data);
};
#define to_phy_driver(d) container_of(to_mdio_common_driver(d), \
struct phy_driver, mdiodrv)
@@ -764,6 +797,7 @@ void phy_detach(struct phy_device *phydev);
void phy_start(struct phy_device *phydev);
void phy_stop(struct phy_device *phydev);
int phy_start_aneg(struct phy_device *phydev);
+int phy_aneg_done(struct phy_device *phydev);
int phy_stop_interrupts(struct phy_device *phydev);
@@ -802,7 +836,8 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
int phy_drivers_register(struct phy_driver *new_driver, int n,
struct module *owner);
void phy_state_machine(struct work_struct *work);
-void phy_change(struct work_struct *work);
+void phy_change(struct phy_device *phydev);
+void phy_change_work(struct work_struct *work);
void phy_mac_interrupt(struct phy_device *phydev, int new_link);
void phy_start_machine(struct phy_device *phydev);
void phy_stop_machine(struct phy_device *phydev);
@@ -825,6 +860,10 @@ int phy_register_fixup_for_id(const char *bus_id,
int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
int (*run)(struct phy_device *));
+int phy_unregister_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask);
+int phy_unregister_fixup_for_id(const char *bus_id);
+int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask);
+
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable);
int phy_get_eee_err(struct phy_device *phydev);
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data);
@@ -836,6 +875,7 @@ int phy_ethtool_get_link_ksettings(struct net_device *ndev,
struct ethtool_link_ksettings *cmd);
int phy_ethtool_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *cmd);
+int phy_ethtool_nway_reset(struct net_device *ndev);
int __init mdio_bus_init(void);
void mdio_bus_exit(void);
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index ee1bed7dbfc6..78bb0d7f6b11 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -253,6 +253,13 @@ static inline int phy_set_mode(struct phy *phy, enum phy_mode mode)
return -ENOSYS;
}
+static inline int phy_reset(struct phy *phy)
+{
+ if (!phy)
+ return 0;
+ return -ENOSYS;
+}
+
static inline int phy_get_bus_width(struct phy *phy)
{
return -ENOSYS;
diff --git a/include/linux/phy_led_triggers.h b/include/linux/phy_led_triggers.h
new file mode 100644
index 000000000000..a2daea0a37d2
--- /dev/null
+++ b/include/linux/phy_led_triggers.h
@@ -0,0 +1,51 @@
+/* Copyright (C) 2016 National Instruments Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __PHY_LED_TRIGGERS
+#define __PHY_LED_TRIGGERS
+
+struct phy_device;
+
+#ifdef CONFIG_LED_TRIGGER_PHY
+
+#include <linux/leds.h>
+
+#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 10
+#define PHY_MII_BUS_ID_SIZE (20 - 3)
+
+#define PHY_LINK_LED_TRIGGER_NAME_SIZE (PHY_MII_BUS_ID_SIZE + \
+ FIELD_SIZEOF(struct mdio_device, addr)+\
+ PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE)
+
+struct phy_led_trigger {
+ struct led_trigger trigger;
+ char name[PHY_LINK_LED_TRIGGER_NAME_SIZE];
+ unsigned int speed;
+};
+
+
+extern int phy_led_triggers_register(struct phy_device *phy);
+extern void phy_led_triggers_unregister(struct phy_device *phy);
+extern void phy_led_trigger_change_speed(struct phy_device *phy);
+
+#else
+
+static inline int phy_led_triggers_register(struct phy_device *phy)
+{
+ return 0;
+}
+static inline void phy_led_triggers_unregister(struct phy_device *phy) { }
+static inline void phy_led_trigger_change_speed(struct phy_device *phy) { }
+
+#endif
+
+#endif
diff --git a/include/linux/pim.h b/include/linux/pim.h
index e1d756f81348..0e81b2778ae0 100644
--- a/include/linux/pim.h
+++ b/include/linux/pim.h
@@ -1,6 +1,7 @@
#ifndef __LINUX_PIM_H
#define __LINUX_PIM_H
+#include <linux/skbuff.h>
#include <asm/byteorder.h>
/* Message types - V1 */
@@ -9,24 +10,86 @@
/* Message types - V2 */
#define PIM_VERSION 2
-#define PIM_REGISTER 1
+
+/* RFC7761, sec 4.9:
+ * Type
+ * Types for specific PIM messages. PIM Types are:
+ *
+ * Message Type Destination
+ * ---------------------------------------------------------------------
+ * 0 = Hello Multicast to ALL-PIM-ROUTERS
+ * 1 = Register Unicast to RP
+ * 2 = Register-Stop Unicast to source of Register
+ * packet
+ * 3 = Join/Prune Multicast to ALL-PIM-ROUTERS
+ * 4 = Bootstrap Multicast to ALL-PIM-ROUTERS
+ * 5 = Assert Multicast to ALL-PIM-ROUTERS
+ * 6 = Graft (used in PIM-DM only) Unicast to RPF'(S)
+ * 7 = Graft-Ack (used in PIM-DM only) Unicast to source of Graft
+ * packet
+ * 8 = Candidate-RP-Advertisement Unicast to Domain's BSR
+ */
+enum {
+ PIM_TYPE_HELLO,
+ PIM_TYPE_REGISTER,
+ PIM_TYPE_REGISTER_STOP,
+ PIM_TYPE_JOIN_PRUNE,
+ PIM_TYPE_BOOTSTRAP,
+ PIM_TYPE_ASSERT,
+ PIM_TYPE_GRAFT,
+ PIM_TYPE_GRAFT_ACK,
+ PIM_TYPE_CANDIDATE_RP_ADV
+};
#define PIM_NULL_REGISTER cpu_to_be32(0x40000000)
-static inline bool ipmr_pimsm_enabled(void)
-{
- return IS_BUILTIN(CONFIG_IP_PIMSM_V1) || IS_BUILTIN(CONFIG_IP_PIMSM_V2);
-}
+/* RFC7761, sec 4.9:
+ * The PIM header common to all PIM messages is:
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |PIM Ver| Type | Reserved | Checksum |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct pimhdr {
+ __u8 type;
+ __u8 reserved;
+ __be16 csum;
+};
/* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps) */
-struct pimreghdr
-{
+struct pimreghdr {
__u8 type;
__u8 reserved;
__be16 csum;
__be32 flags;
};
-struct sk_buff;
-extern int pim_rcv_v1(struct sk_buff *);
+int pim_rcv_v1(struct sk_buff *skb);
+
+static inline bool ipmr_pimsm_enabled(void)
+{
+ return IS_BUILTIN(CONFIG_IP_PIMSM_V1) || IS_BUILTIN(CONFIG_IP_PIMSM_V2);
+}
+
+static inline struct pimhdr *pim_hdr(const struct sk_buff *skb)
+{
+ return (struct pimhdr *)skb_transport_header(skb);
+}
+
+static inline u8 pim_hdr_version(const struct pimhdr *pimhdr)
+{
+ return pimhdr->type >> 4;
+}
+
+static inline u8 pim_hdr_type(const struct pimhdr *pimhdr)
+{
+ return pimhdr->type & 0xf;
+}
+
+/* check if the address is 224.0.0.13, RFC7761 sec 4.3.1 */
+static inline bool pim_ipv4_all_pim_routers(__be32 addr)
+{
+ return addr == htonl(0xE000000D);
+}
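
A minimal sketch of validating a received PIMv2 packet with the helpers above; error handling is elided and the function is illustrative:

    static bool example_pim_rcv_check(const struct sk_buff *skb)
    {
            const struct pimhdr *hdr = pim_hdr(skb);

            if (pim_hdr_version(hdr) != PIM_VERSION)
                    return false;

            return pim_hdr_type(hdr) == PIM_TYPE_REGISTER;
    }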
#endif
diff --git a/include/linux/pm-trace.h b/include/linux/pm-trace.h
index ecbde7a5548e..7b78793f07d7 100644
--- a/include/linux/pm-trace.h
+++ b/include/linux/pm-trace.h
@@ -1,11 +1,17 @@
#ifndef PM_TRACE_H
#define PM_TRACE_H
+#include <linux/types.h>
#ifdef CONFIG_PM_TRACE
#include <asm/pm-trace.h>
-#include <linux/types.h>
extern int pm_trace_enabled;
+extern bool pm_trace_rtc_abused;
+
+static inline bool pm_trace_rtc_valid(void)
+{
+ return !pm_trace_rtc_abused;
+}
static inline int pm_trace_is_enabled(void)
{
@@ -24,6 +30,7 @@ extern int show_trace_dev_match(char *buf, size_t size);
#else
+static inline bool pm_trace_rtc_valid(void) { return true; }
static inline int pm_trace_is_enabled(void) { return 0; }
#define TRACE_DEVICE(dev) do { } while (0)
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 06eb353182ab..efa67b2dfee9 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -258,7 +258,7 @@ typedef struct pm_message {
* example, if it detects that a child was unplugged while the system was
* asleep).
*
- * Refer to Documentation/power/devices.txt for more information about the role
+ * Refer to Documentation/power/admin-guide/devices.rst for more information about the role
* of the above callbacks in the system suspend process.
*
* There also are callbacks related to runtime power management of devices.
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 75e4e30677f1..7eeceac52dea 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -65,19 +65,24 @@
/*
* Are we doing bottom half or hardware interrupt processing?
- * Are we in a softirq context? Interrupt context?
- * in_softirq - Are we currently processing softirq or have bh disabled?
- * in_serving_softirq - Are we currently processing softirq?
+ *
+ * in_irq() - We're in (hard) IRQ context
+ * in_softirq() - We have BH disabled, or are processing softirqs
+ * in_interrupt() - We're in NMI, IRQ, or SoftIRQ context, or have BH disabled
+ * in_serving_softirq() - We're in softirq context
+ * in_nmi() - We're in NMI context
+ * in_task() - We're in task context
+ *
+ * Note: due to the BH-disabled confusion, in_softirq() and in_interrupt()
+ * really should not be used in new code.
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-
-/*
- * Are we in NMI context?
- */
-#define in_nmi() (preempt_count() & NMI_MASK)
+#define in_nmi() (preempt_count() & NMI_MASK)
+#define in_task() (!(preempt_count() & \
+ (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
/*
* The preempt_count offset after preempt_disable();
diff --git a/include/linux/printk.h b/include/linux/printk.h
index eac1af8502bb..3472cc6b7a60 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -10,6 +10,8 @@
extern const char linux_banner[];
extern const char linux_proc_banner[];
+#define PRINTK_MAX_SINGLE_HEADER_LEN 2
+
static inline int printk_get_level(const char *buffer)
{
if (buffer[0] == KERN_SOH_ASCII && buffer[1]) {
@@ -31,6 +33,14 @@ static inline const char *printk_skip_level(const char *buffer)
return buffer;
}
+static inline const char *printk_skip_headers(const char *buffer)
+{
+ while (printk_get_level(buffer))
+ buffer = printk_skip_level(buffer);
+
+ return buffer;
+}
+
#define CONSOLE_EXT_LOG_MAX 8192
/* printk's without a loglevel use this.. */
@@ -40,10 +50,15 @@ static inline const char *printk_skip_level(const char *buffer)
#define CONSOLE_LOGLEVEL_SILENT 0 /* Mum's the word */
#define CONSOLE_LOGLEVEL_MIN 1 /* Minimum loglevel we let people use */
#define CONSOLE_LOGLEVEL_QUIET 4 /* Shhh ..., when booted with "quiet" */
-#define CONSOLE_LOGLEVEL_DEFAULT 7 /* anything MORE serious than KERN_DEBUG */
#define CONSOLE_LOGLEVEL_DEBUG 10 /* issue debug messages */
#define CONSOLE_LOGLEVEL_MOTORMOUTH 15 /* You can't shut this one up */
+/*
+ * The default used to be hard-coded at 7; it can now be set from the
+ * kernel config.
+ */
+#define CONSOLE_LOGLEVEL_DEFAULT CONFIG_CONSOLE_LOGLEVEL_DEFAULT
+
extern int console_printk[];
#define console_loglevel (console_printk[0])
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index b97bf2ef996e..368c7ad06ae5 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -82,4 +82,8 @@ static inline struct proc_dir_entry *proc_net_mkdir(
return proc_mkdir_data(name, 0, parent, net);
}
+struct ns_common;
+int open_related_ns(struct ns_common *ns,
+ struct ns_common *(*get_ns)(struct ns_common *ns));
+
#endif /* _LINUX_PROC_FS_H */
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index 92013cc9cc8c..0da29cae009b 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -89,4 +89,80 @@ extern int pstore_register(struct pstore_info *);
extern void pstore_unregister(struct pstore_info *);
extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
+struct pstore_ftrace_record {
+ unsigned long ip;
+ unsigned long parent_ip;
+ u64 ts;
+};
+
+/*
+ * ftrace-related declarations: both backends and frontends need these,
+ * so expose them here.
+ */
+
+#if NR_CPUS <= 2 && defined(CONFIG_ARM_THUMB)
+#define PSTORE_CPU_IN_IP 0x1
+#elif NR_CPUS <= 4 && defined(CONFIG_ARM)
+#define PSTORE_CPU_IN_IP 0x3
+#endif
+
+#define TS_CPU_SHIFT 8
+#define TS_CPU_MASK (BIT(TS_CPU_SHIFT) - 1)
+
+/*
+ * If CPU number can be stored in IP, store it there, otherwise store it in
+ * the time stamp. This means more timestamp resolution is available when
+ * the CPU can be stored in the IP.
+ */
+#ifdef PSTORE_CPU_IN_IP
+static inline void
+pstore_ftrace_encode_cpu(struct pstore_ftrace_record *rec, unsigned int cpu)
+{
+ rec->ip |= cpu;
+}
+
+static inline unsigned int
+pstore_ftrace_decode_cpu(struct pstore_ftrace_record *rec)
+{
+ return rec->ip & PSTORE_CPU_IN_IP;
+}
+
+static inline u64
+pstore_ftrace_read_timestamp(struct pstore_ftrace_record *rec)
+{
+ return rec->ts;
+}
+
+static inline void
+pstore_ftrace_write_timestamp(struct pstore_ftrace_record *rec, u64 val)
+{
+ rec->ts = val;
+}
+#else
+static inline void
+pstore_ftrace_encode_cpu(struct pstore_ftrace_record *rec, unsigned int cpu)
+{
+ rec->ts &= ~(TS_CPU_MASK);
+ rec->ts |= cpu;
+}
+
+static inline unsigned int
+pstore_ftrace_decode_cpu(struct pstore_ftrace_record *rec)
+{
+ return rec->ts & TS_CPU_MASK;
+}
+
+static inline u64
+pstore_ftrace_read_timestamp(struct pstore_ftrace_record *rec)
+{
+ return rec->ts >> TS_CPU_SHIFT;
+}
+
+static inline void
+pstore_ftrace_write_timestamp(struct pstore_ftrace_record *rec, u64 val)
+{
+ rec->ts = (rec->ts & TS_CPU_MASK) | (val << TS_CPU_SHIFT);
+}
+#endif
+
#endif /*_LINUX_PSTORE_H*/
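
An illustrative round trip with the helpers above (function name hypothetical): whichever encoding was compiled in, decode returns the CPU stored by encode, and the timestamp accessors hide the bits borrowed from rec->ts:

    static void example_record_init(struct pstore_ftrace_record *rec,
                                    unsigned int cpu, u64 ts)
    {
            pstore_ftrace_write_timestamp(rec, ts);
            pstore_ftrace_encode_cpu(rec, cpu);
            /* now pstore_ftrace_decode_cpu(rec) == cpu and
             * pstore_ftrace_read_timestamp(rec) == ts, provided cpu and
             * ts fit in the encoding chosen above
             */
    }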
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index c668c861c96c..9395f06e8372 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -24,6 +24,13 @@
#include <linux/list.h>
#include <linux/types.h>
+/*
+ * Choose whether access to the RAM zone requires locking or not. If a zone
+ * can be written to from different CPUs, as with ftrace for example, then
+ * PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required.
+ */
+#define PRZ_FLAG_NO_LOCK BIT(0)
+
struct persistent_ram_buffer;
struct rs_control;
@@ -40,6 +47,8 @@ struct persistent_ram_zone {
void *vaddr;
struct persistent_ram_buffer *buffer;
size_t buffer_size;
+ u32 flags;
+ raw_spinlock_t buffer_lock;
/* ECC correction */
char *par_buffer;
@@ -55,7 +64,7 @@ struct persistent_ram_zone {
struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
u32 sig, struct persistent_ram_ecc_info *ecc_info,
- unsigned int memtype);
+ unsigned int memtype, u32 flags);
void persistent_ram_free(struct persistent_ram_zone *prz);
void persistent_ram_zap(struct persistent_ram_zone *prz);
@@ -77,6 +86,8 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
* @mem_address physical memory address to contain ramoops
*/
+#define RAMOOPS_FLAG_FTRACE_PER_CPU BIT(0)
+
struct ramoops_platform_data {
unsigned long mem_size;
phys_addr_t mem_address;
@@ -86,6 +97,7 @@ struct ramoops_platform_data {
unsigned long ftrace_size;
unsigned long pmsg_size;
int dump_oops;
+ u32 flags;
struct persistent_ram_ecc_info ecc_info;
};
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 5ad54fc66cf0..a026bfd089db 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -58,7 +58,14 @@ struct system_device_crosststamp;
*
* clock operations
*
+ * @adjfine: Adjusts the frequency of the hardware clock.
+ * parameter scaled_ppm: Desired frequency offset from
+ * nominal frequency in parts per million, but with a
+ * 16 bit binary fractional field.
+ *
* @adjfreq: Adjusts the frequency of the hardware clock.
+ * This method is deprecated. New drivers should implement
+ * the @adjfine method instead.
* parameter delta: Desired frequency offset from nominal frequency
* in parts per billion
*
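
Since scaled_ppm carries a 16-bit binary fraction, a driver that only has a parts-per-billion adjustment routine can convert in its @adjfine callback; a minimal sketch, where example_adjfreq stands in for the driver's existing ppb-based helper:

    static int example_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
    {
            /* one unit of scaled_ppm is 1/65536 ppm, i.e. 1000/65536 ppb;
             * e.g. scaled_ppm == 65536 means +1 ppm == +1000 ppb
             */
            s64 ppb = (s64)scaled_ppm * 1000;

            ppb >>= 16;
            return example_adjfreq(ptp, (s32)ppb);
    }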
@@ -108,6 +115,7 @@ struct ptp_clock_info {
int n_pins;
int pps;
struct ptp_pin_desc *pin_config;
+ int (*adjfine)(struct ptp_clock_info *ptp, long scaled_ppm);
int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta);
int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts);
@@ -122,30 +130,6 @@ struct ptp_clock_info {
struct ptp_clock;
-/**
- * ptp_clock_register() - register a PTP hardware clock driver
- *
- * @info: Structure describing the new clock.
- * @parent: Pointer to the parent device of the new clock.
- *
- * Returns a valid pointer on success or PTR_ERR on failure. If PHC
- * support is missing at the configuration level, this function
- * returns NULL, and drivers are expected to gracefully handle that
- * case separately.
- */
-
-extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
- struct device *parent);
-
-/**
- * ptp_clock_unregister() - unregister a PTP hardware clock driver
- *
- * @ptp: The clock to remove from service.
- */
-
-extern int ptp_clock_unregister(struct ptp_clock *ptp);
-
-
enum ptp_clock_events {
PTP_CLOCK_ALARM,
PTP_CLOCK_EXTTS,
@@ -171,6 +155,31 @@ struct ptp_clock_event {
};
};
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+
+/**
+ * ptp_clock_register() - register a PTP hardware clock driver
+ *
+ * @info: Structure describing the new clock.
+ * @parent: Pointer to the parent device of the new clock.
+ *
+ * Returns a valid pointer on success or PTR_ERR on failure. If PHC
+ * support is missing at the configuration level, this function
+ * returns NULL, and drivers are expected to gracefully handle that
+ * case separately.
+ */
+
+extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
+ struct device *parent);
+
+/**
+ * ptp_clock_unregister() - unregister a PTP hardware clock driver
+ *
+ * @ptp: The clock to remove from service.
+ */
+
+extern int ptp_clock_unregister(struct ptp_clock *ptp);
+
/**
* ptp_clock_event() - notify the PTP layer about an event
*
@@ -202,4 +211,20 @@ extern int ptp_clock_index(struct ptp_clock *ptp);
int ptp_find_pin(struct ptp_clock *ptp,
enum ptp_pin_function func, unsigned int chan);
+#else
+static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
+ struct device *parent)
+{ return NULL; }
+static inline int ptp_clock_unregister(struct ptp_clock *ptp)
+{ return 0; }
+static inline void ptp_clock_event(struct ptp_clock *ptp,
+ struct ptp_clock_event *event)
+{ }
+static inline int ptp_clock_index(struct ptp_clock *ptp)
+{ return -1; }
+static inline int ptp_find_pin(struct ptp_clock *ptp,
+ enum ptp_pin_function func, unsigned int chan)
+{ return -1; }
+#endif
+
#endif
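Because scaled_ppm carries a 16-bit binary fraction, 1 ppm corresponds to 65536 units, and converting to the parts-per-billion unit of the deprecated @adjfreq callback reduces to a multiply and a shift: ppb = scaled_ppm * 1000 / 2^16 = scaled_ppm * 125 / 2^13. A hedged sketch of a driver reusing an existing ppb-based handler (example_adjfreq is an assumed driver-local function, not part of this API):

#include <linux/ptp_clock_kernel.h>

/* assumed pre-existing ppb-based handler in the driver */
static int example_adjfreq(struct ptp_clock_info *ptp, s32 delta_ppb);

static s32 example_scaled_ppm_to_ppb(long scaled_ppm)
{
        s64 ppb = scaled_ppm;

        ppb *= 125;     /* 1000 / 8 */
        ppb >>= 13;     /* / (65536 / 8) */
        return (s32)ppb;
}

static int example_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
        return example_adjfreq(ptp, example_scaled_ppm_to_ppb(scaled_ppm));
}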
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 72d88cf3ca25..37dfba101c6c 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -56,23 +56,6 @@ struct qed_chain_pbl_u32 {
u32 cons_page_idx;
};
-struct qed_chain_pbl {
- /* Base address of a pre-allocated buffer for pbl */
- dma_addr_t p_phys_table;
- void *p_virt_table;
-
- /* Table for keeping the virtual addresses of the chain pages,
- * respectively to the physical addresses in the pbl table.
- */
- void **pp_virt_addr_tbl;
-
- /* Index to current used page by producer/consumer */
- union {
- struct qed_chain_pbl_u16 pbl16;
- struct qed_chain_pbl_u32 pbl32;
- } u;
-};
-
struct qed_chain_u16 {
/* Cyclic index of next element to produce/consume */
u16 prod_idx;
@@ -86,46 +69,78 @@ struct qed_chain_u32 {
};
struct qed_chain {
- void *p_virt_addr;
- dma_addr_t p_phys_addr;
- void *p_prod_elem;
- void *p_cons_elem;
+ /* fastpath portion of the chain - required for commands such
+ * as produce / consume.
+ */
+ /* Point to next element to produce/consume */
+ void *p_prod_elem;
+ void *p_cons_elem;
+
+ /* Fastpath portions of the PBL [if exists] */
+ struct {
+ /* Table for keeping the virtual addresses of the chain pages,
+ * respectively to the physical addresses in the pbl table.
+ */
+ void **pp_virt_addr_tbl;
- enum qed_chain_mode mode;
- enum qed_chain_use_mode intended_use; /* used to produce/consume */
- enum qed_chain_cnt_type cnt_type;
+ union {
+ struct qed_chain_pbl_u16 u16;
+ struct qed_chain_pbl_u32 u32;
+ } c;
+ } pbl;
union {
struct qed_chain_u16 chain16;
struct qed_chain_u32 chain32;
} u;
+ /* Capacity counts only usable elements */
+ u32 capacity;
u32 page_cnt;
- /* Number of elements - capacity is for usable elements only,
- * while size will contain total number of elements [for entire chain].
+ enum qed_chain_mode mode;
+
+ /* Elements information for fast calculations */
+ u16 elem_per_page;
+ u16 elem_per_page_mask;
+ u16 elem_size;
+ u16 next_page_mask;
+ u16 usable_per_page;
+ u8 elem_unusable;
+
+ u8 cnt_type;
+
+ /* Slowpath of the chain - required for initialization and destruction,
+ * but isn't involved in regular functionality.
*/
- u32 capacity;
+
+ /* Base address of a pre-allocated buffer for pbl */
+ struct {
+ dma_addr_t p_phys_table;
+ void *p_virt_table;
+ } pbl_sp;
+
+ /* Address of first page of the chain - the address is required
+ * for fastpath operation [consume/produce] but only for the the SINGLE
+ * flavour which isn't considered fastpath [== SPQ].
+ */
+ void *p_virt_addr;
+ dma_addr_t p_phys_addr;
+
+ /* Total number of elements [for entire chain] */
u32 size;
- /* Elements information for fast calculations */
- u16 elem_per_page;
- u16 elem_per_page_mask;
- u16 elem_unusable;
- u16 usable_per_page;
- u16 elem_size;
- u16 next_page_mask;
- struct qed_chain_pbl pbl;
+ u8 intended_use;
};
#define QED_CHAIN_PBL_ENTRY_SIZE (8)
#define QED_CHAIN_PAGE_SIZE (0x1000)
#define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size))
-#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
- ((mode == QED_CHAIN_MODE_NEXT_PTR) ? \
- (1 + ((sizeof(struct qed_chain_next) - 1) / \
- (elem_size))) : 0)
+#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
+ (((mode) == QED_CHAIN_MODE_NEXT_PTR) ? \
+ (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \
+ (elem_size))) : 0)
#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
((u32)(ELEMS_PER_PAGE(elem_size) - \
@@ -186,7 +201,7 @@ static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
return p_chain->usable_per_page;
}
-static inline u16 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
+static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
{
return p_chain->elem_unusable;
}
@@ -198,7 +213,7 @@ static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
{
- return p_chain->pbl.p_phys_table;
+ return p_chain->pbl_sp.p_phys_table;
}
/**
@@ -214,10 +229,10 @@ static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
void **p_next_elem, void *idx_to_inc, void *page_to_inc)
-
{
struct qed_chain_next *p_next = NULL;
u32 page_index = 0;
+
switch (p_chain->mode) {
case QED_CHAIN_MODE_NEXT_PTR:
p_next = *p_next_elem;
@@ -305,7 +320,7 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain)
if ((p_chain->u.chain16.prod_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_prod_idx = &p_chain->u.chain16.prod_idx;
- p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx;
+ p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
p_prod_idx, p_prod_page_idx);
}
@@ -314,7 +329,7 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain)
if ((p_chain->u.chain32.prod_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_prod_idx = &p_chain->u.chain32.prod_idx;
- p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx;
+ p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
p_prod_idx, p_prod_page_idx);
}
@@ -378,7 +393,7 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain)
if ((p_chain->u.chain16.cons_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_cons_idx = &p_chain->u.chain16.cons_idx;
- p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx;
+ p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
p_cons_idx, p_cons_page_idx);
}
@@ -387,8 +402,8 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain)
if ((p_chain->u.chain32.cons_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_cons_idx = &p_chain->u.chain32.cons_idx;
- p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx;
- qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
+ p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
+ qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
p_cons_idx, p_cons_page_idx);
}
p_chain->u.chain32.cons_idx++;
@@ -429,25 +444,26 @@ static inline void qed_chain_reset(struct qed_chain *p_chain)
u32 reset_val = p_chain->page_cnt - 1;
if (is_chain_u16(p_chain)) {
- p_chain->pbl.u.pbl16.prod_page_idx = (u16)reset_val;
- p_chain->pbl.u.pbl16.cons_page_idx = (u16)reset_val;
+ p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
+ p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
} else {
- p_chain->pbl.u.pbl32.prod_page_idx = reset_val;
- p_chain->pbl.u.pbl32.cons_page_idx = reset_val;
+ p_chain->pbl.c.u32.prod_page_idx = reset_val;
+ p_chain->pbl.c.u32.cons_page_idx = reset_val;
}
}
switch (p_chain->intended_use) {
- case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
- case QED_CHAIN_USE_TO_PRODUCE:
- /* Do nothing */
- break;
-
case QED_CHAIN_USE_TO_CONSUME:
/* produce empty elements */
for (i = 0; i < p_chain->capacity; i++)
qed_chain_recycle_consumed(p_chain);
break;
+
+ case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
+ case QED_CHAIN_USE_TO_PRODUCE:
+ default:
+ /* Do nothing */
+ break;
}
}
@@ -473,13 +489,13 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain,
p_chain->p_virt_addr = NULL;
p_chain->p_phys_addr = 0;
p_chain->elem_size = elem_size;
- p_chain->intended_use = intended_use;
+ p_chain->intended_use = (u8)intended_use;
p_chain->mode = mode;
- p_chain->cnt_type = cnt_type;
+ p_chain->cnt_type = (u8)cnt_type;
- p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
+ p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
- p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
+ p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
p_chain->next_page_mask = (p_chain->usable_per_page &
p_chain->elem_per_page_mask);
@@ -488,8 +504,8 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain,
p_chain->capacity = p_chain->usable_per_page * page_cnt;
p_chain->size = p_chain->elem_per_page * page_cnt;
- p_chain->pbl.p_phys_table = 0;
- p_chain->pbl.p_virt_table = NULL;
+ p_chain->pbl_sp.p_phys_table = 0;
+ p_chain->pbl_sp.p_virt_table = NULL;
p_chain->pbl.pp_virt_addr_tbl = NULL;
}
@@ -530,8 +546,8 @@ static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
dma_addr_t p_phys_pbl,
void **pp_virt_addr_tbl)
{
- p_chain->pbl.p_phys_table = p_phys_pbl;
- p_chain->pbl.p_virt_table = p_virt_pbl;
+ p_chain->pbl_sp.p_phys_table = p_phys_pbl;
+ p_chain->pbl_sp.p_virt_table = p_virt_pbl;
p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
}
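A hedged sketch of the fastpath cycle the reorganized layout optimizes for — produce and consume touch only the fields now grouped at the top of struct qed_chain. The element layout is a made-up placeholder and occupancy tracking is assumed to be handled by the caller:

#include <linux/qed/qed_chain.h>

struct example_elem {
        __le32 data;
};

static void example_cycle(struct qed_chain *chain)
{
        struct example_elem *prod, *cons;

        /* occupancy tracking is the caller's job and is elided here */
        prod = qed_chain_produce(chain);        /* fastpath fields only */
        prod->data = cpu_to_le32(0xabcd);

        cons = qed_chain_consume(chain);
        if (cons)
                qed_chain_recycle_consumed(chain);
}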
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 33c24ebc9b7f..7a52f7c58c37 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -15,6 +15,29 @@
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_iov_if.h>
+struct qed_queue_start_common_params {
+ /* Should always be relative to entity sending this. */
+ u8 vport_id;
+ u16 queue_id;
+
+ /* Relative, but relevant only for PFs */
+ u8 stats_id;
+
+ /* These are always absolute */
+ u16 sb;
+ u8 sb_idx;
+};
+
+struct qed_rxq_start_ret_params {
+ void __iomem *p_prod;
+ void *p_handle;
+};
+
+struct qed_txq_start_ret_params {
+ void __iomem *p_doorbell;
+ void *p_handle;
+};
+
struct qed_dev_eth_info {
struct qed_dev_info common;
@@ -22,7 +45,8 @@ struct qed_dev_eth_info {
u8 num_tc;
u8 port_mac[ETH_ALEN];
- u8 num_vlan_filters;
+ u16 num_vlan_filters;
+ u16 num_mac_filters;
/* Legacy VF - this affects the datapath, so qede has to know */
bool is_legacy;
@@ -55,18 +79,6 @@ struct qed_start_vport_params {
bool clear_stats;
};
-struct qed_stop_rxq_params {
- u8 rss_id;
- u8 rx_queue_id;
- u8 vport_id;
- bool eq_completion_only;
-};
-
-struct qed_stop_txq_params {
- u8 rss_id;
- u8 tx_queue_id;
-};
-
enum qed_filter_rx_mode_type {
QED_FILTER_RX_MODE_TYPE_REGULAR,
QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
@@ -111,15 +123,6 @@ struct qed_filter_params {
union qed_filter_type_params filter;
};
-struct qed_queue_start_common_params {
- u8 rss_id;
- u8 queue_id;
- u8 vport_id;
- u16 sb;
- u16 sb_idx;
- u16 vf_qid;
-};
-
struct qed_tunn_params {
u16 vxlan_port;
u8 update_vxlan_port;
@@ -129,7 +132,7 @@ struct qed_tunn_params {
struct qed_eth_cb_ops {
struct qed_common_cb_ops common;
- void (*force_mac) (void *dev, u8 *mac);
+ void (*force_mac) (void *dev, u8 *mac, bool forced);
};
#ifdef CONFIG_DCB
@@ -219,24 +222,24 @@ struct qed_eth_ops {
struct qed_update_vport_params *params);
int (*q_rx_start)(struct qed_dev *cdev,
+ u8 rss_num,
struct qed_queue_start_common_params *params,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size,
- void __iomem **pp_prod);
+ struct qed_rxq_start_ret_params *ret_params);
- int (*q_rx_stop)(struct qed_dev *cdev,
- struct qed_stop_rxq_params *params);
+ int (*q_rx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle);
int (*q_tx_start)(struct qed_dev *cdev,
+ u8 rss_num,
struct qed_queue_start_common_params *params,
dma_addr_t pbl_addr,
u16 pbl_size,
- void __iomem **pp_doorbell);
+ struct qed_txq_start_ret_params *ret_params);
- int (*q_tx_stop)(struct qed_dev *cdev,
- struct qed_stop_txq_params *params);
+ int (*q_tx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle);
int (*filter_config)(struct qed_dev *cdev,
struct qed_filter_params *params);
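The start/stop contract is now handle-based: the queue identity comes back through the new ret_params structs, and the matching stop op consumes that handle instead of a descriptive params struct. A minimal sketch, with everything other than the ops table's own signatures assumed for illustration:

#include <linux/qed/qed_eth_if.h>

static int example_rxq_cycle(const struct qed_eth_ops *ops,
                             struct qed_dev *cdev,
                             struct qed_queue_start_common_params *params,
                             dma_addr_t bd_chain_phys, dma_addr_t cqe_pbl,
                             u16 cqe_pbl_size)
{
        struct qed_rxq_start_ret_params ret_params;
        int rc;

        rc = ops->q_rx_start(cdev, 0 /* rss_num */, params,
                             PAGE_SIZE /* bd_max_bytes */,
                             bd_chain_phys, cqe_pbl, cqe_pbl_size,
                             &ret_params);
        if (rc)
                return rc;

        /* ret_params.p_prod is the producer doorbell; p_handle is the
         * opaque identity handed back to the stop op on teardown */
        return ops->q_rx_stop(cdev, 0 /* rss_id */, ret_params.p_handle);
}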
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index f9ae903bbb84..4b454f4f5b25 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -146,6 +146,7 @@ enum qed_led_mode {
#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
#define QED_COALESCE_MAX 0xFF
+#define QED_DEFAULT_RX_USECS 12
/* forward */
struct qed_dev;
@@ -165,6 +166,7 @@ struct qed_iscsi_pf_params {
u32 max_cwnd;
u16 cq_num_entries;
u16 cmdq_num_entries;
+ u32 two_msl_timer;
u16 dup_ack_threshold;
u16 tx_sws_timer;
u16 min_rto;
@@ -266,11 +268,15 @@ struct qed_dev_info {
u8 mf_mode;
bool tx_switching;
bool rdma_supported;
+ u16 mtu;
+
+ bool wol_support;
};
enum qed_sb_type {
QED_SB_TYPE_L2_QUEUE,
QED_SB_TYPE_CNQ,
+ QED_SB_TYPE_STORAGE,
};
enum qed_protocol {
@@ -400,6 +406,15 @@ struct qed_selftest_ops {
* @return 0 on success, error otherwise.
*/
int (*selftest_clock)(struct qed_dev *cdev);
+
+/**
+ * @brief selftest_nvram - Perform nvram test
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*selftest_nvram) (struct qed_dev *cdev);
};
struct qed_common_ops {
@@ -553,6 +568,41 @@ struct qed_common_ops {
*/
int (*set_led)(struct qed_dev *cdev,
enum qed_led_mode mode);
+
+/**
+ * @brief update_drv_state - API to inform the change in the driver state.
+ *
+ * @param cdev
+ * @param active
+ *
+ */
+ int (*update_drv_state)(struct qed_dev *cdev, bool active);
+
+/**
+ * @brief update_mac - API to inform the change in the mac address
+ *
+ * @param cdev
+ * @param mac
+ *
+ */
+ int (*update_mac)(struct qed_dev *cdev, u8 *mac);
+
+/**
+ * @brief update_mtu - API to inform the change in the mtu
+ *
+ * @param cdev
+ * @param mtu
+ *
+ */
+ int (*update_mtu)(struct qed_dev *cdev, u16 mtu);
+
+/**
+ * @brief update_wol - update of changes in the WoL configuration
+ *
+ * @param cdev
+ * @param enabled - true iff WoL should be enabled.
+ */
+ int (*update_wol) (struct qed_dev *cdev, bool enabled);
};
#define MASK_FIELD(_name, _value) \
diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h
new file mode 100644
index 000000000000..d27912480cb3
--- /dev/null
+++ b/include/linux/qed/qed_iscsi_if.h
@@ -0,0 +1,229 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_ISCSI_IF_H
+#define _QED_ISCSI_IF_H
+#include <linux/types.h>
+#include <linux/qed/qed_if.h>
+
+typedef int (*iscsi_event_cb_t) (void *context,
+ u8 fw_event_code, void *fw_handle);
+struct qed_iscsi_stats {
+ u64 iscsi_rx_bytes_cnt;
+ u64 iscsi_rx_packet_cnt;
+ u64 iscsi_rx_new_ooo_isle_events_cnt;
+ u32 iscsi_cmdq_threshold_cnt;
+ u32 iscsi_rq_threshold_cnt;
+ u32 iscsi_immq_threshold_cnt;
+
+ u64 iscsi_rx_dropped_pdus_task_not_valid;
+
+ u64 iscsi_rx_data_pdu_cnt;
+ u64 iscsi_rx_r2t_pdu_cnt;
+ u64 iscsi_rx_total_pdu_cnt;
+
+ u64 iscsi_tx_go_to_slow_start_event_cnt;
+ u64 iscsi_tx_fast_retransmit_event_cnt;
+
+ u64 iscsi_tx_data_pdu_cnt;
+ u64 iscsi_tx_r2t_pdu_cnt;
+ u64 iscsi_tx_total_pdu_cnt;
+
+ u64 iscsi_tx_bytes_cnt;
+ u64 iscsi_tx_packet_cnt;
+};
+
+struct qed_dev_iscsi_info {
+ struct qed_dev_info common;
+
+ void __iomem *primary_dbq_rq_addr;
+ void __iomem *secondary_bdq_rq_addr;
+};
+
+struct qed_iscsi_id_params {
+ u8 mac[ETH_ALEN];
+ u32 ip[4];
+ u16 port;
+};
+
+struct qed_iscsi_params_offload {
+ u8 layer_code;
+ dma_addr_t sq_pbl_addr;
+ u32 initial_ack;
+
+ struct qed_iscsi_id_params src;
+ struct qed_iscsi_id_params dst;
+ u16 vlan_id;
+ u8 tcp_flags;
+ u8 ip_version;
+ u8 default_cq;
+
+ u8 ka_max_probe_cnt;
+ u8 dup_ack_theshold;
+ u32 rcv_next;
+ u32 snd_una;
+ u32 snd_next;
+ u32 snd_max;
+ u32 snd_wnd;
+ u32 rcv_wnd;
+ u32 snd_wl1;
+ u32 cwnd;
+ u32 ss_thresh;
+ u16 srtt;
+ u16 rtt_var;
+ u32 ts_time;
+ u32 ts_recent;
+ u32 ts_recent_age;
+ u32 total_rt;
+ u32 ka_timeout_delta;
+ u32 rt_timeout_delta;
+ u8 dup_ack_cnt;
+ u8 snd_wnd_probe_cnt;
+ u8 ka_probe_cnt;
+ u8 rt_cnt;
+ u32 flow_label;
+ u32 ka_timeout;
+ u32 ka_interval;
+ u32 max_rt_time;
+ u32 initial_rcv_wnd;
+ u8 ttl;
+ u8 tos_or_tc;
+ u16 remote_port;
+ u16 local_port;
+ u16 mss;
+ u8 snd_wnd_scale;
+ u8 rcv_wnd_scale;
+ u32 ts_ticks_per_second;
+ u16 da_timeout_value;
+ u8 ack_frequency;
+};
+
+struct qed_iscsi_params_update {
+ u8 update_flag;
+#define QED_ISCSI_CONN_HD_EN BIT(0)
+#define QED_ISCSI_CONN_DD_EN BIT(1)
+#define QED_ISCSI_CONN_INITIAL_R2T BIT(2)
+#define QED_ISCSI_CONN_IMMEDIATE_DATA BIT(3)
+
+ u32 max_seq_size;
+ u32 max_recv_pdu_length;
+ u32 max_send_pdu_length;
+ u32 first_seq_length;
+ u32 exp_stat_sn;
+};
+
+#define MAX_TID_BLOCKS_ISCSI (512)
+struct qed_iscsi_tid {
+ u32 size; /* In bytes per task */
+ u32 num_tids_per_block;
+ u8 *blocks[MAX_TID_BLOCKS_ISCSI];
+};
+
+struct qed_iscsi_cb_ops {
+ struct qed_common_cb_ops common;
+};
+
+/**
+ * struct qed_iscsi_ops - qed iSCSI operations.
+ * @common: common operations pointer
+ * @ll2: light L2 operations pointer
+ * @fill_dev_info: fills iSCSI specific information
+ * @param cdev
+ * @param info
+ * @return 0 on success, otherwise error value.
+ * @register_ops: register iscsi operations
+ * @param cdev
+ * @param ops - specified using qed_iscsi_cb_ops
+ * @param cookie - driver private
+ * @start: starts iscsi in FW
+ * @param cdev
+ * @param tasks - qed will fill information about tasks
+ * @return 0 on success, otherwise error value.
+ * @stop: stops iscsi in FW
+ * @param cdev
+ * @return 0 on success, otherwise error value.
+ * @acquire_conn: acquire a new iscsi connection
+ * @param cdev
+ * @param handle - qed will fill handle that should be
+ * used henceforth as identifier of the
+ * connection.
+ * @param p_doorbell - qed will fill the address of the
+ * doorbell.
+ * @return 0 on success, otherwise error value.
+ * @release_conn: release a previously acquired iscsi connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @return 0 on success, otherwise error value.
+ * @offload_conn: configures an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @param conn_info - the configuration to use for the
+ * offload.
+ * @return 0 on success, otherwise error value.
+ * @update_conn: updates an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @param conn_info - the configuration to use for the
+ * offload.
+ * @return 0 on success, otherwise error value.
+ * @destroy_conn: stops an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @return 0 on success, otherwise error value.
+ * @clear_sq: clear all tasks in sq
+ * @param cdev
+ * @param handle - the connection handle.
+ * @return 0 on success, otherwise error value.
+ * @get_stats: iSCSI related statistics
+ * @param cdev
+ * @param stats - pointer to struct that would be filled
+ * with stats
+ * @return 0 on success, error otherwise.
+ */
+struct qed_iscsi_ops {
+ const struct qed_common_ops *common;
+
+ const struct qed_ll2_ops *ll2;
+
+ int (*fill_dev_info)(struct qed_dev *cdev,
+ struct qed_dev_iscsi_info *info);
+
+ void (*register_ops)(struct qed_dev *cdev,
+ struct qed_iscsi_cb_ops *ops, void *cookie);
+
+ int (*start)(struct qed_dev *cdev,
+ struct qed_iscsi_tid *tasks,
+ void *event_context, iscsi_event_cb_t async_event_cb);
+
+ int (*stop)(struct qed_dev *cdev);
+
+ int (*acquire_conn)(struct qed_dev *cdev,
+ u32 *handle,
+ u32 *fw_cid, void __iomem **p_doorbell);
+
+ int (*release_conn)(struct qed_dev *cdev, u32 handle);
+
+ int (*offload_conn)(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_iscsi_params_offload *conn_info);
+
+ int (*update_conn)(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_iscsi_params_update *conn_info);
+
+ int (*destroy_conn)(struct qed_dev *cdev, u32 handle, u8 abrt_conn);
+
+ int (*clear_sq)(struct qed_dev *cdev, u32 handle);
+
+ int (*get_stats)(struct qed_dev *cdev,
+ struct qed_iscsi_stats *stats);
+};
+
+const struct qed_iscsi_ops *qed_get_iscsi_ops(void);
+void qed_put_iscsi_ops(void);
+#endif
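A hedged sketch of how a storage driver might bind to the qed iSCSI personality through the two accessors declared above; treating a NULL ops table as "qed unavailable" is an assumption about qed's behaviour, not something this header guarantees:

#include <linux/qed/qed_iscsi_if.h>

static struct qed_iscsi_cb_ops example_cb_ops;  /* .common filled elsewhere */

static int example_bind(struct qed_dev *cdev, void *cookie)
{
        const struct qed_iscsi_ops *ops = qed_get_iscsi_ops();

        if (!ops)
                return -ENODEV;

        ops->register_ops(cdev, &example_cb_ops, cookie);
        return 0;
}

static void example_unbind(void)
{
        qed_put_iscsi_ops();
}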
diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h
index 99fbe6d55acb..f48d64b0e2fb 100644
--- a/include/linux/qed/qede_roce.h
+++ b/include/linux/qed/qede_roce.h
@@ -68,7 +68,7 @@ void qede_roce_unregister_driver(struct qedr_driver *drv);
bool qede_roce_supported(struct qede_dev *dev);
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+#if IS_ENABLED(CONFIG_QED_RDMA)
int qede_roce_dev_add(struct qede_dev *dev);
void qede_roce_dev_event_open(struct qede_dev *dev);
void qede_roce_dev_event_close(struct qede_dev *dev);
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index af3581b8a451..744486057e9e 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -80,14 +80,11 @@ static inline bool radix_tree_is_internal_node(void *ptr)
#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
RADIX_TREE_MAP_SHIFT))
-/* Internally used bits of node->count */
-#define RADIX_TREE_COUNT_SHIFT (RADIX_TREE_MAP_SHIFT + 1)
-#define RADIX_TREE_COUNT_MASK ((1UL << RADIX_TREE_COUNT_SHIFT) - 1)
-
struct radix_tree_node {
- unsigned char shift; /* Bits remaining in each slot */
- unsigned char offset; /* Slot offset in parent */
- unsigned int count;
+ unsigned char shift; /* Bits remaining in each slot */
+ unsigned char offset; /* Slot offset in parent */
+ unsigned char count; /* Total entry count */
+ unsigned char exceptional; /* Exceptional entry count */
union {
struct {
/* Used when ascending tree */
@@ -248,20 +245,6 @@ static inline int radix_tree_exception(void *arg)
return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
}
-/**
- * radix_tree_replace_slot - replace item in a slot
- * @pslot: pointer to slot, returned by radix_tree_lookup_slot
- * @item: new item to store in the slot.
- *
- * For use with radix_tree_lookup_slot(). Caller must hold tree write locked
- * across slot lookup and replacement.
- */
-static inline void radix_tree_replace_slot(void **pslot, void *item)
-{
- BUG_ON(radix_tree_is_internal_node(item));
- rcu_assign_pointer(*pslot, item);
-}
-
int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
unsigned order, struct radix_tree_node **nodep,
void ***slotp);
@@ -276,7 +259,14 @@ void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
struct radix_tree_node **nodep, void ***slotp);
void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
-bool __radix_tree_delete_node(struct radix_tree_root *root,
+typedef void (*radix_tree_update_node_t)(struct radix_tree_node *, void *);
+void __radix_tree_replace(struct radix_tree_root *root,
+ struct radix_tree_node *node,
+ void **slot, void *item,
+ radix_tree_update_node_t update_node, void *private);
+void radix_tree_replace_slot(struct radix_tree_root *root,
+ void **slot, void *item);
+void __radix_tree_delete_node(struct radix_tree_root *root,
struct radix_tree_node *node);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
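Since radix_tree_replace_slot() now needs the root to keep per-node accounting (including the new exceptional count) consistent, the lookup-then-replace pattern becomes the following; a minimal sketch, assuming the caller holds the tree write-locked across both steps, as the removed comment required:

#include <linux/radix-tree.h>

static void example_replace(struct radix_tree_root *tree,
                            unsigned long index, void *new_item)
{
        void **slot;

        /* tree must stay write-locked across lookup + replace */
        slot = radix_tree_lookup_slot(tree, index);
        if (slot)
                radix_tree_replace_slot(tree, slot, new_item);
}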
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 8beb98dcf14f..4f7a9561b8c4 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -45,19 +45,17 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
-#ifndef CONFIG_DEBUG_LIST
static inline void __list_add_rcu(struct list_head *new,
struct list_head *prev, struct list_head *next)
{
+ if (!__list_add_valid(new, prev, next))
+ return;
+
new->next = next;
new->prev = prev;
rcu_assign_pointer(list_next_rcu(prev), new);
next->prev = new;
}
-#else
-void __list_add_rcu(struct list_head *new,
- struct list_head *prev, struct list_head *next);
-#endif
/**
* list_add_rcu - add a new entry to rcu-protected list
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 9adc7b21903d..f6673132431d 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -15,6 +15,7 @@
#include <linux/list.h>
#include <linux/rbtree.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/lockdep.h>
@@ -116,22 +117,22 @@ struct reg_sequence {
#define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \
({ \
ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
- int ret; \
+ int pollret; \
might_sleep_if(sleep_us); \
for (;;) { \
- ret = regmap_read((map), (addr), &(val)); \
- if (ret) \
+ pollret = regmap_read((map), (addr), &(val)); \
+ if (pollret) \
break; \
if (cond) \
break; \
if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
- ret = regmap_read((map), (addr), &(val)); \
+ pollret = regmap_read((map), (addr), &(val)); \
break; \
} \
if (sleep_us) \
usleep_range((sleep_us >> 2) + 1, sleep_us); \
} \
- ret ?: ((cond) ? 0 : -ETIMEDOUT); \
+ pollret ?: ((cond) ? 0 : -ETIMEDOUT); \
})
#ifdef CONFIG_REGMAP
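A hedged usage sketch of regmap_read_poll_timeout(): the internal status variable was renamed to pollret, presumably so a caller-side variable named ret (for instance one referenced from the expanded cond or val expressions) can no longer collide with the macro's own. The register and bit names below are made-up placeholders:

#include <linux/regmap.h>

#define EXAMPLE_REG_STATUS      0x04
#define EXAMPLE_STATUS_READY    BIT(0)

static int example_wait_ready(struct regmap *map)
{
        unsigned int val;

        /* poll every ~100us, give up after 10ms; returns 0 on success
         * or -ETIMEDOUT if the ready bit never appeared */
        return regmap_read_poll_timeout(map, EXAMPLE_REG_STATUS, val,
                                        val & EXAMPLE_STATUS_READY,
                                        100, 10000);
}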
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 692108222271..ea0fffa5faeb 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -120,6 +120,25 @@ struct regmap;
#define REGULATOR_EVENT_PRE_DISABLE 0x400
#define REGULATOR_EVENT_ABORT_DISABLE 0x800
+/*
+ * Regulator errors that can be queried using regulator_get_error_flags
+ *
+ * UNDER_VOLTAGE Regulator output is under voltage.
+ * OVER_CURRENT Regulator output current is too high.
+ * REGULATION_OUT Regulator output is out of regulation.
+ * FAIL Regulator output has failed.
+ * OVER_TEMP Regulator over temp.
+ *
+ * NOTE: These errors can be OR'ed together.
+ */
+
+#define REGULATOR_ERROR_UNDER_VOLTAGE BIT(1)
+#define REGULATOR_ERROR_OVER_CURRENT BIT(2)
+#define REGULATOR_ERROR_REGULATION_OUT BIT(3)
+#define REGULATOR_ERROR_FAIL BIT(4)
+#define REGULATOR_ERROR_OVER_TEMP BIT(5)
+
+
/**
* struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event
*
@@ -237,6 +256,8 @@ int regulator_get_current_limit(struct regulator *regulator);
int regulator_set_mode(struct regulator *regulator, unsigned int mode);
unsigned int regulator_get_mode(struct regulator *regulator);
+int regulator_get_error_flags(struct regulator *regulator,
+ unsigned int *flags);
int regulator_set_load(struct regulator *regulator, int load_uA);
int regulator_allow_bypass(struct regulator *regulator, bool allow);
@@ -477,6 +498,12 @@ static inline unsigned int regulator_get_mode(struct regulator *regulator)
return REGULATOR_MODE_NORMAL;
}
+static inline int regulator_get_error_flags(struct regulator *regulator,
+ unsigned int *flags)
+{
+ return -EINVAL;
+}
+
static inline int regulator_set_load(struct regulator *regulator, int load_uA)
{
return REGULATOR_MODE_NORMAL;
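A consumer-side sketch of the new error query; the inline stub above returns -EINVAL when support is compiled out, so a failure means "no information" rather than a regulator fault. Names below are placeholders:

#include <linux/printk.h>
#include <linux/regulator/consumer.h>

static int example_check_errors(struct regulator *reg)
{
        unsigned int flags;
        int ret;

        ret = regulator_get_error_flags(reg, &flags);
        if (ret)
                return ret;

        /* the error bits are OR'ed together, so test them individually */
        if (flags & REGULATOR_ERROR_OVER_CURRENT)
                pr_warn("regulator reports over-current\n");
        if (flags & REGULATOR_ERROR_OVER_TEMP)
                pr_warn("regulator reports over-temperature\n");

        return 0;
}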
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 37b532410528..dac8e7b16bc6 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -100,6 +100,7 @@ struct regulator_linear_range {
*
* @set_mode: Set the configured operating mode for the regulator.
* @get_mode: Get the configured operating mode for the regulator.
+ * @get_error_flags: Get the current error(s) for the regulator.
* @get_status: Return actual (not as-configured) status of regulator, as a
* REGULATOR_STATUS value (or negative errno)
* @get_optimum_mode: Get the most efficient operating mode for the regulator
@@ -169,6 +170,9 @@ struct regulator_ops {
int (*set_mode) (struct regulator_dev *, unsigned int mode);
unsigned int (*get_mode) (struct regulator_dev *);
+ /* retrieve current error flags on the regulator */
+ int (*get_error_flags)(struct regulator_dev *, unsigned int *flags);
+
/* Time taken to enable or set voltage on the regulator */
int (*enable_time) (struct regulator_dev *);
int (*set_ramp_delay) (struct regulator_dev *, int ramp_delay);
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 930023b7c825..e2f3a3281d8f 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -400,6 +400,7 @@ enum rproc_crash_type {
* @firmware_loading_complete: marks e/o asynchronous firmware loading
* @bootaddr: address of first instruction to boot rproc with (optional)
* @rvdevs: list of remote virtio devices
+ * @subdevs: list of subdevices, used to follow the running state
* @notifyids: idr for dynamically assigning rproc-wide unique notify ids
* @index: index of this rproc device
* @crash_handler: workqueue for handling a crash
@@ -407,15 +408,14 @@ enum rproc_crash_type {
* @crash_comp: completion used to sync crash handler and the rproc reload
* @recovery_disabled: flag that state if recovery was disabled
* @max_notifyid: largest allocated notify id.
- * @table_ptr: pointer to the resource table in effect
- * @cached_table: copy of the resource table
+ * @table_ptr: our copy of the resource table
* @has_iommu: flag to indicate if remote processor is behind an MMU
*/
struct rproc {
struct list_head node;
struct iommu_domain *domain;
const char *name;
- const char *firmware;
+ char *firmware;
void *priv;
const struct rproc_ops *ops;
struct device dev;
@@ -431,6 +431,7 @@ struct rproc {
struct completion firmware_loading_complete;
u32 bootaddr;
struct list_head rvdevs;
+ struct list_head subdevs;
struct idr notifyids;
int index;
struct work_struct crash_handler;
@@ -439,11 +440,23 @@ struct rproc {
bool recovery_disabled;
int max_notifyid;
struct resource_table *table_ptr;
- struct resource_table *cached_table;
bool has_iommu;
bool auto_boot;
};
+/**
+ * struct rproc_subdev - subdevice tied to a remoteproc
+ * @node: list node related to the rproc subdevs list
+ * @probe: probe function, called as the rproc is started
+ * @remove: remove function, called as the rproc is stopped
+ */
+struct rproc_subdev {
+ struct list_head node;
+
+ int (*probe)(struct rproc_subdev *subdev);
+ void (*remove)(struct rproc_subdev *subdev);
+};
+
/* we currently support only two vrings per rvdev */
#define RVDEV_NUM_VRINGS 2
@@ -472,6 +485,9 @@ struct rproc_vring {
/**
* struct rproc_vdev - remoteproc state for a supported virtio device
+ * @refcount: reference counter for the vdev and vring allocations
+ * @subdev: handle for registering the vdev as a rproc subdevice
+ * @id: virtio device id (as in virtio_ids.h)
* @node: list node
* @rproc: the rproc handle
* @vdev: the virio device
@@ -479,6 +495,11 @@ struct rproc_vring {
* @rsc_offset: offset of the vdev's resource entry
*/
struct rproc_vdev {
+ struct kref refcount;
+
+ struct rproc_subdev subdev;
+
+ unsigned int id;
struct list_head node;
struct rproc *rproc;
struct virtio_device vdev;
@@ -511,4 +532,11 @@ static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev)
return rvdev->rproc;
}
+void rproc_add_subdev(struct rproc *rproc,
+ struct rproc_subdev *subdev,
+ int (*probe)(struct rproc_subdev *subdev),
+ void (*remove)(struct rproc_subdev *subdev));
+
+void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev);
+
#endif /* REMOTEPROC_H */
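A minimal sketch of a subdevice following the remote processor's lifecycle via the new rproc_add_subdev() API; the container structure and the callback bodies are illustrative assumptions:

#include <linux/remoteproc.h>

struct example_subdev {
        struct rproc_subdev subdev;
        /* driver state hanging off the subdevice */
};

static int example_probe(struct rproc_subdev *subdev)
{
        struct example_subdev *es =
                container_of(subdev, struct example_subdev, subdev);

        /* bring up resources that need the remote side running */
        (void)es;
        return 0;
}

static void example_remove(struct rproc_subdev *subdev)
{
        /* tear down in reverse as the rproc stops */
}

static void example_register(struct rproc *rproc, struct example_subdev *es)
{
        rproc_add_subdev(rproc, &es->subdev, example_probe, example_remove);
}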
diff --git a/include/linux/remoteproc/st_slim_rproc.h b/include/linux/remoteproc/st_slim_rproc.h
new file mode 100644
index 000000000000..4155556fa4b2
--- /dev/null
+++ b/include/linux/remoteproc/st_slim_rproc.h
@@ -0,0 +1,58 @@
+/*
+ * SLIM core rproc driver header
+ *
+ * Copyright (C) 2016 STMicroelectronics
+ *
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef _ST_REMOTEPROC_SLIM_H
+#define _ST_REMOTEPROC_SLIM_H
+
+#define ST_SLIM_MEM_MAX 2
+#define ST_SLIM_MAX_CLK 4
+
+enum {
+ ST_SLIM_DMEM,
+ ST_SLIM_IMEM,
+};
+
+/**
+ * struct st_slim_mem - slim internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: Bus address used to access the memory region
+ * @size: Size of the memory region
+ */
+struct st_slim_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ size_t size;
+};
+
+/**
+ * struct st_slim_rproc - SLIM core
+ * @rproc: rproc handle
+ * @mem: slim memory information
+ * @slimcore: slim core regs
+ * @peri: slim peripheral regs
+ * @clks: slim clocks
+ */
+struct st_slim_rproc {
+ struct rproc *rproc;
+ struct st_slim_mem mem[ST_SLIM_MEM_MAX];
+ void __iomem *slimcore;
+ void __iomem *peri;
+
+ /* st_slim_rproc private */
+ struct clk *clks[ST_SLIM_MAX_CLK];
+};
+
+struct st_slim_rproc *st_slim_rproc_alloc(struct platform_device *pdev,
+ char *fw_name);
+void st_slim_rproc_put(struct st_slim_rproc *slim_rproc);
+
+#endif
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index b0f305e77b7f..d9706a6f5ae2 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -40,7 +40,7 @@
#define _LINUX_RESERVATION_H
#include <linux/ww_mutex.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/rcupdate.h>
@@ -59,7 +59,7 @@ extern const char reservation_seqcount_string[];
struct reservation_object_list {
struct rcu_head rcu;
u32 shared_count, shared_max;
- struct fence __rcu *shared[];
+ struct dma_fence __rcu *shared[];
};
/**
@@ -74,7 +74,7 @@ struct reservation_object {
struct ww_mutex lock;
seqcount_t seq;
- struct fence __rcu *fence_excl;
+ struct dma_fence __rcu *fence_excl;
struct reservation_object_list __rcu *fence;
struct reservation_object_list *staged;
};
@@ -107,7 +107,7 @@ reservation_object_fini(struct reservation_object *obj)
{
int i;
struct reservation_object_list *fobj;
- struct fence *excl;
+ struct dma_fence *excl;
/*
* This object should be dead and all references must have
@@ -115,12 +115,12 @@ reservation_object_fini(struct reservation_object *obj)
*/
excl = rcu_dereference_protected(obj->fence_excl, 1);
if (excl)
- fence_put(excl);
+ dma_fence_put(excl);
fobj = rcu_dereference_protected(obj->fence, 1);
if (fobj) {
for (i = 0; i < fobj->shared_count; ++i)
- fence_put(rcu_dereference_protected(fobj->shared[i], 1));
+ dma_fence_put(rcu_dereference_protected(fobj->shared[i], 1));
kfree(fobj);
}
@@ -155,7 +155,7 @@ reservation_object_get_list(struct reservation_object *obj)
* RETURNS
* The exclusive fence or NULL
*/
-static inline struct fence *
+static inline struct dma_fence *
reservation_object_get_excl(struct reservation_object *obj)
{
return rcu_dereference_protected(obj->fence_excl,
@@ -173,35 +173,32 @@ reservation_object_get_excl(struct reservation_object *obj)
* RETURNS
* The exclusive fence or NULL if none
*/
-static inline struct fence *
+static inline struct dma_fence *
reservation_object_get_excl_rcu(struct reservation_object *obj)
{
- struct fence *fence;
- unsigned seq;
-retry:
- seq = read_seqcount_begin(&obj->seq);
+ struct dma_fence *fence;
+
+ if (!rcu_access_pointer(obj->fence_excl))
+ return NULL;
+
rcu_read_lock();
- fence = rcu_dereference(obj->fence_excl);
- if (read_seqcount_retry(&obj->seq, seq)) {
- rcu_read_unlock();
- goto retry;
- }
- fence = fence_get(fence);
+ fence = dma_fence_get_rcu_safe(&obj->fence_excl);
rcu_read_unlock();
+
return fence;
}
int reservation_object_reserve_shared(struct reservation_object *obj);
void reservation_object_add_shared_fence(struct reservation_object *obj,
- struct fence *fence);
+ struct dma_fence *fence);
void reservation_object_add_excl_fence(struct reservation_object *obj,
- struct fence *fence);
+ struct dma_fence *fence);
int reservation_object_get_fences_rcu(struct reservation_object *obj,
- struct fence **pfence_excl,
+ struct dma_fence **pfence_excl,
unsigned *pshared_count,
- struct fence ***pshared);
+ struct dma_fence ***pshared);
long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
bool wait_all, bool intr,
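A hedged caller-side sketch of the rewritten lockless lookup: dma_fence_get_rcu_safe() re-checks the pointer after taking its reference, so the old seqcount retry loop is gone and callers simply take, wait, and drop:

#include <linux/reservation.h>

static long example_wait_excl(struct reservation_object *obj)
{
        struct dma_fence *excl;
        long ret = 0;

        /* no ww_mutex needed; the RCU path handles concurrent updates */
        excl = reservation_object_get_excl_rcu(obj);
        if (excl) {
                ret = dma_fence_wait(excl, true /* interruptible */);
                dma_fence_put(excl);
        }
        return ret;
}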
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 4acc552e9279..b6d4568795a7 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -198,4 +198,10 @@ enum ring_buffer_flags {
RB_FL_OVERWRITE = 1 << 0,
};
+#ifdef CONFIG_RING_BUFFER
+int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
+#else
+#define trace_rb_cpu_prepare NULL
+#endif
+
#endif /* _LINUX_RING_BUFFER_H */
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index b46bb5620a76..15321fb1df6b 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -137,11 +137,19 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
* anon_vma helper functions.
*/
void anon_vma_init(void); /* create anon_vma_cachep */
-int anon_vma_prepare(struct vm_area_struct *);
+int __anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
+static inline int anon_vma_prepare(struct vm_area_struct *vma)
+{
+ if (likely(vma->anon_vma))
+ return 0;
+
+ return __anon_vma_prepare(vma);
+}
+
static inline void anon_vma_merge(struct vm_area_struct *vma,
struct vm_area_struct *next)
{
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index 452d393cc8dd..18f9e1ae4b7e 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -37,6 +37,7 @@
#include <linux/types.h>
#include <linux/device.h>
+#include <linux/err.h>
#include <linux/mod_devicetable.h>
#include <linux/kref.h>
#include <linux/mutex.h>
@@ -64,6 +65,7 @@ struct rpmsg_channel_info {
* rpmsg_device - device that belong to the rpmsg bus
* @dev: the device struct
* @id: device id (used to match between rpmsg drivers and devices)
+ * @driver_override: driver name to force a match
* @src: local address
* @dst: destination address
* @ept: the rpmsg endpoint of this channel
@@ -72,6 +74,7 @@ struct rpmsg_channel_info {
struct rpmsg_device {
struct device dev;
struct rpmsg_device_id id;
+ char *driver_override;
u32 src;
u32 dst;
struct rpmsg_endpoint *ept;
@@ -132,6 +135,8 @@ struct rpmsg_driver {
int (*callback)(struct rpmsg_device *, void *, int, void *, u32);
};
+#if IS_ENABLED(CONFIG_RPMSG)
+
int register_rpmsg_device(struct rpmsg_device *dev);
void unregister_rpmsg_device(struct rpmsg_device *dev);
int __register_rpmsg_driver(struct rpmsg_driver *drv, struct module *owner);
@@ -141,6 +146,116 @@ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *,
rpmsg_rx_cb_t cb, void *priv,
struct rpmsg_channel_info chinfo);
+int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len);
+int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
+int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
+ void *data, int len);
+
+int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len);
+int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
+int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
+ void *data, int len);
+
+#else
+
+static inline int register_rpmsg_device(struct rpmsg_device *dev)
+{
+ return -ENXIO;
+}
+
+static inline void unregister_rpmsg_device(struct rpmsg_device *dev)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+}
+
+static inline int __register_rpmsg_driver(struct rpmsg_driver *drv,
+ struct module *owner)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+
+ return -ENXIO;
+}
+
+static inline void unregister_rpmsg_driver(struct rpmsg_driver *drv)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+}
+
+static inline void rpmsg_destroy_ept(struct rpmsg_endpoint *ept)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+}
+
+static inline struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev,
+ rpmsg_rx_cb_t cb,
+ void *priv,
+ struct rpmsg_channel_info chinfo)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+
+ return ERR_PTR(-ENXIO);
+}
+
+static inline int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+
+ return -ENXIO;
+}
+
+static inline int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len,
+ u32 dst)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+
+ return -ENXIO;
+
+}
+
+static inline int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src,
+ u32 dst, void *data, int len)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+
+ return -ENXIO;
+}
+
+static inline int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+
+ return -ENXIO;
+}
+
+static inline int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data,
+ int len, u32 dst)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+
+ return -ENXIO;
+}
+
+static inline int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
+ u32 dst, void *data, int len)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+
+ return -ENXIO;
+}
+
+#endif /* IS_ENABLED(CONFIG_RPMSG) */
+
/* use a macro to avoid include chaining to get THIS_MODULE */
#define register_rpmsg_driver(drv) \
__register_rpmsg_driver(drv, THIS_MODULE)
@@ -157,14 +272,4 @@ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *,
module_driver(__rpmsg_driver, register_rpmsg_driver, \
unregister_rpmsg_driver)
-int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len);
-int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
-int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
- void *data, int len);
-
-int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len);
-int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
-int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
- void *data, int len);
-
#endif /* _LINUX_RPMSG_H */
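A minimal sketch of an rpmsg driver callback built on the APIs now gated above; with CONFIG_RPMSG disabled, the inline stubs turn each call into a WARN_ON plus -ENXIO, so driver code needs no compile-time guards of its own:

#include <linux/rpmsg.h>

static int example_cb(struct rpmsg_device *rpdev, void *data, int len,
                      void *priv, u32 src)
{
        /* blocking echo back on the channel's default endpoint */
        return rpmsg_send(rpdev->ept, data, len);
}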
diff --git a/include/linux/rpmsg/qcom_smd.h b/include/linux/rpmsg/qcom_smd.h
new file mode 100644
index 000000000000..e674b2e3074b
--- /dev/null
+++ b/include/linux/rpmsg/qcom_smd.h
@@ -0,0 +1,33 @@
+
+#ifndef _LINUX_RPMSG_QCOM_SMD_H
+#define _LINUX_RPMSG_QCOM_SMD_H
+
+#include <linux/device.h>
+
+struct qcom_smd_edge;
+
+#if IS_ENABLED(CONFIG_RPMSG_QCOM_SMD) || IS_ENABLED(CONFIG_QCOM_SMD)
+
+struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
+ struct device_node *node);
+int qcom_smd_unregister_edge(struct qcom_smd_edge *edge);
+
+#else
+
+static inline struct qcom_smd_edge *
+qcom_smd_register_edge(struct device *parent,
+ struct device_node *node)
+{
+ return ERR_PTR(-ENXIO);
+}
+
+static inline int qcom_smd_unregister_edge(struct qcom_smd_edge *edge)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+ return -ENXIO;
+}
+
+#endif
+
+#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 348f51b0ec92..0e90f2973719 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -262,20 +262,9 @@ extern char ___assert_task_state[1 - 2*!!(
#define set_task_state(tsk, state_value) \
do { \
(tsk)->task_state_change = _THIS_IP_; \
- smp_store_mb((tsk)->state, (state_value)); \
+ smp_store_mb((tsk)->state, (state_value)); \
} while (0)
-/*
- * set_current_state() includes a barrier so that the write of current->state
- * is correctly serialised wrt the caller's subsequent test of whether to
- * actually sleep:
- *
- * set_current_state(TASK_UNINTERRUPTIBLE);
- * if (do_i_need_to_sleep())
- * schedule();
- *
- * If the caller does not need such serialisation then use __set_current_state()
- */
#define __set_current_state(state_value) \
do { \
current->task_state_change = _THIS_IP_; \
@@ -284,11 +273,19 @@ extern char ___assert_task_state[1 - 2*!!(
#define set_current_state(state_value) \
do { \
current->task_state_change = _THIS_IP_; \
- smp_store_mb(current->state, (state_value)); \
+ smp_store_mb(current->state, (state_value)); \
} while (0)
#else
+/*
+ * @tsk had better be current, or you get to keep the pieces.
+ *
+ * The only reason is that computing current can be more expensive than
+ * using a pointer that's already available.
+ *
+ * Therefore, see set_current_state().
+ */
#define __set_task_state(tsk, state_value) \
do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value) \
@@ -299,11 +296,34 @@ extern char ___assert_task_state[1 - 2*!!(
* is correctly serialised wrt the caller's subsequent test of whether to
* actually sleep:
*
+ * for (;;) {
* set_current_state(TASK_UNINTERRUPTIBLE);
- * if (do_i_need_to_sleep())
- * schedule();
+ * if (!need_sleep)
+ * break;
+ *
+ * schedule();
+ * }
+ * __set_current_state(TASK_RUNNING);
+ *
+ * If the caller does not need such serialisation (because, for instance, the
+ * condition test and condition change and wakeup are under the same lock) then
+ * use __set_current_state().
+ *
+ * The above is typically ordered against the wakeup, which does:
+ *
+ * need_sleep = false;
+ * wake_up_state(p, TASK_UNINTERRUPTIBLE);
+ *
+ * Where wake_up_state() (and all other wakeup primitives) imply enough
+ * barriers to order the store of the variable against wakeup.
*
- * If the caller does not need such serialisation then use __set_current_state()
+ * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
+ * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
+ * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
+ *
+ * This is obviously fine, since they both store the exact same value.
+ *
+ * Also see the comments of try_to_wake_up().
*/
#define __set_current_state(state_value) \
do { current->state = (state_value); } while (0)
@@ -520,7 +540,11 @@ static inline int get_dumpable(struct mm_struct *mm)
/* leave room for more dump flags */
#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */
-#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */
+/*
+ * This one-shot flag was dropped because the exe file may need to be
+ * changed once again on NFS restore
+ */
+//#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */
#define MMF_HAS_UPROBES 19 /* has uprobes */
#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */
@@ -989,7 +1013,7 @@ enum cpu_idle_type {
* already in a wake queue, the wakeup will happen soon and the second
* waker can just skip it.
*
- * The WAKE_Q macro declares and initializes the list head.
+ * The DEFINE_WAKE_Q macro declares and initializes the list head.
* wake_up_q() does NOT reinitialize the list; it's expected to be
* called near the end of a function, where the fact that the queue is
* not used again will be easy to see by inspection.
@@ -1009,7 +1033,7 @@ struct wake_q_head {
#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
-#define WAKE_Q(name) \
+#define DEFINE_WAKE_Q(name) \
struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
extern void wake_q_add(struct wake_q_head *head,
@@ -1057,6 +1081,8 @@ static inline int cpu_numa_flags(void)
}
#endif
+extern int arch_asym_cpu_priority(int cpu);
+
struct sched_domain_attr {
int relax_domain_level;
};
@@ -1627,7 +1653,10 @@ struct task_struct {
int __user *set_child_tid; /* CLONE_CHILD_SETTID */
int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
- cputime_t utime, stime, utimescaled, stimescaled;
+ cputime_t utime, stime;
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
+ cputime_t utimescaled, stimescaled;
+#endif
cputime_t gtime;
struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
@@ -2220,34 +2249,38 @@ struct task_struct *try_get_task_struct(struct task_struct **ptask);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
cputime_t *utime, cputime_t *stime);
-extern void task_cputime_scaled(struct task_struct *t,
- cputime_t *utimescaled, cputime_t *stimescaled);
extern cputime_t task_gtime(struct task_struct *t);
#else
static inline void task_cputime(struct task_struct *t,
cputime_t *utime, cputime_t *stime)
{
- if (utime)
- *utime = t->utime;
- if (stime)
- *stime = t->stime;
+ *utime = t->utime;
+ *stime = t->stime;
}
+static inline cputime_t task_gtime(struct task_struct *t)
+{
+ return t->gtime;
+}
+#endif
+
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
static inline void task_cputime_scaled(struct task_struct *t,
cputime_t *utimescaled,
cputime_t *stimescaled)
{
- if (utimescaled)
- *utimescaled = t->utimescaled;
- if (stimescaled)
- *stimescaled = t->stimescaled;
+ *utimescaled = t->utimescaled;
+ *stimescaled = t->stimescaled;
}
-
-static inline cputime_t task_gtime(struct task_struct *t)
+#else
+static inline void task_cputime_scaled(struct task_struct *t,
+ cputime_t *utimescaled,
+ cputime_t *stimescaled)
{
- return t->gtime;
+ task_cputime(t, utimescaled, stimescaled);
}
#endif
+
extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
@@ -2444,6 +2477,10 @@ static inline void calc_load_enter_idle(void) { }
static inline void calc_load_exit_idle(void) { }
#endif /* CONFIG_NO_HZ_COMMON */
+#ifndef cpu_relax_yield
+#define cpu_relax_yield() cpu_relax()
+#endif
+
/*
* Do not use outside of architecture code which knows its limitations.
*
@@ -2567,6 +2604,7 @@ extern void sched_autogroup_create_attach(struct task_struct *p);
extern void sched_autogroup_detach(struct task_struct *p);
extern void sched_autogroup_fork(struct signal_struct *sig);
extern void sched_autogroup_exit(struct signal_struct *sig);
+extern void sched_autogroup_exit_task(struct task_struct *p);
#ifdef CONFIG_PROC_FS
extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
@@ -2576,6 +2614,7 @@ static inline void sched_autogroup_create_attach(struct task_struct *p) { }
static inline void sched_autogroup_detach(struct task_struct *p) { }
static inline void sched_autogroup_fork(struct signal_struct *sig) { }
static inline void sched_autogroup_exit(struct signal_struct *sig) { }
+static inline void sched_autogroup_exit_task(struct task_struct *p) { }
#endif
extern int yield_to(struct task_struct *p, bool preempt);
@@ -3506,6 +3545,18 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
+/*
+ * In order to reduce various lock holder preemption latencies provide an
+ * interface to see if a vCPU is currently running or not.
+ *
+ * This allows us to terminate optimistic spin loops and block, analogous to
+ * the native optimistic spin heuristic of testing if the lock owner task is
+ * running or not.
+ */
+#ifndef vcpu_is_preempted
+# define vcpu_is_preempted(cpu) false
+#endif
+
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
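A hedged sketch of the optimistic-spin pattern the vcpu_is_preempted() comment describes; the lock structure, acquisition primitive and the elided blocking fallback are all illustrative assumptions:

#include <linux/atomic.h>
#include <linux/smp.h>

struct example_lock {
        atomic_t val;
        int owner_cpu;
};

static bool example_try_acquire(struct example_lock *l)
{
        if (atomic_cmpxchg(&l->val, 0, 1) != 0)
                return false;
        l->owner_cpu = raw_smp_processor_id();
        return true;
}

static void example_lock_slowpath(struct example_lock *l)
{
        while (!example_try_acquire(l)) {
                /* the owner's vCPU is scheduled out, so spinning is
                 * wasted work; fall back to blocking (elided) */
                if (vcpu_is_preempted(READ_ONCE(l->owner_cpu)))
                        break;
                cpu_relax();
        }
}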
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 22db1e63707e..441145351301 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -36,7 +36,6 @@ extern unsigned int sysctl_numa_balancing_scan_size;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
extern unsigned int sysctl_sched_time_avg;
-extern unsigned int sysctl_sched_shares_window;
int sched_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length,
diff --git a/include/linux/seg6.h b/include/linux/seg6.h
new file mode 100644
index 000000000000..7a66d2b4c5a6
--- /dev/null
+++ b/include/linux/seg6.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_SEG6_H
+#define _LINUX_SEG6_H
+
+#include <uapi/linux/seg6.h>
+
+#endif
diff --git a/include/linux/seg6_genl.h b/include/linux/seg6_genl.h
new file mode 100644
index 000000000000..d6c3fb4f3734
--- /dev/null
+++ b/include/linux/seg6_genl.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_SEG6_GENL_H
+#define _LINUX_SEG6_GENL_H
+
+#include <uapi/linux/seg6_genl.h>
+
+#endif
diff --git a/include/linux/seg6_hmac.h b/include/linux/seg6_hmac.h
new file mode 100644
index 000000000000..da437ebdc6cd
--- /dev/null
+++ b/include/linux/seg6_hmac.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_SEG6_HMAC_H
+#define _LINUX_SEG6_HMAC_H
+
+#include <uapi/linux/seg6_hmac.h>
+
+#endif
diff --git a/include/linux/seg6_iptunnel.h b/include/linux/seg6_iptunnel.h
new file mode 100644
index 000000000000..5377cf6a5a02
--- /dev/null
+++ b/include/linux/seg6_iptunnel.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_SEG6_IPTUNNEL_H
+#define _LINUX_SEG6_IPTUNNEL_H
+
+#include <uapi/linux/seg6_iptunnel.h>
+
+#endif
diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h
index a1ba6a5ccdd6..c58c535d12a8 100644
--- a/include/linux/seqno-fence.h
+++ b/include/linux/seqno-fence.h
@@ -20,7 +20,7 @@
#ifndef __LINUX_SEQNO_FENCE_H
#define __LINUX_SEQNO_FENCE_H
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/dma-buf.h>
enum seqno_fence_condition {
@@ -29,15 +29,15 @@ enum seqno_fence_condition {
};
struct seqno_fence {
- struct fence base;
+ struct dma_fence base;
- const struct fence_ops *ops;
+ const struct dma_fence_ops *ops;
struct dma_buf *sync_buf;
uint32_t seqno_ofs;
enum seqno_fence_condition condition;
};
-extern const struct fence_ops seqno_fence_ops;
+extern const struct dma_fence_ops seqno_fence_ops;
/**
* to_seqno_fence - cast a fence to a seqno_fence
@@ -47,7 +47,7 @@ extern const struct fence_ops seqno_fence_ops;
* or the seqno_fence otherwise.
*/
static inline struct seqno_fence *
-to_seqno_fence(struct fence *fence)
+to_seqno_fence(struct dma_fence *fence)
{
if (fence->ops != &seqno_fence_ops)
return NULL;
@@ -83,9 +83,9 @@ to_seqno_fence(struct fence *fence)
* dma-buf for sync_buf, since mapping or unmapping the sync_buf to the
* device's vm can be expensive.
*
- * It is recommended for creators of seqno_fence to call fence_signal
+ * It is recommended for creators of seqno_fence to call dma_fence_signal()
* before destruction. This will prevent possible issues from wraparound at
- * time of issue vs time of check, since users can check fence_is_signaled
+ * time of issue vs time of check, since users can check dma_fence_is_signaled()
* before submitting instructions for the hardware to wait on the fence.
* However, when ops.enable_signaling is not called, it doesn't have to be
* done as soon as possible, just before there's any real danger of seqno
@@ -96,18 +96,18 @@ seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock,
struct dma_buf *sync_buf, uint32_t context,
uint32_t seqno_ofs, uint32_t seqno,
enum seqno_fence_condition cond,
- const struct fence_ops *ops)
+ const struct dma_fence_ops *ops)
{
BUG_ON(!fence || !sync_buf || !ops);
BUG_ON(!ops->wait || !ops->enable_signaling ||
!ops->get_driver_name || !ops->get_timeline_name);
/*
- * ops is used in fence_init for get_driver_name, so needs to be
+ * ops is used in dma_fence_init for get_driver_name, so needs to be
* initialized first
*/
fence->ops = ops;
- fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno);
+ dma_fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno);
get_dma_buf(sync_buf);
fence->sync_buf = sync_buf;
fence->seqno_ofs = seqno_ofs;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 601258f6e621..332e76756f54 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -645,8 +645,15 @@ struct sk_buff {
struct rb_node rbnode; /* used in netem & tcp stack */
};
struct sock *sk;
- struct net_device *dev;
+ union {
+ struct net_device *dev;
+ /* Some protocols might use this space to store information,
+ * while the device pointer is NULL.
+ * The UDP receive path is one user.
+ */
+ unsigned long dev_scratch;
+ };
/*
* This is the control buffer. It is free to use for every
* layer. Please put your private variables there. If you
@@ -936,6 +943,7 @@ struct sk_buff_fclones {
/**
* skb_fclone_busy - check if fclone is busy
+ * @sk: socket
* @skb: buffer
*
* Returns true if skb is a fast clone, and its clone is not freed.
@@ -1086,7 +1094,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
}
void __skb_get_hash(struct sk_buff *skb);
-u32 __skb_get_hash_symmetric(struct sk_buff *skb);
+u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
const struct flow_keys *keys, int hlen);
@@ -1798,11 +1806,11 @@ static inline unsigned int skb_headlen(const struct sk_buff *skb)
return skb->len - skb->data_len;
}
-static inline int skb_pagelen(const struct sk_buff *skb)
+static inline unsigned int skb_pagelen(const struct sk_buff *skb)
{
- int i, len = 0;
+ unsigned int i, len = 0;
- for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
+ for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
return len + skb_headlen(skb);
}
@@ -1965,6 +1973,8 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
+void skb_condense(struct sk_buff *skb);
+
/**
* skb_headroom - bytes at buffer head
* @skb: buffer to check
@@ -3032,9 +3042,13 @@ static inline void skb_frag_list_init(struct sk_buff *skb)
int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
const struct sk_buff *skb);
struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
+ void (*destructor)(struct sock *sk,
+ struct sk_buff *skb),
int *peeked, int *off, int *err,
struct sk_buff **last);
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
+ void (*destructor)(struct sock *sk,
+ struct sk_buff *skb),
int *peeked, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
int *err);
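
The new destructor argument lets a protocol run per-skb teardown while the receive-queue lock is still held; UDP's receive path is the in-tree user. A sketch with hypothetical demo_* names:

static void demo_skb_destructor(struct sock *sk, struct sk_buff *skb)
{
	/* e.g. uncharge receive memory for this skb under the queue lock */
	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}

static struct sk_buff *demo_recv(struct sock *sk, unsigned int flags,
				 int *peeked, int *off, int *err)
{
	return __skb_recv_datagram(sk, flags, demo_skb_destructor,
				   peeked, off, err);
}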
diff --git a/include/linux/smc91x.h b/include/linux/smc91x.h
index e302c447e057..129bc674dcf5 100644
--- a/include/linux/smc91x.h
+++ b/include/linux/smc91x.h
@@ -39,6 +39,7 @@ struct smc91x_platdata {
unsigned long flags;
unsigned char leda;
unsigned char ledb;
+ bool pxa_u16_align4; /* PXA has buggy u16 writes at 4*n+2 addresses */
};
#endif /* __SMC91X_H__ */
diff --git a/include/linux/soc/renesas/rcar-rst.h b/include/linux/soc/renesas/rcar-rst.h
new file mode 100644
index 000000000000..a18e0783946b
--- /dev/null
+++ b/include/linux/soc/renesas/rcar-rst.h
@@ -0,0 +1,6 @@
+#ifndef __LINUX_SOC_RENESAS_RCAR_RST_H__
+#define __LINUX_SOC_RENESAS_RCAR_RST_H__
+
+int rcar_rst_read_mode_pins(u32 *mode);
+
+#endif /* __LINUX_SOC_RENESAS_RCAR_RST_H__ */
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 705840e0438f..266dab9ad782 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -88,6 +88,9 @@ struct stmmac_mdio_bus_data {
struct stmmac_dma_cfg {
int pbl;
+ int txpbl;
+ int rxpbl;
+ bool pblx8;
int fixed_burst;
int mixed_burst;
bool aal;
@@ -135,8 +138,6 @@ struct plat_stmmacenet_data {
void (*bus_setup)(void __iomem *ioaddr);
int (*init)(struct platform_device *pdev, void *priv);
void (*exit)(struct platform_device *pdev, void *priv);
- void (*suspend)(struct platform_device *pdev, void *priv);
- void (*resume)(struct platform_device *pdev, void *priv);
void *bsp_priv;
struct stmmac_axi *axi;
int has_gmac4;
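
A sketch of platform data using the new split burst-length fields; the values are illustrative, and the assumed semantics (a non-zero per-direction pbl overriding the common one, pblx8 applying the 8x multiplier) follow the field names:

static struct stmmac_dma_cfg demo_dma_cfg = {
	.pbl   = 8,	/* legacy common programmable burst length */
	.txpbl = 16,	/* TX-specific burst length, assumed to override pbl */
	.rxpbl = 8,	/* RX-specific burst length */
	.pblx8 = true,	/* let the core apply the PBLx8 multiplier */
};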
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index ab02a457da1f..e5d193440374 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -25,6 +25,7 @@ struct svc_xprt_ops {
void (*xpo_detach)(struct svc_xprt *);
void (*xpo_free)(struct svc_xprt *);
int (*xpo_secure_port)(struct svc_rqst *);
+ void (*xpo_kill_temp_xprt)(struct svc_xprt *);
};
struct svc_xprt_class {
diff --git a/include/linux/swap.h b/include/linux/swap.h
index bfee1af1f54f..09f4be179ff3 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -246,39 +246,7 @@ struct swap_info_struct {
void *workingset_eviction(struct address_space *mapping, struct page *page);
bool workingset_refault(void *shadow);
void workingset_activation(struct page *page);
-extern struct list_lru workingset_shadow_nodes;
-
-static inline unsigned int workingset_node_pages(struct radix_tree_node *node)
-{
- return node->count & RADIX_TREE_COUNT_MASK;
-}
-
-static inline void workingset_node_pages_inc(struct radix_tree_node *node)
-{
- node->count++;
-}
-
-static inline void workingset_node_pages_dec(struct radix_tree_node *node)
-{
- VM_WARN_ON_ONCE(!workingset_node_pages(node));
- node->count--;
-}
-
-static inline unsigned int workingset_node_shadows(struct radix_tree_node *node)
-{
- return node->count >> RADIX_TREE_COUNT_SHIFT;
-}
-
-static inline void workingset_node_shadows_inc(struct radix_tree_node *node)
-{
- node->count += 1U << RADIX_TREE_COUNT_SHIFT;
-}
-
-static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
-{
- VM_WARN_ON_ONCE(!workingset_node_shadows(node));
- node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
-}
+void workingset_update_node(struct radix_tree_node *node, void *private);
/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h
index aa17ccfc2f57..3e3ab84fc4cd 100644
--- a/include/linux/sync_file.h
+++ b/include/linux/sync_file.h
@@ -18,8 +18,8 @@
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/spinlock.h>
-#include <linux/fence.h>
-#include <linux/fence-array.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-array.h>
/**
* struct sync_file - sync file to export to the userspace
@@ -41,13 +41,13 @@ struct sync_file {
wait_queue_head_t wq;
- struct fence *fence;
- struct fence_cb cb;
+ struct dma_fence *fence;
+ struct dma_fence_cb cb;
};
-#define POLL_ENABLED FENCE_FLAG_USER_BITS
+#define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS
-struct sync_file *sync_file_create(struct fence *fence);
-struct fence *sync_file_get_fence(int fd);
+struct sync_file *sync_file_create(struct dma_fence *fence);
+struct dma_fence *sync_file_get_fence(int fd);
#endif /* _LINUX_SYNC_H */
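
Usage is unchanged apart from the dma_fence types. A trimmed sketch of exporting a fence to userspace and importing one back, with fd plumbing through the standard file helpers (demo_* names are hypothetical):

#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/sync_file.h>

static int demo_export_fence(struct dma_fence *fence)
{
	struct sync_file *sf = sync_file_create(fence);
	int fd;

	if (!sf)
		return -ENOMEM;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		fput(sf->file);
		return fd;
	}
	fd_install(fd, sf->file);
	return fd;
}

static struct dma_fence *demo_import_fence(int fd)
{
	return sync_file_get_fence(fd);	/* referenced fence, or NULL */
}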
diff --git a/include/linux/sys_soc.h b/include/linux/sys_soc.h
index 2739ccb69571..bed223b70217 100644
--- a/include/linux/sys_soc.h
+++ b/include/linux/sys_soc.h
@@ -13,6 +13,7 @@ struct soc_device_attribute {
const char *family;
const char *revision;
const char *soc_id;
+ const void *data;
};
/**
@@ -34,4 +35,12 @@ void soc_device_unregister(struct soc_device *soc_dev);
*/
struct device *soc_device_to_device(struct soc_device *soc);
+#ifdef CONFIG_SOC_BUS
+const struct soc_device_attribute *soc_device_match(
+ const struct soc_device_attribute *matches);
+#else
+static inline const struct soc_device_attribute *soc_device_match(
+ const struct soc_device_attribute *matches) { return NULL; }
+#endif
+
#endif /* __SOC_BUS_H */
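
The new soc_device_match() walks a sentinel-terminated table and returns the first entry whose populated fields all match the registered SoC device, or NULL (always NULL with the !CONFIG_SOC_BUS stub). A sketch keying a driver quirk off the new .data field; the table entry is hypothetical:

static const struct soc_device_attribute demo_quirks[] = {
	{ .soc_id = "r8a7795", .data = (void *)1 },	/* illustrative entry */
	{ /* sentinel */ }
};

static bool demo_needs_quirk(void)
{
	const struct soc_device_attribute *attr = soc_device_match(demo_quirks);

	return attr && attr->data;
}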
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index a17ae7b85218..fc5848dad7a4 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -123,6 +123,7 @@ struct tcp_request_sock {
u32 txhash;
u32 rcv_isn;
u32 snt_isn;
+ u32 ts_off;
u32 last_oow_ack_time; /* last SYNACK */
u32 rcv_nxt; /* the ack # by SYNACK. For
* FastOpen it's the seq#
@@ -176,8 +177,6 @@ struct tcp_sock {
* sum(delta(snd_una)), or how many bytes
* were acked.
*/
- struct u64_stats_sync syncp; /* protects 64bit vars (cf tcp_get_info()) */
-
u32 snd_una; /* First byte we want an ack for */
u32 snd_sml; /* Last byte of the most recently transmitted small packet */
u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
@@ -187,7 +186,6 @@ struct tcp_sock {
u32 tsoffset; /* timestamp offset */
struct list_head tsq_node; /* anchor in tsq_tasklet.head list */
- unsigned long tsq_flags;
/* Data for direct copy to user */
struct {
@@ -213,8 +211,11 @@ struct tcp_sock {
u8 reord; /* reordering detected */
} rack;
u16 advmss; /* Advertised MSS */
- u8 rate_app_limited:1, /* rate_{delivered,interval_us} limited? */
- unused:7;
+ u32 chrono_start; /* Start time in jiffies of a TCP chrono */
+ u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
+ u8 chrono_type:2, /* current chronograph type */
+ rate_app_limited:1, /* rate_{delivered,interval_us} limited? */
+ unused:5;
u8 nonagle : 4,/* Disable Nagle algorithm? */
thin_lto : 1,/* Use linear timeouts for thin streams */
thin_dupack : 1,/* Fast retransmit on first dupack */
@@ -362,7 +363,7 @@ struct tcp_sock {
u32 *saved_syn;
};
-enum tsq_flags {
+enum tsq_enum {
TSQ_THROTTLED,
TSQ_QUEUED,
TCP_TSQ_DEFERRED, /* tcp_tasklet_func() found socket was owned */
@@ -373,6 +374,15 @@ enum tsq_flags {
*/
};
+enum tsq_flags {
+ TSQF_THROTTLED = (1UL << TSQ_THROTTLED),
+ TSQF_QUEUED = (1UL << TSQ_QUEUED),
+ TCPF_TSQ_DEFERRED = (1UL << TCP_TSQ_DEFERRED),
+ TCPF_WRITE_TIMER_DEFERRED = (1UL << TCP_WRITE_TIMER_DEFERRED),
+ TCPF_DELACK_TIMER_DEFERRED = (1UL << TCP_DELACK_TIMER_DEFERRED),
+ TCPF_MTU_REDUCED_DEFERRED = (1UL << TCP_MTU_REDUCED_DEFERRED),
+};
+
static inline struct tcp_sock *tcp_sk(const struct sock *sk)
{
return (struct tcp_sock *)sk;
@@ -427,4 +437,6 @@ static inline void tcp_saved_syn_free(struct tcp_sock *tp)
tp->saved_syn = NULL;
}
+struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk);
+
#endif /* _LINUX_TCP_H */
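
The enum split keeps the bit numbers (tsq_enum) for the atomic bitops and adds ready-made masks (tsq_flags) for bulk tests. A sketch; tsq_word stands in for wherever the flags word now lives, since this diff removes it from tcp_sock:

static bool demo_tsq(unsigned long *tsq_word)
{
	/* tsq_enum values feed the atomic bitops... */
	set_bit(TSQ_THROTTLED, tsq_word);

	/* ...while tsq_flags masks let one read test several bits */
	return READ_ONCE(*tsq_word) & (TSQF_THROTTLED | TSQF_QUEUED);
}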
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 511182a88e76..e275e98bdceb 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -28,6 +28,7 @@
#include <linux/of.h>
#include <linux/idr.h>
#include <linux/device.h>
+#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <uapi/linux/thermal.h>
@@ -204,6 +205,7 @@ struct thermal_zone_device {
int id;
char type[THERMAL_NAME_LENGTH];
struct device device;
+ struct attribute_group trips_attribute_group;
struct thermal_attr *trip_temp_attrs;
struct thermal_attr *trip_type_attrs;
struct thermal_attr *trip_hyst_attrs;
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 45f004e9cc59..2873baf5372a 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -14,17 +14,6 @@ struct timespec;
struct compat_timespec;
#ifdef CONFIG_THREAD_INFO_IN_TASK
-struct thread_info {
- unsigned long flags; /* low level flags */
-};
-
-#define INIT_THREAD_INFO(tsk) \
-{ \
- .flags = 0, \
-}
-#endif
-
-#ifdef CONFIG_THREAD_INFO_IN_TASK
#define current_thread_info() ((struct thread_info *)current)
#endif
diff --git a/include/linux/time.h b/include/linux/time.h
index 4cea09d94208..23f0f5ce3090 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -172,8 +172,6 @@ extern int do_setitimer(int which, struct itimerval *value,
struct itimerval *ovalue);
extern int do_getitimer(int which, struct itimerval *value);
-extern unsigned int alarm_setitimer(unsigned int seconds);
-
extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags);
struct tms;
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 09168c52ab64..361f8bf1429d 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -249,6 +249,7 @@ static inline u64 ktime_get_raw_ns(void)
extern u64 ktime_get_mono_fast_ns(void);
extern u64 ktime_get_raw_fast_ns(void);
+extern u64 ktime_get_boot_fast_ns(void);
/*
* Timespec interfaces utilizing the ktime based ones
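
Like the other _fast accessors, the new boot-clock variant is NMI-safe, so it can stamp events from contexts where the regular clock calls are off-limits. A one-line sketch:

static u64 demo_stamp(void)
{
	/* usable even from NMI; the boot clock keeps counting across suspend */
	return ktime_get_boot_fast_ns();
}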
diff --git a/include/linux/udp.h b/include/linux/udp.h
index d1fd8cd39478..c0f530809d1f 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -79,6 +79,9 @@ struct udp_sock {
int (*gro_complete)(struct sock *sk,
struct sk_buff *skb,
int nhoff);
+
+ /* This field is dirtied by udp_recvmsg() */
+ int forward_deficit;
};
static inline struct udp_sock *udp_sk(const struct sock *sk)
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 4a29c75b146e..0a294e950df8 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -27,6 +27,7 @@
#include <linux/errno.h>
#include <linux/rbtree.h>
#include <linux/types.h>
+#include <linux/wait.h>
struct vm_area_struct;
struct mm_struct;
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 3a375d07d0dc..00d232406f18 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -81,7 +81,8 @@
#define CDC_NCM_TIMER_INTERVAL_MAX (U32_MAX / NSEC_PER_USEC)
/* Driver flags */
-#define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */
+#define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */
+#define CDC_MBIM_FLAG_AVOID_ALTSETTING_TOGGLE 0x04 /* Avoid altsetting toggle during init */
#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
(x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 0ecae0b1cd34..edf9b2cad277 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -75,7 +75,16 @@ struct vfio_iommu_driver_ops {
struct iommu_group *group);
void (*detach_group)(void *iommu_data,
struct iommu_group *group);
-
+ int (*pin_pages)(void *iommu_data, unsigned long *user_pfn,
+ int npage, int prot,
+ unsigned long *phys_pfn);
+ int (*unpin_pages)(void *iommu_data,
+ unsigned long *user_pfn, int npage);
+ int (*register_notifier)(void *iommu_data,
+ unsigned long *events,
+ struct notifier_block *nb);
+ int (*unregister_notifier)(void *iommu_data,
+ struct notifier_block *nb);
};
extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
@@ -92,6 +101,36 @@ extern int vfio_external_user_iommu_id(struct vfio_group *group);
extern long vfio_external_check_extension(struct vfio_group *group,
unsigned long arg);
+#define VFIO_PIN_PAGES_MAX_ENTRIES (PAGE_SIZE/sizeof(unsigned long))
+
+extern int vfio_pin_pages(struct device *dev, unsigned long *user_pfn,
+ int npage, int prot, unsigned long *phys_pfn);
+extern int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn,
+ int npage);
+
+/* each type has independent events */
+enum vfio_notify_type {
+ VFIO_IOMMU_NOTIFY = 0,
+ VFIO_GROUP_NOTIFY = 1,
+};
+
+/* events for VFIO_IOMMU_NOTIFY */
+#define VFIO_IOMMU_NOTIFY_DMA_UNMAP BIT(0)
+
+/* events for VFIO_GROUP_NOTIFY */
+#define VFIO_GROUP_NOTIFY_SET_KVM BIT(0)
+
+extern int vfio_register_notifier(struct device *dev,
+ enum vfio_notify_type type,
+ unsigned long *required_events,
+ struct notifier_block *nb);
+extern int vfio_unregister_notifier(struct device *dev,
+ enum vfio_notify_type type,
+ struct notifier_block *nb);
+
+struct kvm;
+extern void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm);
+
/*
* Sub-module helpers
*/
@@ -103,6 +142,13 @@ extern struct vfio_info_cap_header *vfio_info_cap_add(
struct vfio_info_cap *caps, size_t size, u16 id, u16 version);
extern void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset);
+extern int vfio_info_add_capability(struct vfio_info_cap *caps,
+ int cap_type_id, void *cap_type);
+
+extern int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr,
+ int num_irqs, int max_irq_type,
+ size_t *data_size);
+
struct pci_dev;
#ifdef CONFIG_EEH
extern void vfio_spapr_pci_eeh_open(struct pci_dev *pdev);
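
These exports form the mediated-device support surface: a vendor driver pins user pages through the container's IOMMU backend and registers for unmap notifications. A sketch with hypothetical demo_* names, assuming the usual IOMMU_READ/IOMMU_WRITE prot flags:

static int demo_dma_unmap_cb(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	/* action == VFIO_IOMMU_NOTIFY_DMA_UNMAP: invalidate stale mappings */
	return NOTIFY_OK;
}

static int demo_setup(struct device *mdev_dev, struct notifier_block *nb,
		      unsigned long *user_pfn, unsigned long *phys_pfn,
		      int npage)
{
	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	int ret;

	nb->notifier_call = demo_dma_unmap_cb;
	ret = vfio_register_notifier(mdev_dev, VFIO_IOMMU_NOTIFY, &events, nb);
	if (ret)
		return ret;

	/* npage is capped at VFIO_PIN_PAGES_MAX_ENTRIES per call */
	return vfio_pin_pages(mdev_dev, user_pfn, npage,
			      IOMMU_READ | IOMMU_WRITE, phys_pfn);
}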
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 1c912f85e041..66204007d7ac 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -58,7 +58,7 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
struct virtio_net_hdr *hdr,
bool little_endian)
{
- memset(hdr, 0, sizeof(*hdr));
+ memset(hdr, 0, sizeof(*hdr)); /* no info leak */
if (skb_is_gso(skb)) {
struct skb_shared_info *sinfo = skb_shinfo(skb);
@@ -98,4 +98,4 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
return 0;
}
-#endif /* _LINUX_VIRTIO_BYTEORDER */
+#endif /* _LINUX_VIRTIO_NET_H */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 3d9d786a943c..d68edffbf142 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -82,6 +82,7 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
const void *caller);
extern void vfree(const void *addr);
+extern void vfree_atomic(const void *addr);
extern void *vmap(struct page **pages, unsigned int count,
unsigned long flags, pgprot_t prot);
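
vfree() may sleep, so it cannot run under a spinlock; the new vfree_atomic() defers the actual unmap, making teardown from atomic context legal. A sketch (demo_lock is assumed):

static DEFINE_SPINLOCK(demo_lock);

static void demo_drop_buffer(void *buf)
{
	spin_lock(&demo_lock);		/* no sleeping allowed from here */
	vfree_atomic(buf);		/* queues the free, returns at once */
	spin_unlock(&demo_lock);
}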
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index fc6e22186405..d4f16cf6281c 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -119,18 +119,30 @@ struct delayed_work {
int cpu;
};
-/*
- * A struct for workqueue attributes. This can be used to change
- * attributes of an unbound workqueue.
+/**
+ * struct workqueue_attrs - A struct for workqueue attributes.
*
- * Unlike other fields, ->no_numa isn't a property of a worker_pool. It
- * only modifies how apply_workqueue_attrs() select pools and thus doesn't
- * participate in pool hash calculations or equality comparisons.
+ * This can be used to change attributes of an unbound workqueue.
*/
struct workqueue_attrs {
- int nice; /* nice level */
- cpumask_var_t cpumask; /* allowed CPUs */
- bool no_numa; /* disable NUMA affinity */
+ /**
+ * @nice: nice level
+ */
+ int nice;
+
+ /**
+ * @cpumask: allowed CPUs
+ */
+ cpumask_var_t cpumask;
+
+ /**
+ * @no_numa: disable NUMA affinity
+ *
+ * Unlike other fields, ``no_numa`` isn't a property of a worker_pool. It
+ * only modifies how :c:func:`apply_workqueue_attrs` selects pools and thus
+ * doesn't participate in pool hash calculations or equality comparisons.
+ */
+ bool no_numa;
};
static inline struct delayed_work *to_delayed_work(struct work_struct *work)
@@ -272,7 +284,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
/*
* Workqueue flags and constants. For details, please refer to
- * Documentation/workqueue.txt.
+ * Documentation/core-api/workqueue.rst.
*/
enum {
WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
@@ -370,7 +382,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
* @args...: args for @fmt
*
* Allocate a workqueue with the specified parameters. For detailed
- * information on WQ_* flags, please refer to Documentation/workqueue.txt.
+ * information on WQ_* flags, please refer to
+ * Documentation/core-api/workqueue.rst.
*
* The __lock_name macro dance is to guarantee that a single lock_class_key
* doesn't end up with different names, which isn't allowed by lockdep.
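
For reference, the WQ_* flags documented there feed alloc_workqueue(); a minimal sketch:

static struct workqueue_struct *demo_wq;

static int __init demo_init(void)
{
	/* max_active of 0 requests the default limit */
	demo_wq = alloc_workqueue("demo", WQ_UNBOUND | WQ_FREEZABLE, 0);
	return demo_wq ? 0 : -ENOMEM;
}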
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 2bb5deb0012e..7b0066814fa0 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -120,7 +120,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
struct ww_class *ww_class)
{
ctx->task = current;
- ctx->stamp = atomic_long_inc_return(&ww_class->stamp);
+ ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp);
ctx->acquired = 0;
#ifdef CONFIG_DEBUG_MUTEXES
ctx->ww_class = ww_class;
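
The switch to the relaxed increment only changes how the acquire stamp is generated; ordering comes from the mutexes themselves, and callers use the API exactly as before. A sketch, with -EDEADLK handling trimmed:

static DEFINE_WW_CLASS(demo_ww_class);

static void demo_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &demo_ww_class);
	ww_mutex_lock(a, &ctx);		/* real code must handle -EDEADLK */
	ww_mutex_lock(b, &ctx);
	ww_acquire_done(&ctx);

	/* ... both locks held ... */

	ww_mutex_unlock(b);
	ww_mutex_unlock(a);
	ww_acquire_fini(&ctx);
}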