-rw-r--r--  MAINTAINERS |   6
-rw-r--r--  drivers/isdn/gigaset/bas-gigaset.c |  32
-rw-r--r--  drivers/isdn/gigaset/ser-gigaset.c |  32
-rw-r--r--  drivers/isdn/gigaset/usb-gigaset.c |  32
-rw-r--r--  drivers/isdn/hisax/config.c |  16
-rw-r--r--  drivers/isdn/hisax/hisax.h |   4
-rw-r--r--  drivers/isdn/i4l/isdn_concap.c |   6
-rw-r--r--  drivers/isdn/i4l/isdn_x25iface.c |  16
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c |   6
-rw-r--r--  drivers/net/ethernet/3com/3c515.c |  15
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_enet.c |   8
-rw-r--r--  drivers/net/ethernet/cadence/Kconfig |   9
-rw-r--r--  drivers/net/ethernet/cadence/Makefile |   1
-rw-r--r--  drivers/net/ethernet/cadence/macb.c |  31
-rw-r--r--  drivers/net/ethernet/cadence/macb_pci.c | 153
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/cxgb2.c |  64
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c |  65
-rw-r--r--  drivers/net/ethernet/cirrus/ep93xx_eth.c |  14
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c |  14
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/Kconfig |   2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c |  71
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_eth.c |   2
-rw-r--r--  drivers/net/ethernet/hisilicon/hisi_femac.c |   2
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c |  12
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c |   2
-rw-r--r--  drivers/net/ethernet/microchip/encx24j600-regmap.c |  17
-rw-r--r--  drivers/net/ethernet/microchip/encx24j600.c |  19
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iscsi.c |   2
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.c |   9
-rw-r--r--  drivers/net/ethernet/rdc/r6040.c |  10
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c |  35
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port.c |  60
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h |  12
-rw-r--r--  drivers/net/gtp.c |   8
-rw-r--r--  drivers/net/irda/w83977af_ir.c |   6
-rw-r--r--  drivers/net/virtio_net.c | 369
-rw-r--r--  drivers/net/vrf.c |   6
-rw-r--r--  drivers/net/wan/lmc/lmc_media.c |  97
-rw-r--r--  include/linux/bpf-cgroup.h |   2
-rw-r--r--  include/linux/bpf.h |  13
-rw-r--r--  include/linux/filter.h |  15
-rw-r--r--  include/linux/miscdevice.h |   1
-rw-r--r--  include/linux/platform_data/macb.h |   6
-rw-r--r--  include/net/inet6_connection_sock.h |   3
-rw-r--r--  include/net/inet_connection_sock.h |   6
-rw-r--r--  include/net/netlink.h |   3
-rw-r--r--  init/Kconfig |   3
-rw-r--r--  kernel/bpf/core.c |  43
-rw-r--r--  kernel/bpf/syscall.c |  38
-rw-r--r--  kernel/bpf/verifier.c |  28
-rw-r--r--  net/atm/lec.c |   6
-rw-r--r--  net/atm/mpoa_caches.c |  43
-rw-r--r--  net/core/filter.c |   6
-rw-r--r--  net/decnet/dn_dev.c |   2
-rw-r--r--  net/ipv4/inet_connection_sock.c |  16
-rw-r--r--  net/ipv6/inet6_connection_sock.c |   7
-rw-r--r--  net/ipv6/route.c |   2
-rw-r--r--  net/irda/irnet/irnet.h |   1
-rw-r--r--  net/irda/irnet/irnet_ppp.h |  11
-rw-r--r--  net/irda/irproc.c |   1
-rw-r--r--  net/mac80211/key.c |   3
-rw-r--r--  net/mac80211/rx.c |   2
-rw-r--r--  net/mac80211/sta_info.c |  14
-rw-r--r--  net/sched/cls_flower.c |   6
-rw-r--r--  net/sctp/endpointola.c |   5
-rw-r--r--  net/sctp/socket.c |   7
-rw-r--r--  net/vmw_vsock/vmci_transport_notify.c |  30
-rw-r--r--  net/vmw_vsock/vmci_transport_notify_qstate.c |  30
-rw-r--r--  net/x25/sysctl_net_x25.c |   2
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c |  30
71 files changed, 1206 insertions, 446 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index bf8690d0a1e1..f39414662ab6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5163,6 +5163,12 @@ S: Maintained
F: drivers/net/ethernet/freescale/fman
F: Documentation/devicetree/bindings/powerpc/fsl/fman.txt
+FREESCALE QORIQ DPAA ETHERNET DRIVER
+M: Madalin Bucur <madalin.bucur@nxp.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/freescale/dpaa
+
FREESCALE SOC DRIVERS
M: Scott Wood <oss@buserror.net>
L: linuxppc-dev@lists.ozlabs.org
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index aecec6d32463..11e13c56126f 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
static const struct gigaset_ops gigops = {
- gigaset_write_cmd,
- gigaset_write_room,
- gigaset_chars_in_buffer,
- gigaset_brkchars,
- gigaset_init_bchannel,
- gigaset_close_bchannel,
- gigaset_initbcshw,
- gigaset_freebcshw,
- gigaset_reinitbcshw,
- gigaset_initcshw,
- gigaset_freecshw,
- gigaset_set_modem_ctrl,
- gigaset_baud_rate,
- gigaset_set_line_ctrl,
- gigaset_isoc_send_skb,
- gigaset_isoc_input,
+ .write_cmd = gigaset_write_cmd,
+ .write_room = gigaset_write_room,
+ .chars_in_buffer = gigaset_chars_in_buffer,
+ .brkchars = gigaset_brkchars,
+ .init_bchannel = gigaset_init_bchannel,
+ .close_bchannel = gigaset_close_bchannel,
+ .initbcshw = gigaset_initbcshw,
+ .freebcshw = gigaset_freebcshw,
+ .reinitbcshw = gigaset_reinitbcshw,
+ .initcshw = gigaset_initcshw,
+ .freecshw = gigaset_freecshw,
+ .set_modem_ctrl = gigaset_set_modem_ctrl,
+ .baud_rate = gigaset_baud_rate,
+ .set_line_ctrl = gigaset_set_line_ctrl,
+ .send_skb = gigaset_isoc_send_skb,
+ .handle_input = gigaset_isoc_input,
};
/* bas_gigaset_init
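[Editor's sketch] Throughout this series, function-pointer tables (the three gigaset gigaset_ops instances here, and the isdn_concap / isdn_x25iface tables further down) are converted from positional to C99 designated initializers. A minimal, self-contained illustration of the pattern follows; the struct and callback names are hypothetical, not the gigaset API.

struct ops {
	int (*open)(void);
	int (*close)(void);
};

static int my_open(void)  { return 0; }
static int my_close(void) { return 0; }

/* Positional form: silently miswires callbacks if fields are ever reordered
 * or new members are inserted in the middle of the struct. */
static const struct ops legacy_ops = { my_open, my_close };

/* Designated form: each callback is bound to a named field, so the compiler
 * catches type mismatches and any member left out is zero-initialized. */
static const struct ops named_ops = {
	.open  = my_open,
	.close = my_close,
};

int main(void)
{
	return legacy_ops.open() + named_ops.close();
}

With named fields the binding no longer depends on member order, which is why the converted tables stay correct even if fields are later added to the ops structures.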
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index b90776ef56ec..ab0b63a4d045 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -445,22 +445,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
}
static const struct gigaset_ops ops = {
- gigaset_write_cmd,
- gigaset_write_room,
- gigaset_chars_in_buffer,
- gigaset_brkchars,
- gigaset_init_bchannel,
- gigaset_close_bchannel,
- gigaset_initbcshw,
- gigaset_freebcshw,
- gigaset_reinitbcshw,
- gigaset_initcshw,
- gigaset_freecshw,
- gigaset_set_modem_ctrl,
- gigaset_baud_rate,
- gigaset_set_line_ctrl,
- gigaset_m10x_send_skb, /* asyncdata.c */
- gigaset_m10x_input, /* asyncdata.c */
+ .write_cmd = gigaset_write_cmd,
+ .write_room = gigaset_write_room,
+ .chars_in_buffer = gigaset_chars_in_buffer,
+ .brkchars = gigaset_brkchars,
+ .init_bchannel = gigaset_init_bchannel,
+ .close_bchannel = gigaset_close_bchannel,
+ .initbcshw = gigaset_initbcshw,
+ .freebcshw = gigaset_freebcshw,
+ .reinitbcshw = gigaset_reinitbcshw,
+ .initcshw = gigaset_initcshw,
+ .freecshw = gigaset_freecshw,
+ .set_modem_ctrl = gigaset_set_modem_ctrl,
+ .baud_rate = gigaset_baud_rate,
+ .set_line_ctrl = gigaset_set_line_ctrl,
+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
};
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 5f306e2eece5..eade36dafa34 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -862,22 +862,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
}
static const struct gigaset_ops ops = {
- gigaset_write_cmd,
- gigaset_write_room,
- gigaset_chars_in_buffer,
- gigaset_brkchars,
- gigaset_init_bchannel,
- gigaset_close_bchannel,
- gigaset_initbcshw,
- gigaset_freebcshw,
- gigaset_reinitbcshw,
- gigaset_initcshw,
- gigaset_freecshw,
- gigaset_set_modem_ctrl,
- gigaset_baud_rate,
- gigaset_set_line_ctrl,
- gigaset_m10x_send_skb,
- gigaset_m10x_input,
+ .write_cmd = gigaset_write_cmd,
+ .write_room = gigaset_write_room,
+ .chars_in_buffer = gigaset_chars_in_buffer,
+ .brkchars = gigaset_brkchars,
+ .init_bchannel = gigaset_init_bchannel,
+ .close_bchannel = gigaset_close_bchannel,
+ .initbcshw = gigaset_initbcshw,
+ .freebcshw = gigaset_freebcshw,
+ .reinitbcshw = gigaset_reinitbcshw,
+ .initcshw = gigaset_initcshw,
+ .freecshw = gigaset_freecshw,
+ .set_modem_ctrl = gigaset_set_modem_ctrl,
+ .baud_rate = gigaset_baud_rate,
+ .set_line_ctrl = gigaset_set_line_ctrl,
+ .send_skb = gigaset_m10x_send_skb,
+ .handle_input = gigaset_m10x_input,
};
/*
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index bf04d2a3cf4a..2d12c6ceeb89 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -659,7 +659,7 @@ int jiftime(char *s, long mark)
static u_char tmpbuf[HISAX_STATUS_BUFSIZE];
-void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt,
+void VHiSax_putstatus(struct IsdnCardState *cs, char *head, const char *fmt,
va_list args)
{
/* if head == NULL the fmt contains the full info */
@@ -669,23 +669,24 @@ void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt,
u_char *p;
isdn_ctrl ic;
int len;
+ const u_char *data;
if (!cs) {
printk(KERN_WARNING "HiSax: No CardStatus for message");
return;
}
spin_lock_irqsave(&cs->statlock, flags);
- p = tmpbuf;
if (head) {
+ p = tmpbuf;
p += jiftime(p, jiffies);
p += sprintf(p, " %s", head);
p += vsprintf(p, fmt, args);
*p++ = '\n';
*p = 0;
len = p - tmpbuf;
- p = tmpbuf;
+ data = tmpbuf;
} else {
- p = fmt;
+ data = fmt;
len = strlen(fmt);
}
if (len > HISAX_STATUS_BUFSIZE) {
@@ -699,13 +700,12 @@ void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt,
if (i >= len)
i = len;
len -= i;
- memcpy(cs->status_write, p, i);
+ memcpy(cs->status_write, data, i);
cs->status_write += i;
if (cs->status_write > cs->status_end)
cs->status_write = cs->status_buf;
- p += i;
if (len) {
- memcpy(cs->status_write, p, len);
+ memcpy(cs->status_write, data + i, len);
cs->status_write += len;
}
#ifdef KERNELSTACK_DEBUG
@@ -729,7 +729,7 @@ void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt,
}
}
-void HiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, ...)
+void HiSax_putstatus(struct IsdnCardState *cs, char *head, const char *fmt, ...)
{
va_list args;
diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h
index 6ead6314e6d2..338d0408b377 100644
--- a/drivers/isdn/hisax/hisax.h
+++ b/drivers/isdn/hisax/hisax.h
@@ -1288,9 +1288,9 @@ int jiftime(char *s, long mark);
int HiSax_command(isdn_ctrl *ic);
int HiSax_writebuf_skb(int id, int chan, int ack, struct sk_buff *skb);
__printf(3, 4)
-void HiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, ...);
+void HiSax_putstatus(struct IsdnCardState *cs, char *head, const char *fmt, ...);
__printf(3, 0)
-void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, va_list args);
+void VHiSax_putstatus(struct IsdnCardState *cs, char *head, const char *fmt, va_list args);
void HiSax_reportcard(int cardnr, int sel);
int QuickHex(char *txt, u_char *p, int cnt);
void LogFrame(struct IsdnCardState *cs, u_char *p, int size);
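[Editor's sketch] The HiSax hunks above constify the printf-style format argument and split VHiSax_putstatus()'s single cursor in two: a mutable u_char *p is still used while composing into tmpbuf, but the bytes actually copied out go through a separate const pointer, because a const char *fmt can no longer be assigned to a non-const pointer. A small user-space illustration of that split, with hypothetical names and buffer handling that is not the driver's:

#include <stdio.h>
#include <string.h>

/* Compose "head: fmt" into buf when head is given, otherwise hand fmt on
 * untouched through a read-only pointer. */
static const char *compose(char *buf, size_t buflen,
			   const char *head, const char *fmt)
{
	const char *data;	/* read-only view passed onward */

	if (head) {
		int n = snprintf(buf, buflen, "%s: ", head);

		/* append fmt only if the prefix fit */
		if (n >= 0 && (size_t)n < buflen)
			snprintf(buf + n, buflen - n, "%s", fmt);
		data = buf;
	} else {
		/* const assigned to const: no qualifier is discarded, unlike
		 * the old "p = fmt" with a plain u_char *p */
		data = fmt;
	}
	return data;
}

int main(void)
{
	char buf[64];

	puts(compose(buf, sizeof(buf), "HiSax", "card 0 ready"));
	puts(compose(buf, sizeof(buf), NULL, "raw status line"));
	return 0;
}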
diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
index 91d57304d4d3..336523ec077c 100644
--- a/drivers/isdn/i4l/isdn_concap.c
+++ b/drivers/isdn/i4l/isdn_concap.c
@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
}
struct concap_device_ops isdn_concap_reliable_dl_dops = {
- &isdn_concap_dl_data_req,
- &isdn_concap_dl_connect_req,
- &isdn_concap_dl_disconn_req
+ .data_req = &isdn_concap_dl_data_req,
+ .connect_req = &isdn_concap_dl_connect_req,
+ .disconn_req = &isdn_concap_dl_disconn_req
};
/* The following should better go into a dedicated source file such that
diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
index 0c5d8de41b23..ba60076e0b95 100644
--- a/drivers/isdn/i4l/isdn_x25iface.c
+++ b/drivers/isdn/i4l/isdn_x25iface.c
@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
static struct concap_proto_ops ix25_pops = {
- &isdn_x25iface_proto_new,
- &isdn_x25iface_proto_del,
- &isdn_x25iface_proto_restart,
- &isdn_x25iface_proto_close,
- &isdn_x25iface_xmit,
- &isdn_x25iface_receive,
- &isdn_x25iface_connect_ind,
- &isdn_x25iface_disconn_ind
+ .proto_new = &isdn_x25iface_proto_new,
+ .proto_del = &isdn_x25iface_proto_del,
+ .restart = &isdn_x25iface_proto_restart,
+ .close = &isdn_x25iface_proto_close,
+ .encap_and_xmit = &isdn_x25iface_xmit,
+ .data_ind = &isdn_x25iface_receive,
+ .connect_ind = &isdn_x25iface_connect_ind,
+ .disconn_ind = &isdn_x25iface_disconn_ind
};
/* error message helper function */
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 4da379f28d5d..f7222dc6581d 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1775,6 +1775,9 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
continue;
+ if (!ds->ports[port].netdev)
+ continue;
+
if (vlan.data[i] ==
GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
continue;
@@ -1783,6 +1786,9 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
chip->ports[port].bridge_dev)
break; /* same bridge, check next VLAN */
+ if (!chip->ports[i].bridge_dev)
+ continue;
+
netdev_warn(ds->ports[port].netdev,
"hardware VLAN %d already used by %s\n",
vlan.vid,
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index b9f4c463e516..be5b80103bec 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -627,6 +627,8 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
spin_lock_init(&vp->lock);
+ setup_timer(&vp->timer, corkscrew_timer, (unsigned long) dev);
+
/* Read the station address from the EEPROM. */
EL3WINDOW(0);
for (i = 0; i < 0x18; i++) {
@@ -707,6 +709,7 @@ static int corkscrew_open(struct net_device *dev)
{
int ioaddr = dev->base_addr;
struct corkscrew_private *vp = netdev_priv(dev);
+ bool armtimer = false;
__u32 config;
int i;
@@ -731,12 +734,7 @@ static int corkscrew_open(struct net_device *dev)
if (corkscrew_debug > 1)
pr_debug("%s: Initial media type %s.\n",
dev->name, media_tbl[dev->if_port].name);
-
- init_timer(&vp->timer);
- vp->timer.expires = jiffies + media_tbl[dev->if_port].wait;
- vp->timer.data = (unsigned long) dev;
- vp->timer.function = corkscrew_timer; /* timer handler */
- add_timer(&vp->timer);
+ armtimer = true;
} else
dev->if_port = vp->default_media;
@@ -776,6 +774,9 @@ static int corkscrew_open(struct net_device *dev)
return -EAGAIN;
}
+ if (armtimer)
+ mod_timer(&vp->timer, jiffies + media_tbl[dev->if_port].wait);
+
if (corkscrew_debug > 1) {
EL3WINDOW(4);
pr_debug("%s: corkscrew_open() irq %d media status %4.4x.\n",
@@ -1426,7 +1427,7 @@ static int corkscrew_close(struct net_device *dev)
dev->name, rx_nocopy, rx_copy, queued_packet);
}
- del_timer(&vp->timer);
+ del_timer_sync(&vp->timer);
/* Turn off statistics ASAP. We update lp->stats below. */
outw(StatsDisable, ioaddr + EL3_CMD);
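[Editor's sketch] The 3c515 hunks move timer initialization out of corkscrew_open() into corkscrew_setup(): the timer object is set up once with setup_timer(), merely armed with mod_timer() when the interface opens, and torn down with del_timer_sync() on close so a handler still running on another CPU is waited for. A hedged sketch of that lifecycle against the pre-4.15 timer API, using a made-up driver struct rather than corkscrew_private:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_priv {			/* hypothetical private data */
	struct timer_list timer;
};

static void demo_timer_fn(unsigned long data)
{
	struct demo_priv *priv = (struct demo_priv *)data;
	/* ... poll hardware, optionally re-arm with mod_timer() ... */
	(void)priv;
}

static void demo_setup(struct demo_priv *priv)
{
	/* one-time initialization, analogous to the new setup_timer() call */
	setup_timer(&priv->timer, demo_timer_fn, (unsigned long)priv);
}

static void demo_open(struct demo_priv *priv)
{
	/* arming only; nothing is re-initialized on every open */
	mod_timer(&priv->timer, jiffies + HZ);
}

static void demo_close(struct demo_priv *priv)
{
	/* waits for a concurrently running handler before returning */
	del_timer_sync(&priv->timer);
}

Separating one-time initialization from arming is what makes the synchronous teardown in close safe.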
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 4e5c3874a50f..bba81735ce87 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -1676,10 +1676,10 @@ bna_cb_ioceth_reset(void *arg)
}
static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
- bna_cb_ioceth_enable,
- bna_cb_ioceth_disable,
- bna_cb_ioceth_hbfail,
- bna_cb_ioceth_reset
+ .enable_cbfn = bna_cb_ioceth_enable,
+ .disable_cbfn = bna_cb_ioceth_disable,
+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
+ .reset_cbfn = bna_cb_ioceth_reset
};
static void bna_attr_init(struct bna_ioceth *ioceth)
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index f0bcb15d3fec..608bea171956 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -31,4 +31,13 @@ config MACB
To compile this driver as a module, choose M here: the module
will be called macb.
+config MACB_PCI
+ tristate "Cadence PCI MACB/GEM support"
+ depends on MACB && PCI && COMMON_CLK
+ ---help---
+ This is PCI wrapper for MACB driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called macb_pci.
+
endif # NET_CADENCE
diff --git a/drivers/net/ethernet/cadence/Makefile b/drivers/net/ethernet/cadence/Makefile
index 91f79b1f0505..4ba75594d5c5 100644
--- a/drivers/net/ethernet/cadence/Makefile
+++ b/drivers/net/ethernet/cadence/Makefile
@@ -3,3 +3,4 @@
#
obj-$(CONFIG_MACB) += macb.o
+obj-$(CONFIG_MACB_PCI) += macb_pci.o
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 538544a7c642..c0fb80acc2da 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -404,6 +404,8 @@ static int macb_mii_probe(struct net_device *dev)
phy_irq = gpio_to_irq(pdata->phy_irq_pin);
phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
}
+ } else {
+ phydev->irq = PHY_POLL;
}
/* attach the mac to the phy */
@@ -482,6 +484,9 @@ static int macb_mii_init(struct macb *bp)
goto err_out_unregister_bus;
}
} else {
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ bp->mii_bus->irq[i] = PHY_POLL;
+
if (pdata)
bp->mii_bus->phy_mask = pdata->phy_mask;
@@ -2523,16 +2528,24 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
struct clk **hclk, struct clk **tx_clk,
struct clk **rx_clk)
{
+ struct macb_platform_data *pdata;
int err;
- *pclk = devm_clk_get(&pdev->dev, "pclk");
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata) {
+ *pclk = pdata->pclk;
+ *hclk = pdata->hclk;
+ } else {
+ *pclk = devm_clk_get(&pdev->dev, "pclk");
+ *hclk = devm_clk_get(&pdev->dev, "hclk");
+ }
+
if (IS_ERR(*pclk)) {
err = PTR_ERR(*pclk);
dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
return err;
}
- *hclk = devm_clk_get(&pdev->dev, "hclk");
if (IS_ERR(*hclk)) {
err = PTR_ERR(*hclk);
dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
@@ -3107,15 +3120,23 @@ static const struct of_device_id macb_dt_ids[] = {
MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif /* CONFIG_OF */
+static const struct macb_config default_gem_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+ .jumbo_max_len = 10240,
+};
+
static int macb_probe(struct platform_device *pdev)
{
+ const struct macb_config *macb_config = &default_gem_config;
int (*clk_init)(struct platform_device *, struct clk **,
struct clk **, struct clk **, struct clk **)
- = macb_clk_init;
- int (*init)(struct platform_device *) = macb_init;
+ = macb_config->clk_init;
+ int (*init)(struct platform_device *) = macb_config->init;
struct device_node *np = pdev->dev.of_node;
struct device_node *phy_node;
- const struct macb_config *macb_config = NULL;
struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
unsigned int queue_mask, num_queues;
struct macb_platform_data *pdata;
diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c
new file mode 100644
index 000000000000..92be2cd8f817
--- /dev/null
+++ b/drivers/net/ethernet/cadence/macb_pci.c
@@ -0,0 +1,153 @@
+/**
+ * macb_pci.c - Cadence GEM PCI wrapper.
+ *
+ * Copyright (C) 2016 Cadence Design Systems - http://www.cadence.com
+ *
+ * Authors: Rafal Ozieblo <rafalo@cadence.com>
+ * Bartosz Folta <bfolta@cadence.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_data/macb.h>
+#include <linux/platform_device.h>
+#include "macb.h"
+
+#define PCI_DRIVER_NAME "macb_pci"
+#define PLAT_DRIVER_NAME "macb"
+
+#define CDNS_VENDOR_ID 0x17cd
+#define CDNS_DEVICE_ID 0xe007
+
+#define GEM_PCLK_RATE 50000000
+#define GEM_HCLK_RATE 50000000
+
+static int macb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int err;
+ struct platform_device *plat_dev;
+ struct platform_device_info plat_info;
+ struct macb_platform_data plat_data;
+ struct resource res[2];
+
+ /* sanity check */
+ if (!id)
+ return -EINVAL;
+
+ /* enable pci device */
+ err = pci_enable_device(pdev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Enabling PCI device has failed: 0x%04X",
+ err);
+ return -EACCES;
+ }
+
+ pci_set_master(pdev);
+
+ /* set up resources */
+ memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
+ res[0].start = pdev->resource[0].start;
+ res[0].end = pdev->resource[0].end;
+ res[0].name = PCI_DRIVER_NAME;
+ res[0].flags = IORESOURCE_MEM;
+ res[1].start = pdev->irq;
+ res[1].name = PCI_DRIVER_NAME;
+ res[1].flags = IORESOURCE_IRQ;
+
+ dev_info(&pdev->dev, "EMAC physical base addr = 0x%p\n",
+ (void *)(uintptr_t)pci_resource_start(pdev, 0));
+
+ /* set up macb platform data */
+ memset(&plat_data, 0, sizeof(plat_data));
+
+ /* initialize clocks */
+ plat_data.pclk = clk_register_fixed_rate(&pdev->dev, "pclk", NULL, 0,
+ GEM_PCLK_RATE);
+ if (IS_ERR(plat_data.pclk)) {
+ err = PTR_ERR(plat_data.pclk);
+ goto err_pclk_register;
+ }
+
+ plat_data.hclk = clk_register_fixed_rate(&pdev->dev, "hclk", NULL, 0,
+ GEM_HCLK_RATE);
+ if (IS_ERR(plat_data.hclk)) {
+ err = PTR_ERR(plat_data.hclk);
+ goto err_hclk_register;
+ }
+
+ /* set up platform device info */
+ memset(&plat_info, 0, sizeof(plat_info));
+ plat_info.parent = &pdev->dev;
+ plat_info.fwnode = pdev->dev.fwnode;
+ plat_info.name = PLAT_DRIVER_NAME;
+ plat_info.id = pdev->devfn;
+ plat_info.res = res;
+ plat_info.num_res = ARRAY_SIZE(res);
+ plat_info.data = &plat_data;
+ plat_info.size_data = sizeof(plat_data);
+ plat_info.dma_mask = DMA_BIT_MASK(32);
+
+ /* register platform device */
+ plat_dev = platform_device_register_full(&plat_info);
+ if (IS_ERR(plat_dev)) {
+ err = PTR_ERR(plat_dev);
+ goto err_plat_dev_register;
+ }
+
+ pci_set_drvdata(pdev, plat_dev);
+
+ return 0;
+
+err_plat_dev_register:
+ clk_unregister(plat_data.hclk);
+
+err_hclk_register:
+ clk_unregister(plat_data.pclk);
+
+err_pclk_register:
+ pci_disable_device(pdev);
+ return err;
+}
+
+static void macb_remove(struct pci_dev *pdev)
+{
+ struct platform_device *plat_dev = pci_get_drvdata(pdev);
+ struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev);
+
+ platform_device_unregister(plat_dev);
+ pci_disable_device(pdev);
+ clk_unregister(plat_data->pclk);
+ clk_unregister(plat_data->hclk);
+}
+
+static struct pci_device_id dev_id_table[] = {
+ { PCI_DEVICE(CDNS_VENDOR_ID, CDNS_DEVICE_ID), },
+ { 0, }
+};
+
+static struct pci_driver macb_pci_driver = {
+ .name = PCI_DRIVER_NAME,
+ .id_table = dev_id_table,
+ .probe = macb_probe,
+ .remove = macb_remove,
+};
+
+module_pci_driver(macb_pci_driver);
+MODULE_DEVICE_TABLE(pci, dev_id_table);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence NIC PCI wrapper");
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 81d1d0bc7553..3a05f9098e75 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -568,28 +568,33 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}
-static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct adapter *adapter = dev->ml_priv;
struct port_info *p = &adapter->port[dev->if_port];
+ u32 supported, advertising;
- cmd->supported = p->link_config.supported;
- cmd->advertising = p->link_config.advertising;
+ supported = p->link_config.supported;
+ advertising = p->link_config.advertising;
if (netif_carrier_ok(dev)) {
- ethtool_cmd_speed_set(cmd, p->link_config.speed);
- cmd->duplex = p->link_config.duplex;
+ cmd->base.speed = p->link_config.speed;
+ cmd->base.duplex = p->link_config.duplex;
} else {
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
- cmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
- cmd->phy_address = p->phy->mdio.prtad;
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->autoneg = p->link_config.autoneg;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+ cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+ cmd->base.phy_address = p->phy->mdio.prtad;
+ cmd->base.autoneg = p->link_config.autoneg;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
@@ -628,36 +633,41 @@ static int speed_duplex_to_caps(int speed, int duplex)
ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
ADVERTISED_10000baseT_Full)
-static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct adapter *adapter = dev->ml_priv;
struct port_info *p = &adapter->port[dev->if_port];
struct link_config *lc = &p->link_config;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
if (!(lc->supported & SUPPORTED_Autoneg))
return -EOPNOTSUPP; /* can't change speed/duplex */
- if (cmd->autoneg == AUTONEG_DISABLE) {
- u32 speed = ethtool_cmd_speed(cmd);
- int cap = speed_duplex_to_caps(speed, cmd->duplex);
+ if (cmd->base.autoneg == AUTONEG_DISABLE) {
+ u32 speed = cmd->base.speed;
+ int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
if (!(lc->supported & cap) || (speed == SPEED_1000))
return -EINVAL;
lc->requested_speed = speed;
- lc->requested_duplex = cmd->duplex;
+ lc->requested_duplex = cmd->base.duplex;
lc->advertising = 0;
} else {
- cmd->advertising &= ADVERTISED_MASK;
- if (cmd->advertising & (cmd->advertising - 1))
- cmd->advertising = lc->supported;
- cmd->advertising &= lc->supported;
- if (!cmd->advertising)
+ advertising &= ADVERTISED_MASK;
+ if (advertising & (advertising - 1))
+ advertising = lc->supported;
+ advertising &= lc->supported;
+ if (!advertising)
return -EINVAL;
lc->requested_speed = SPEED_INVALID;
lc->requested_duplex = DUPLEX_INVALID;
- lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
+ lc->advertising = advertising | ADVERTISED_Autoneg;
}
- lc->autoneg = cmd->autoneg;
+ lc->autoneg = cmd->base.autoneg;
if (netif_running(dev))
t1_link_start(p->phy, p->mac, lc);
return 0;
@@ -788,8 +798,6 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
}
static const struct ethtool_ops t1_ethtool_ops = {
- .get_settings = get_settings,
- .set_settings = set_settings,
.get_drvinfo = get_drvinfo,
.get_msglevel = get_msglevel,
.set_msglevel = set_msglevel,
@@ -807,6 +815,8 @@ static const struct ethtool_ops t1_ethtool_ops = {
.get_ethtool_stats = get_stats,
.get_regs_len = get_regs_len,
.get_regs = get_regs,
+ .get_link_ksettings = get_link_ksettings,
+ .set_link_ksettings = set_link_ksettings,
};
static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
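[Editor's sketch] cxgb2 here, and cxgb3, ep93xx, dm9000 and sfc below, move from the legacy get_settings/set_settings ethtool hooks to get_link_ksettings/set_link_ksettings. Speed, duplex, port and autoneg now live in cmd->base, while supported/advertised modes become link-mode bitmaps, so drivers that still track capabilities as legacy u32 masks go through the conversion helpers. A minimal sketch of the read side with a hypothetical per-port structure (not the Chelsio one):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct demo_port {
	u32 supported;		/* legacy SUPPORTED_* mask */
	u32 advertising;	/* legacy ADVERTISED_* mask */
	u32 speed;
	u8  duplex;
	u8  autoneg;
};

static int demo_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	struct demo_port *p = netdev_priv(dev);

	/* legacy u32 masks -> link-mode bitmaps */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						p->supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						p->advertising);

	/* speed/duplex/autoneg moved into cmd->base */
	cmd->base.speed   = p->speed;
	cmd->base.duplex  = p->duplex;
	cmd->base.autoneg = p->autoneg;
	return 0;
}

The set path reverses the conversion with ethtool_convert_link_mode_to_legacy_u32(), as the cxgb hunks do; the ep93xx and dm9000 hunks instead lean on mii_ethtool_get_link_ksettings()/mii_ethtool_set_link_ksettings() from the generic MII library.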
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 092b3c16440b..7b2224ae72f2 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1801,27 +1801,31 @@ static int set_phys_id(struct net_device *dev,
return 0;
}
-static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct port_info *p = netdev_priv(dev);
+ u32 supported;
- cmd->supported = p->link_config.supported;
- cmd->advertising = p->link_config.advertising;
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ p->link_config.supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ p->link_config.advertising);
if (netif_carrier_ok(dev)) {
- ethtool_cmd_speed_set(cmd, p->link_config.speed);
- cmd->duplex = p->link_config.duplex;
+ cmd->base.speed = p->link_config.speed;
+ cmd->base.duplex = p->link_config.duplex;
} else {
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
- cmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
- cmd->phy_address = p->phy.mdio.prtad;
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->autoneg = p->link_config.autoneg;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+
+ cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+ cmd->base.phy_address = p->phy.mdio.prtad;
+ cmd->base.autoneg = p->link_config.autoneg;
return 0;
}
@@ -1860,44 +1864,49 @@ static int speed_duplex_to_caps(int speed, int duplex)
ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
ADVERTISED_10000baseT_Full)
-static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct port_info *p = netdev_priv(dev);
struct link_config *lc = &p->link_config;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
if (!(lc->supported & SUPPORTED_Autoneg)) {
/*
* PHY offers a single speed/duplex. See if that's what's
* being requested.
*/
- if (cmd->autoneg == AUTONEG_DISABLE) {
- u32 speed = ethtool_cmd_speed(cmd);
- int cap = speed_duplex_to_caps(speed, cmd->duplex);
+ if (cmd->base.autoneg == AUTONEG_DISABLE) {
+ u32 speed = cmd->base.speed;
+ int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
if (lc->supported & cap)
return 0;
}
return -EINVAL;
}
- if (cmd->autoneg == AUTONEG_DISABLE) {
- u32 speed = ethtool_cmd_speed(cmd);
- int cap = speed_duplex_to_caps(speed, cmd->duplex);
+ if (cmd->base.autoneg == AUTONEG_DISABLE) {
+ u32 speed = cmd->base.speed;
+ int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
if (!(lc->supported & cap) || (speed == SPEED_1000))
return -EINVAL;
lc->requested_speed = speed;
- lc->requested_duplex = cmd->duplex;
+ lc->requested_duplex = cmd->base.duplex;
lc->advertising = 0;
} else {
- cmd->advertising &= ADVERTISED_MASK;
- cmd->advertising &= lc->supported;
- if (!cmd->advertising)
+ advertising &= ADVERTISED_MASK;
+ advertising &= lc->supported;
+ if (!advertising)
return -EINVAL;
lc->requested_speed = SPEED_INVALID;
lc->requested_duplex = DUPLEX_INVALID;
- lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
+ lc->advertising = advertising | ADVERTISED_Autoneg;
}
- lc->autoneg = cmd->autoneg;
+ lc->autoneg = cmd->base.autoneg;
if (netif_running(dev))
t3_link_start(&p->phy, &p->mac, lc);
return 0;
@@ -2097,8 +2106,6 @@ static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
}
static const struct ethtool_ops cxgb_ethtool_ops = {
- .get_settings = get_settings,
- .set_settings = set_settings,
.get_drvinfo = get_drvinfo,
.get_msglevel = get_msglevel,
.set_msglevel = set_msglevel,
@@ -2120,6 +2127,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_regs_len = get_regs_len,
.get_regs = get_regs,
.get_wol = get_wol,
+ .get_link_ksettings = get_link_ksettings,
+ .set_link_ksettings = set_link_ksettings,
};
static int in_range(int val, int lo, int hi)
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index a1de0d12927d..396c88678eab 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -715,16 +715,18 @@ static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
-static int ep93xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int ep93xx_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct ep93xx_priv *ep = netdev_priv(dev);
- return mii_ethtool_gset(&ep->mii, cmd);
+ return mii_ethtool_get_link_ksettings(&ep->mii, cmd);
}
-static int ep93xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int ep93xx_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ep93xx_priv *ep = netdev_priv(dev);
- return mii_ethtool_sset(&ep->mii, cmd);
+ return mii_ethtool_set_link_ksettings(&ep->mii, cmd);
}
static int ep93xx_nway_reset(struct net_device *dev)
@@ -741,10 +743,10 @@ static u32 ep93xx_get_link(struct net_device *dev)
static const struct ethtool_ops ep93xx_ethtool_ops = {
.get_drvinfo = ep93xx_get_drvinfo,
- .get_settings = ep93xx_get_settings,
- .set_settings = ep93xx_set_settings,
.nway_reset = ep93xx_nway_reset,
.get_link = ep93xx_get_link,
+ .get_link_ksettings = ep93xx_get_link_ksettings,
+ .set_link_ksettings = ep93xx_set_link_ksettings,
};
static const struct net_device_ops ep93xx_netdev_ops = {
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index f1a81c52afe3..008dc8161775 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -570,19 +570,21 @@ static void dm9000_set_msglevel(struct net_device *dev, u32 value)
dm->msg_enable = value;
}
-static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int dm9000_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct board_info *dm = to_dm9000_board(dev);
- mii_ethtool_gset(&dm->mii, cmd);
+ mii_ethtool_get_link_ksettings(&dm->mii, cmd);
return 0;
}
-static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int dm9000_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct board_info *dm = to_dm9000_board(dev);
- return mii_ethtool_sset(&dm->mii, cmd);
+ return mii_ethtool_set_link_ksettings(&dm->mii, cmd);
}
static int dm9000_nway_reset(struct net_device *dev)
@@ -741,8 +743,6 @@ static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
static const struct ethtool_ops dm9000_ethtool_ops = {
.get_drvinfo = dm9000_get_drvinfo,
- .get_settings = dm9000_get_settings,
- .set_settings = dm9000_set_settings,
.get_msglevel = dm9000_get_msglevel,
.set_msglevel = dm9000_set_msglevel,
.nway_reset = dm9000_nway_reset,
@@ -752,6 +752,8 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
.get_eeprom_len = dm9000_get_eeprom_len,
.get_eeprom = dm9000_get_eeprom,
.set_eeprom = dm9000_set_eeprom,
+ .get_link_ksettings = dm9000_get_link_ksettings,
+ .set_link_ksettings = dm9000_set_link_ksettings,
};
static void dm9000_show_carrier(struct board_info *db,
diff --git a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig
index f3a3454805f9..a654736237a9 100644
--- a/drivers/net/ethernet/freescale/dpaa/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa/Kconfig
@@ -1,6 +1,6 @@
menuconfig FSL_DPAA_ETH
tristate "DPAA Ethernet"
- depends on FSL_SOC && FSL_DPAA && FSL_FMAN
+ depends on FSL_DPAA && FSL_FMAN
select PHYLIB
select FSL_FMAN_MAC
---help---
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 3c48a84dec86..624ba9058dc4 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -733,7 +733,7 @@ static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
/* Enable Congestion State Change Notifications and CS taildrop */
- initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES;
+ initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
initcgr.cgr.cscn_en = QM_CGR_EN;
/* Set different thresholds based on the MAC speed.
@@ -747,7 +747,7 @@ static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
cs_th = DPAA_CS_THRESHOLD_1G;
qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
- initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
+ initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
initcgr.cgr.cstd_en = QM_CGR_EN;
err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
@@ -896,18 +896,18 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
if (dpaa_fq->init) {
memset(&initfq, 0, sizeof(initfq));
- initfq.we_mask = QM_INITFQ_WE_FQCTRL;
+ initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL);
/* Note: we may get to keep an empty FQ in cache */
- initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
+ initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);
/* Try to reduce the number of portal interrupts for
* Tx Confirmation FQs.
*/
if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
- initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
/* FQ placement */
- initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);
qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq);
@@ -920,8 +920,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
if (dpaa_fq->fq_type == FQ_TYPE_TX ||
dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
- initfq.we_mask |= QM_INITFQ_WE_CGID;
- initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
/* Set a fixed overhead accounting, in an attempt to
* reduce the impact of fixed-size skb shells and the
@@ -932,7 +932,7 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
* insufficient value, but even that is better than
* no overhead accounting at all.
*/
- initfq.we_mask |= QM_INITFQ_WE_OAC;
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
qm_fqd_set_oal(&initfq.fqd,
min(sizeof(struct sk_buff) +
@@ -941,9 +941,9 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
}
if (td_enable) {
- initfq.we_mask |= QM_INITFQ_WE_TDTHRESH;
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH);
qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1);
- initfq.fqd.fq_ctrl = QM_FQCTRL_TDE;
+ initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE);
}
if (dpaa_fq->fq_type == FQ_TYPE_TX) {
@@ -951,7 +951,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
if (queue_id >= 0)
confq = priv->conf_fqs[queue_id];
if (confq) {
- initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ initfq.we_mask |=
+ cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
/* ContextA: OVOM=1(use contextA2 bits instead of ICAD)
* A2V=1 (contextA A2 field is valid)
* A0V=1 (contextA A0 field is valid)
@@ -959,8 +960,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
* ContextA A2: EBD=1 (deallocate buffers inside FMan)
* ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
*/
- initfq.fqd.context_a.hi = 0x1e000000;
- initfq.fqd.context_a.lo = 0x80000000;
+ qm_fqd_context_a_set64(&initfq.fqd,
+ 0x1e00000080000000ULL);
}
}
@@ -968,13 +969,13 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
if (priv->use_ingress_cgr &&
(dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
dpaa_fq->fq_type == FQ_TYPE_RX_ERROR)) {
- initfq.we_mask |= QM_INITFQ_WE_CGID;
- initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
/* Set a fixed overhead accounting, just like for the
* egress CGR.
*/
- initfq.we_mask |= QM_INITFQ_WE_OAC;
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
qm_fqd_set_oal(&initfq.fqd,
min(sizeof(struct sk_buff) +
@@ -984,9 +985,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
/* Initialization common to all ingress queues */
if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
- initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
- initfq.fqd.fq_ctrl |=
- QM_FQCTRL_HOLDACTIVE;
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
initfq.fqd.context_a.stashing.exclusive =
QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
QM_STASHING_EXCL_ANNOTATION;
@@ -1350,7 +1350,7 @@ static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
parse_result->l4_off = (u8)skb_transport_offset(skb);
/* Enable L3 (and L4, if TCP or UDP) HW checksum. */
- fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
+ fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC);
/* On P1023 and similar platforms fd->cmd interpretation could
* be disabled by setting CONTEXT_A bit ICMD; currently this bit
@@ -1732,7 +1732,7 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
/* Fill in the rest of the FD fields */
qm_fd_set_contig(fd, priv->tx_headroom, skb->len);
- fd->cmd |= FM_FD_CMD_FCO;
+ fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
/* Map the entire buffer size that may be seen by FMan, but no more */
addr = dma_map_single(dev, skbh,
@@ -1840,7 +1840,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
}
fd->bpid = FSL_DPAA_BPID_INV;
- fd->cmd |= FM_FD_CMD_FCO;
+ fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
qm_fd_addr_set64(fd, addr);
return 0;
@@ -1867,7 +1867,7 @@ static inline int dpaa_xmit(struct dpaa_priv *priv,
egress_fq = priv->egress_fqs[queue];
if (fd->bpid == FSL_DPAA_BPID_INV)
- fd->cmd |= qman_fq_fqid(priv->conf_fqs[queue]);
+ fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue]));
/* Trace this Tx fd */
trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd);
@@ -1960,17 +1960,17 @@ static void dpaa_rx_error(struct net_device *net_dev,
{
if (net_ratelimit())
netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n",
- fd->status & FM_FD_STAT_RX_ERRORS);
+ be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS);
percpu_priv->stats.rx_errors++;
- if (fd->status & FM_FD_ERR_DMA)
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA)
percpu_priv->rx_errors.dme++;
- if (fd->status & FM_FD_ERR_PHYSICAL)
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL)
percpu_priv->rx_errors.fpe++;
- if (fd->status & FM_FD_ERR_SIZE)
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE)
percpu_priv->rx_errors.fse++;
- if (fd->status & FM_FD_ERR_PRS_HDR_ERR)
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR)
percpu_priv->rx_errors.phe++;
dpaa_fd_release(net_dev, fd);
@@ -1986,7 +1986,7 @@ static void dpaa_tx_error(struct net_device *net_dev,
if (net_ratelimit())
netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
- fd->status & FM_FD_STAT_TX_ERRORS);
+ be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS);
percpu_priv->stats.tx_errors++;
@@ -2020,10 +2020,11 @@ static void dpaa_tx_conf(struct net_device *net_dev,
{
struct sk_buff *skb;
- if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS) != 0) {
+ if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) {
if (net_ratelimit())
netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
- fd->status & FM_FD_STAT_TX_ERRORS);
+ be32_to_cpu(fd->status) &
+ FM_FD_STAT_TX_ERRORS);
percpu_priv->stats.tx_errors++;
}
@@ -2100,6 +2101,8 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
struct sk_buff *skb;
int *count_ptr;
+ fd_status = be32_to_cpu(fd->status);
+ fd_format = qm_fd_get_format(fd);
net_dev = ((struct dpaa_fq *)fq)->net_dev;
priv = netdev_priv(net_dev);
dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
@@ -2417,12 +2420,12 @@ static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
}
/* Enable CS TD, but disable Congestion State Change Notifications. */
- initcgr.we_mask = QM_CGR_WE_CS_THRES;
+ initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
initcgr.cgr.cscn_en = QM_CGR_EN;
cs_th = DPAA_INGRESS_CS_THRESHOLD;
qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
- initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
+ initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
initcgr.cgr.cstd_en = QM_CGR_EN;
/* This CGR will be associated with the SWP affined to the current CPU.
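[Editor's sketch] The dpaa_eth changes add explicit byte-order conversions because the QMan frame-queue and congestion-group descriptors are big-endian hardware structures while the CPU running the driver may be little-endian: values written by software are wrapped in cpu_to_be16()/cpu_to_be32(), and status words read back are converted with be32_to_cpu() before being masked. A trivial sketch of the idiom with a made-up descriptor layout, not the real QMan structures:

#include <linux/types.h>
#include <asm/byteorder.h>

/* hypothetical big-endian hardware descriptor */
struct demo_desc {
	__be16 we_mask;		/* write-enable bits, device byte order */
	__be32 status;		/* status word read back from the device */
};

#define DEMO_WE_ENABLE   0x0001
#define DEMO_STAT_ERRORS 0x00ff0000

static void demo_fill(struct demo_desc *d)
{
	/* host-order constant -> device byte order before writing */
	d->we_mask = cpu_to_be16(DEMO_WE_ENABLE);
}

static bool demo_has_errors(const struct demo_desc *d)
{
	/* device byte order -> host order before masking */
	return be32_to_cpu(d->status) & DEMO_STAT_ERRORS;
}

Annotating the fields as __be16/__be32 also lets sparse flag any access that skips the conversion.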
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 854befde0a08..97b184774784 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -828,6 +828,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
priv->ndev = ndev;
platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(d, res);
@@ -903,7 +904,6 @@ static int hip04_mac_probe(struct platform_device *pdev)
ndev->priv_flags |= IFF_UNICAST_FLT;
ndev->irq = irq;
netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);
- SET_NETDEV_DEV(ndev, &pdev->dev);
hip04_reset_ppe(priv);
if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index 49863068c59e..979852d56f31 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -805,6 +805,7 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
priv = netdev_priv(ndev);
priv->dev = dev;
@@ -882,7 +883,6 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
ndev->netdev_ops = &hisi_femac_netdev_ops;
ndev->ethtool_ops = &hisi_femac_ethtools_ops;
netif_napi_add(ndev, &priv->napi, hisi_femac_poll, FEMAC_POLL_WEIGHT);
- SET_NETDEV_DEV(ndev, &pdev->dev);
hisi_femac_port_init(priv);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index fbece63395a8..a831f947ca8c 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1181,7 +1181,9 @@ map_failed:
static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
{
+ struct tcphdr *tcph;
int offset = 0;
+ int hdr_len;
/* only TCP packets will be aggregated */
if (skb->protocol == htons(ETH_P_IP)) {
@@ -1208,14 +1210,20 @@ static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
/* if mss is not set through Large Packet bit/mss in rx buffer,
* expect that the mss will be written to the tcp header checksum.
*/
+ tcph = (struct tcphdr *)(skb->data + offset);
if (lrg_pkt) {
skb_shinfo(skb)->gso_size = mss;
} else if (offset) {
- struct tcphdr *tcph = (struct tcphdr *)(skb->data + offset);
-
skb_shinfo(skb)->gso_size = ntohs(tcph->check);
tcph->check = 0;
}
+
+ if (skb_shinfo(skb)->gso_size) {
+ hdr_len = offset + tcph->doff * 4;
+ skb_shinfo(skb)->gso_segs =
+ DIV_ROUND_UP(skb->len - hdr_len,
+ skb_shinfo(skb)->gso_size);
+ }
}
static int ibmveth_poll(struct napi_struct *napi, int budget)
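[Editor's sketch] The ibmveth hunk fills in gso_segs for aggregated receive packets: once gso_size (the MSS) is known, the header length is the transport offset plus the TCP header size (doff * 4), and the segment count is the remaining payload divided by the MSS, rounded up. A small user-space illustration; the 9014-byte frame and MSS below are example numbers, not values taken from the driver:

#include <stdio.h>

/* segments = ceil((total_len - headers) / mss) */
static unsigned int gso_segs(unsigned int skb_len, unsigned int l4_offset,
			     unsigned int tcp_doff_words, unsigned int mss)
{
	unsigned int hdr_len = l4_offset + tcp_doff_words * 4;
	unsigned int payload = skb_len - hdr_len;

	return (payload + mss - 1) / mss;	/* DIV_ROUND_UP */
}

int main(void)
{
	/* 9014-byte aggregated frame, 14B Ethernet + 20B IP, 20B TCP header
	 * (doff = 5), MSS 1448 -> prints 7 */
	printf("%u\n", gso_segs(9014, 34, 5, 1448));
	return 0;
}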
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 5f62c3d70df9..1fa7c03edec2 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2713,7 +2713,7 @@ static const struct of_device_id mv643xx_eth_shared_ids[] = {
MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
#endif
-#if defined(CONFIG_OF) && !defined(CONFIG_MV64X60)
+#if defined(CONFIG_OF_IRQ) && !defined(CONFIG_MV64X60)
#define mv643xx_eth_property(_np, _name, _v) \
do { \
u32 tmp; \
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index fece974b4edd..d768c7b6c6d6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2404,7 +2404,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
local_port);
return err;
}
- err = __mlxsw_sp_port_create(mlxsw_sp, local_port, false,
+ err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split,
module, width, lane);
if (err)
goto err_port_create;
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
index f3bb9055a292..44bb04d4d21b 100644
--- a/drivers/net/ethernet/microchip/encx24j600-regmap.c
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -26,11 +26,11 @@ static inline bool is_bits_set(int value, int mask)
}
static int encx24j600_switch_bank(struct encx24j600_context *ctx,
- int bank)
+ int bank)
{
int ret = 0;
-
int bank_opcode = BANK_SELECT(bank);
+
ret = spi_write(ctx->spi, &bank_opcode, 1);
if (ret == 0)
ctx->bank = bank;
@@ -39,7 +39,7 @@ static int encx24j600_switch_bank(struct encx24j600_context *ctx,
}
static int encx24j600_cmdn(struct encx24j600_context *ctx, u8 opcode,
- const void *buf, size_t len)
+ const void *buf, size_t len)
{
struct spi_message m;
struct spi_transfer t[2] = { { .tx_buf = &opcode, .len = 1, },
@@ -54,12 +54,14 @@ static int encx24j600_cmdn(struct encx24j600_context *ctx, u8 opcode,
static void regmap_lock_mutex(void *context)
{
struct encx24j600_context *ctx = context;
+
mutex_lock(&ctx->mutex);
}
static void regmap_unlock_mutex(void *context)
{
struct encx24j600_context *ctx = context;
+
mutex_unlock(&ctx->mutex);
}
@@ -128,6 +130,7 @@ static int regmap_encx24j600_sfr_update(struct encx24j600_context *ctx,
if (reg < 0x80) {
int ret = 0;
+
cmd = banked_code | banked_reg;
if ((banked_reg < 0x16) && (ctx->bank != bank))
ret = encx24j600_switch_bank(ctx, bank);
@@ -174,6 +177,7 @@ static int regmap_encx24j600_sfr_write(void *context, u8 reg, u8 *val,
size_t len)
{
struct encx24j600_context *ctx = context;
+
return regmap_encx24j600_sfr_update(ctx, reg, val, len, WCRU, WCRCODE);
}
@@ -228,9 +232,9 @@ int regmap_encx24j600_spi_write(void *context, u8 reg, const u8 *data,
if (reg < 0xc0)
return encx24j600_cmdn(ctx, reg, data, count);
- else
- /* SPI 1-byte command. Ignore data */
- return spi_write(ctx->spi, &reg, 1);
+
+ /* SPI 1-byte command. Ignore data */
+ return spi_write(ctx->spi, &reg, 1);
}
EXPORT_SYMBOL_GPL(regmap_encx24j600_spi_write);
@@ -495,6 +499,7 @@ static struct regmap_config phycfg = {
.writeable_reg = encx24j600_phymap_writeable,
.volatile_reg = encx24j600_phymap_volatile,
};
+
static struct regmap_bus phymap_encx24j600 = {
.reg_write = regmap_encx24j600_phy_reg_write,
.reg_read = regmap_encx24j600_phy_reg_read,
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index b14f0305aa31..fbce6166504e 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -30,7 +30,7 @@
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
static int debug = -1;
-module_param(debug, int, 0);
+module_param(debug, int, 0000);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/* SRAM memory layout:
@@ -105,6 +105,7 @@ static u16 encx24j600_read_reg(struct encx24j600_priv *priv, u8 reg)
struct net_device *dev = priv->ndev;
unsigned int val = 0;
int ret = regmap_read(priv->ctx.regmap, reg, &val);
+
if (unlikely(ret))
netif_err(priv, drv, dev, "%s: error %d reading reg %02x\n",
__func__, ret, reg);
@@ -115,6 +116,7 @@ static void encx24j600_write_reg(struct encx24j600_priv *priv, u8 reg, u16 val)
{
struct net_device *dev = priv->ndev;
int ret = regmap_write(priv->ctx.regmap, reg, val);
+
if (unlikely(ret))
netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n",
__func__, ret, reg, val);
@@ -125,6 +127,7 @@ static void encx24j600_update_reg(struct encx24j600_priv *priv, u8 reg,
{
struct net_device *dev = priv->ndev;
int ret = regmap_update_bits(priv->ctx.regmap, reg, mask, val);
+
if (unlikely(ret))
netif_err(priv, drv, dev, "%s: error %d updating reg %02x=%04x~%04x\n",
__func__, ret, reg, val, mask);
@@ -135,6 +138,7 @@ static u16 encx24j600_read_phy(struct encx24j600_priv *priv, u8 reg)
struct net_device *dev = priv->ndev;
unsigned int val = 0;
int ret = regmap_read(priv->ctx.phymap, reg, &val);
+
if (unlikely(ret))
netif_err(priv, drv, dev, "%s: error %d reading %02x\n",
__func__, ret, reg);
@@ -145,6 +149,7 @@ static void encx24j600_write_phy(struct encx24j600_priv *priv, u8 reg, u16 val)
{
struct net_device *dev = priv->ndev;
int ret = regmap_write(priv->ctx.phymap, reg, val);
+
if (unlikely(ret))
netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n",
__func__, ret, reg, val);
@@ -164,6 +169,7 @@ static void encx24j600_cmd(struct encx24j600_priv *priv, u8 cmd)
{
struct net_device *dev = priv->ndev;
int ret = regmap_write(priv->ctx.regmap, cmd, 0);
+
if (unlikely(ret))
netif_err(priv, drv, dev, "%s: error %d with cmd %02x\n",
__func__, ret, cmd);
@@ -173,6 +179,7 @@ static int encx24j600_raw_read(struct encx24j600_priv *priv, u8 reg, u8 *data,
size_t count)
{
int ret;
+
mutex_lock(&priv->ctx.mutex);
ret = regmap_encx24j600_spi_read(&priv->ctx, reg, data, count);
mutex_unlock(&priv->ctx.mutex);
@@ -184,6 +191,7 @@ static int encx24j600_raw_write(struct encx24j600_priv *priv, u8 reg,
const u8 *data, size_t count)
{
int ret;
+
mutex_lock(&priv->ctx.mutex);
ret = regmap_encx24j600_spi_write(&priv->ctx, reg, data, count);
mutex_unlock(&priv->ctx.mutex);
@@ -194,6 +202,7 @@ static int encx24j600_raw_write(struct encx24j600_priv *priv, u8 reg,
static void encx24j600_update_phcon1(struct encx24j600_priv *priv)
{
u16 phcon1 = encx24j600_read_phy(priv, PHCON1);
+
if (priv->autoneg == AUTONEG_ENABLE) {
phcon1 |= ANEN | RENEG;
} else {
@@ -328,6 +337,7 @@ static int encx24j600_receive_packet(struct encx24j600_priv *priv,
{
struct net_device *dev = priv->ndev;
struct sk_buff *skb = netdev_alloc_skb(dev, rsv->len + NET_IP_ALIGN);
+
if (!skb) {
pr_err_ratelimited("RX: OOM: packet dropped\n");
dev->stats.rx_dropped++;
@@ -346,7 +356,6 @@ static int encx24j600_receive_packet(struct encx24j600_priv *priv,
/* Maintain stats */
dev->stats.rx_packets++;
dev->stats.rx_bytes += rsv->len;
- priv->next_packet = rsv->next_packet;
netif_rx(skb);
@@ -383,6 +392,8 @@ static void encx24j600_rx_packets(struct encx24j600_priv *priv, u8 packet_count)
encx24j600_receive_packet(priv, &rsv);
}
+ priv->next_packet = rsv.next_packet;
+
newrxtail = priv->next_packet - 2;
if (newrxtail == ENC_RX_BUF_START)
newrxtail = SRAM_SIZE - 2;
@@ -827,6 +838,7 @@ static void encx24j600_set_multicast_list(struct net_device *dev)
static void encx24j600_hw_tx(struct encx24j600_priv *priv)
{
struct net_device *dev = priv->ndev;
+
netif_info(priv, tx_queued, dev, "TX Packet Len:%d\n",
priv->tx_skb->len);
@@ -894,7 +906,6 @@ static void encx24j600_tx_timeout(struct net_device *dev)
dev->stats.tx_errors++;
netif_wake_queue(dev);
- return;
}
static int encx24j600_get_regs_len(struct net_device *dev)
@@ -957,12 +968,14 @@ static int encx24j600_set_settings(struct net_device *dev,
static u32 encx24j600_get_msglevel(struct net_device *dev)
{
struct encx24j600_priv *priv = netdev_priv(dev);
+
return priv->msg_enable;
}
static void encx24j600_set_msglevel(struct net_device *dev, u32 val)
{
struct encx24j600_priv *priv = netdev_priv(dev);
+
priv->msg_enable = val;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 00efb1c4c57e..17a70122df05 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -1265,7 +1265,7 @@ static const struct qed_iscsi_ops qed_iscsi_ops_pass = {
.get_stats = &qed_iscsi_stats,
};
-const struct qed_iscsi_ops *qed_get_iscsi_ops()
+const struct qed_iscsi_ops *qed_get_iscsi_ops(void)
{
return &qed_iscsi_ops_pass;
}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index ae32f855e31b..422289c232bc 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -460,6 +460,12 @@ static int emac_clks_phase1_init(struct platform_device *pdev,
{
int ret;
+ /* On ACPI platforms, clocks are controlled by firmware and/or
+ * ACPI, not by drivers.
+ */
+ if (has_acpi_companion(&pdev->dev))
+ return 0;
+
ret = emac_clks_get(pdev, adpt);
if (ret)
return ret;
@@ -485,6 +491,9 @@ static int emac_clks_phase2_init(struct platform_device *pdev,
{
int ret;
+ if (has_acpi_companion(&pdev->dev))
+ return 0;
+
ret = clk_set_rate(adpt->clk[EMAC_CLK_TX], 125000000);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 4ff4e0491406..aa11b70b9ca4 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -472,8 +472,6 @@ static void r6040_down(struct net_device *dev)
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
-
- phy_stop(dev->phydev);
}
static int r6040_close(struct net_device *dev)
@@ -481,12 +479,12 @@ static int r6040_close(struct net_device *dev)
struct r6040_private *lp = netdev_priv(dev);
struct pci_dev *pdev = lp->pdev;
- spin_lock_irq(&lp->lock);
+ phy_stop(dev->phydev);
napi_disable(&lp->napi);
netif_stop_queue(dev);
- r6040_down(dev);
- free_irq(dev->irq, dev);
+ spin_lock_irq(&lp->lock);
+ r6040_down(dev);
/* Free RX buffer */
r6040_free_rxbufs(dev);
@@ -496,6 +494,8 @@ static int r6040_close(struct net_device *dev)
spin_unlock_irq(&lp->lock);
+ free_irq(dev->irq, dev);
+
/* Free Descriptor memory */
if (lp->rx_ring) {
pci_free_consistent(pdev,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index f644216eda1b..87bdc56b4e3a 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -120,44 +120,53 @@ static int efx_ethtool_phys_id(struct net_device *net_dev,
}
/* This must be called with rtnl_lock held. */
-static int efx_ethtool_get_settings(struct net_device *net_dev,
- struct ethtool_cmd *ecmd)
+static int
+efx_ethtool_get_link_ksettings(struct net_device *net_dev,
+ struct ethtool_link_ksettings *cmd)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_link_state *link_state = &efx->link_state;
+ u32 supported;
mutex_lock(&efx->mac_lock);
- efx->phy_op->get_settings(efx, ecmd);
+ efx->phy_op->get_link_ksettings(efx, cmd);
mutex_unlock(&efx->mac_lock);
/* Both MACs support pause frames (bidirectional and respond-only) */
- ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+
+ supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
if (LOOPBACK_INTERNAL(efx)) {
- ethtool_cmd_speed_set(ecmd, link_state->speed);
- ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
+ cmd->base.speed = link_state->speed;
+ cmd->base.duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
}
return 0;
}
/* This must be called with rtnl_lock held. */
-static int efx_ethtool_set_settings(struct net_device *net_dev,
- struct ethtool_cmd *ecmd)
+static int
+efx_ethtool_set_link_ksettings(struct net_device *net_dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
/* GMAC does not support 1000Mbps HD */
- if ((ethtool_cmd_speed(ecmd) == SPEED_1000) &&
- (ecmd->duplex != DUPLEX_FULL)) {
+ if ((cmd->base.speed == SPEED_1000) &&
+ (cmd->base.duplex != DUPLEX_FULL)) {
netif_dbg(efx, drv, efx->net_dev,
"rejecting unsupported 1000Mbps HD setting\n");
return -EINVAL;
}
mutex_lock(&efx->mac_lock);
- rc = efx->phy_op->set_settings(efx, ecmd);
+ rc = efx->phy_op->set_link_ksettings(efx, cmd);
mutex_unlock(&efx->mac_lock);
return rc;
}
@@ -1342,8 +1351,6 @@ static int efx_ethtool_get_module_info(struct net_device *net_dev,
}
const struct ethtool_ops efx_ethtool_ops = {
- .get_settings = efx_ethtool_get_settings,
- .set_settings = efx_ethtool_set_settings,
.get_drvinfo = efx_ethtool_get_drvinfo,
.get_regs_len = efx_ethtool_get_regs_len,
.get_regs = efx_ethtool_get_regs,
@@ -1373,4 +1380,6 @@ const struct ethtool_ops efx_ethtool_ops = {
.get_ts_info = efx_ethtool_get_ts_info,
.get_module_info = efx_ethtool_get_module_info,
.get_module_eeprom = efx_ethtool_get_module_eeprom,
+ .get_link_ksettings = efx_ethtool_get_link_ksettings,
+ .set_link_ksettings = efx_ethtool_set_link_ksettings,
};
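
The sfc conversion is the standard migration from get_settings/set_settings to the link_ksettings ethtool ops: legacy SUPPORTED_*/ADVERTISED_* u32 masks are shuttled in and out of the new link-mode bitmaps with the ethtool_convert_*() helpers, and speed/duplex move into cmd->base. A hedged sketch of the helper usage (the function is illustrative, the helpers are the real ethtool API):

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	/* Illustrative: OR extra legacy SUPPORTED_* bits into a ksettings bitmap. */
	static void example_add_pause_modes(struct ethtool_link_ksettings *cmd)
	{
		u32 supported;

		/* bitmap -> legacy u32, modify, then legacy u32 -> bitmap */
		ethtool_convert_link_mode_to_legacy_u32(&supported,
							cmd->link_modes.supported);
		supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
		ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
							supported);

		cmd->base.speed = SPEED_1000;	/* example values */
		cmd->base.duplex = DUPLEX_FULL;
	}
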
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 9dcd396784ae..c905971c5f3a 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -503,45 +503,59 @@ static void efx_mcdi_phy_remove(struct efx_nic *efx)
kfree(phy_data);
}
-static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+static void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx,
+ struct ethtool_link_ksettings *cmd)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
int rc;
-
- ecmd->supported =
- mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap);
- ecmd->advertising = efx->link_advertising;
- ethtool_cmd_speed_set(ecmd, efx->link_state.speed);
- ecmd->duplex = efx->link_state.fd;
- ecmd->port = mcdi_to_ethtool_media(phy_cfg->media);
- ecmd->phy_address = phy_cfg->port;
- ecmd->transceiver = XCVR_INTERNAL;
- ecmd->autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg);
- ecmd->mdio_support = (efx->mdio.mode_support &
+ u32 supported, advertising, lp_advertising;
+
+ supported = mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap);
+ advertising = efx->link_advertising;
+ cmd->base.speed = efx->link_state.speed;
+ cmd->base.duplex = efx->link_state.fd;
+ cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media);
+ cmd->base.phy_address = phy_cfg->port;
+ cmd->base.autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg);
+ cmd->base.mdio_support = (efx->mdio.mode_support &
(MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
if (rc)
return;
- ecmd->lp_advertising =
+ lp_advertising =
mcdi_to_ethtool_cap(phy_cfg->media,
MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP));
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+ lp_advertising);
}
-static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+static int
+efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
+ const struct ethtool_link_ksettings *cmd)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 caps;
int rc;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
- if (ecmd->autoneg) {
- caps = (ethtool_to_mcdi_cap(ecmd->advertising) |
+ if (cmd->base.autoneg) {
+ caps = (ethtool_to_mcdi_cap(advertising) |
1 << MC_CMD_PHY_CAP_AN_LBN);
- } else if (ecmd->duplex) {
- switch (ethtool_cmd_speed(ecmd)) {
+ } else if (cmd->base.duplex) {
+ switch (cmd->base.speed) {
case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
@@ -550,7 +564,7 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
default: return -EINVAL;
}
} else {
- switch (ethtool_cmd_speed(ecmd)) {
+ switch (cmd->base.speed) {
case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
@@ -563,9 +577,9 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
if (rc)
return rc;
- if (ecmd->autoneg) {
+ if (cmd->base.autoneg) {
efx_link_set_advertising(
- efx, ecmd->advertising | ADVERTISED_Autoneg);
+ efx, advertising | ADVERTISED_Autoneg);
phy_cfg->forced_cap = 0;
} else {
efx_link_set_advertising(efx, 0);
@@ -812,8 +826,8 @@ static const struct efx_phy_operations efx_mcdi_phy_ops = {
.poll = efx_mcdi_phy_poll,
.fini = efx_port_dummy_op_void,
.remove = efx_mcdi_phy_remove,
- .get_settings = efx_mcdi_phy_get_settings,
- .set_settings = efx_mcdi_phy_set_settings,
+ .get_link_ksettings = efx_mcdi_phy_get_link_ksettings,
+ .set_link_ksettings = efx_mcdi_phy_set_link_ksettings,
.test_alive = efx_mcdi_phy_test_alive,
.run_tests = efx_mcdi_phy_run_tests,
.test_name = efx_mcdi_phy_test_name,
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 8692e829b40f..1a635ced62d0 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -720,8 +720,8 @@ static inline bool efx_link_state_equal(const struct efx_link_state *left,
* @reconfigure: Reconfigure PHY (e.g. for new link parameters)
* @poll: Update @link_state and report whether it changed.
* Serialised by the mac_lock.
- * @get_settings: Get ethtool settings. Serialised by the mac_lock.
- * @set_settings: Set ethtool settings. Serialised by the mac_lock.
+ * @get_link_ksettings: Get ethtool settings. Serialised by the mac_lock.
+ * @set_link_ksettings: Set ethtool settings. Serialised by the mac_lock.
* @set_npage_adv: Set abilities advertised in (Extended) Next Page
* (only needed where AN bit is set in mmds)
* @test_alive: Test that PHY is 'alive' (online)
@@ -736,10 +736,10 @@ struct efx_phy_operations {
void (*remove) (struct efx_nic *efx);
int (*reconfigure) (struct efx_nic *efx);
bool (*poll) (struct efx_nic *efx);
- void (*get_settings) (struct efx_nic *efx,
- struct ethtool_cmd *ecmd);
- int (*set_settings) (struct efx_nic *efx,
- struct ethtool_cmd *ecmd);
+ void (*get_link_ksettings)(struct efx_nic *efx,
+ struct ethtool_link_ksettings *cmd);
+ int (*set_link_ksettings)(struct efx_nic *efx,
+ const struct ethtool_link_ksettings *cmd);
void (*set_npage_adv) (struct efx_nic *efx, u32);
int (*test_alive) (struct efx_nic *efx);
const char *(*test_name) (struct efx_nic *efx, unsigned int index);
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 98f10c216521..8b6810bad54b 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -158,9 +158,9 @@ static bool gtp_check_src_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
return false;
- iph = (struct iphdr *)(skb->data + hdrlen + sizeof(struct iphdr));
+ iph = (struct iphdr *)(skb->data + hdrlen);
- return iph->saddr != pctx->ms_addr_ip4.s_addr;
+ return iph->saddr == pctx->ms_addr_ip4.s_addr;
}
/* Check if the inner IP source address in this packet is assigned to any
@@ -423,11 +423,11 @@ static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
/* Bits 8 7 6 5 4 3 2 1
* +--+--+--+--+--+--+--+--+
- * |version |PT| 1| E| S|PN|
+ * |version |PT| 0| E| S|PN|
* +--+--+--+--+--+--+--+--+
* 0 0 1 1 1 0 0 0
*/
- gtp1->flags = 0x38; /* v1, GTP-non-prime. */
+ gtp1->flags = 0x30; /* v1, GTP-non-prime. */
gtp1->type = GTP_TPDU;
gtp1->length = htons(payload_len);
gtp1->tid = htonl(pctx->u.v1.o_tei);
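
The gtp.c flag fix follows from the bit layout in the comment above: version (3 bits) = 1, PT = 1 for GTP (as opposed to GTP'), and the reserved bit plus E/S/PN all zero gives 0011 0000b = 0x30; the old 0x38 wrongly set the reserved bit. The arithmetic, checked standalone:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t version = 1;	/* GTPv1 */
		uint8_t pt = 1;		/* protocol type: GTP (not GTP') */
		/* bits 8..6 = version, bit 5 = PT, bit 4 reserved (0), bits 3..1 = E/S/PN */
		uint8_t flags = (uint8_t)((version << 5) | (pt << 4));

		assert(flags == 0x30);
		printf("GTPv1 T-PDU flags: 0x%02x\n", flags);
		return 0;
	}
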
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index f293d33fb28f..8d5b903d1d9d 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -517,9 +517,9 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
mtt = irda_get_mtt(skb);
pr_debug("%s: %ld, mtt=%d\n", __func__, jiffies, mtt);
- if (mtt > 1000)
- mdelay(mtt / 1000);
- else if (mtt)
+ if (mtt > 1000)
+ mdelay(mtt / 1000);
+ else if (mtt)
udelay(mtt);
/* Enable DMA interrupt */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b425fa1013af..08327e005ccc 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
+#include <linux/bpf.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
@@ -81,6 +82,8 @@ struct receive_queue {
struct napi_struct napi;
+ struct bpf_prog __rcu *xdp_prog;
+
/* Chain pages by the private ptr. */
struct page *pages;
@@ -111,6 +114,9 @@ struct virtnet_info {
/* # of queue pairs currently used by the driver */
u16 curr_queue_pairs;
+ /* # of XDP queue pairs currently used by the driver */
+ u16 xdp_queue_pairs;
+
/* I like... big packets and I cannot lie! */
bool big_packets;
@@ -324,6 +330,90 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
return skb;
}
+static void virtnet_xdp_xmit(struct virtnet_info *vi,
+ struct receive_queue *rq,
+ struct send_queue *sq,
+ struct xdp_buff *xdp)
+{
+ struct page *page = virt_to_head_page(xdp->data);
+ struct virtio_net_hdr_mrg_rxbuf *hdr;
+ unsigned int num_sg, len;
+ void *xdp_sent;
+ int err;
+
+ /* Free up any pending old buffers before queueing new ones. */
+ while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+ struct page *sent_page = virt_to_head_page(xdp_sent);
+
+ if (vi->mergeable_rx_bufs)
+ put_page(sent_page);
+ else
+ give_pages(rq, sent_page);
+ }
+
+ /* Zero header and leave csum up to XDP layers */
+ hdr = xdp->data;
+ memset(hdr, 0, vi->hdr_len);
+
+ num_sg = 1;
+ sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
+ err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
+ xdp->data, GFP_ATOMIC);
+ if (unlikely(err)) {
+ if (vi->mergeable_rx_bufs)
+ put_page(page);
+ else
+ give_pages(rq, page);
+ return; /* On error abort to avoid unnecessary kick */
+ } else if (!vi->mergeable_rx_bufs) {
+ /* If not mergeable bufs must be big packets so cleanup pages */
+ give_pages(rq, (struct page *)page->private);
+ page->private = 0;
+ }
+
+ virtqueue_kick(sq->vq);
+}
+
+static u32 do_xdp_prog(struct virtnet_info *vi,
+ struct receive_queue *rq,
+ struct bpf_prog *xdp_prog,
+ struct page *page, int offset, int len)
+{
+ int hdr_padded_len;
+ struct xdp_buff xdp;
+ unsigned int qp;
+ u32 act;
+ u8 *buf;
+
+ buf = page_address(page) + offset;
+
+ if (vi->mergeable_rx_bufs)
+ hdr_padded_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ else
+ hdr_padded_len = sizeof(struct padded_vnet_hdr);
+
+ xdp.data = buf + hdr_padded_len;
+ xdp.data_end = xdp.data + (len - vi->hdr_len);
+
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ switch (act) {
+ case XDP_PASS:
+ return XDP_PASS;
+ case XDP_TX:
+ qp = vi->curr_queue_pairs -
+ vi->xdp_queue_pairs +
+ smp_processor_id();
+ xdp.data = buf + (vi->mergeable_rx_bufs ? 0 : 4);
+ virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp);
+ return XDP_TX;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ case XDP_DROP:
+ return XDP_DROP;
+ }
+}
+
static struct sk_buff *receive_small(struct virtnet_info *vi, void *buf, unsigned int len)
{
struct sk_buff * skb = buf;
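
do_xdp_prog() above is the usual XDP verdict dispatch: XDP_PASS hands the frame to the stack, XDP_TX bounces it back out of the device, and anything else (including XDP_ABORTED and unknown actions) is dropped after a warning. Since the action codes are kernel UAPI, the shape of that dispatch can be shown standalone (the program run and the transmit step are stubbed; assumes kernel UAPI headers are installed):

	#include <linux/bpf.h>		/* XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX */
	#include <stdio.h>

	/* Stand-in for bpf_prog_run_xdp(): pretend the program said "transmit". */
	static unsigned int fake_prog_run(void)
	{
		return XDP_TX;
	}

	static unsigned int handle_verdict(unsigned int act)
	{
		switch (act) {
		case XDP_PASS:
			return XDP_PASS;	/* hand the frame to the stack */
		case XDP_TX:
			/* a real driver would queue the frame for transmit here */
			return XDP_TX;
		default:
			fprintf(stderr, "unknown XDP action %u\n", act);
			/* fall through */
		case XDP_ABORTED:
		case XDP_DROP:
			return XDP_DROP;	/* everything else is dropped */
		}
	}

	int main(void)
	{
		return handle_verdict(fake_prog_run()) == XDP_TX ? 0 : 1;
	}
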
@@ -340,17 +430,102 @@ static struct sk_buff *receive_big(struct net_device *dev,
void *buf,
unsigned int len)
{
+ struct bpf_prog *xdp_prog;
struct page *page = buf;
- struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
+ struct sk_buff *skb;
+ rcu_read_lock();
+ xdp_prog = rcu_dereference(rq->xdp_prog);
+ if (xdp_prog) {
+ struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
+ u32 act;
+
+ if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
+ goto err_xdp;
+ act = do_xdp_prog(vi, rq, xdp_prog, page, 0, len);
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ rcu_read_unlock();
+ goto xdp_xmit;
+ case XDP_DROP:
+ default:
+ goto err_xdp;
+ }
+ }
+ rcu_read_unlock();
+
+ skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
if (unlikely(!skb))
goto err;
return skb;
+err_xdp:
+ rcu_read_unlock();
err:
dev->stats.rx_dropped++;
give_pages(rq, page);
+xdp_xmit:
+ return NULL;
+}
+
+/* The conditions to enable XDP should preclude the underlying device from
+ * sending packets across multiple buffers (num_buf > 1). However, the spec
+ * does not appear to forbid this outright; it is merely against convention.
+ * So, to avoid making the system unresponsive, the packet is copied into a
+ * single page and the XDP program is run on that. This is extremely slow,
+ * and a warning is pushed to the user so it can be fixed as soon as
+ * possible. Fixing it may require investigating the underlying hardware to
+ * determine why multiple buffers are being received, or simply attaching
+ * the XDP program in the ingress stack after the skb is built, since there
+ * is no advantage to running it here anymore.
+ */
+static struct page *xdp_linearize_page(struct receive_queue *rq,
+ u16 num_buf,
+ struct page *p,
+ int offset,
+ unsigned int *len)
+{
+ struct page *page = alloc_page(GFP_ATOMIC);
+ unsigned int page_off = 0;
+
+ if (!page)
+ return NULL;
+
+ memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
+ page_off += *len;
+
+ while (--num_buf) {
+ unsigned int buflen;
+ unsigned long ctx;
+ void *buf;
+ int off;
+
+ ctx = (unsigned long)virtqueue_get_buf(rq->vq, &buflen);
+ if (unlikely(!ctx))
+ goto err_buf;
+
+ /* guard against a misconfigured or uncooperative backend that
+ * is sending packets larger than the MTU.
+ */
+ if ((page_off + buflen) > PAGE_SIZE)
+ goto err_buf;
+
+ buf = mergeable_ctx_to_buf_address(ctx);
+ p = virt_to_head_page(buf);
+ off = buf - page_address(p);
+
+ memcpy(page_address(page) + page_off,
+ page_address(p) + off, buflen);
+ page_off += buflen;
+ }
+
+ *len = page_off;
+ return page;
+err_buf:
+ __free_pages(page, 0);
return NULL;
}
@@ -365,11 +540,67 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
struct page *page = virt_to_head_page(buf);
int offset = buf - page_address(page);
- unsigned int truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
+ struct sk_buff *head_skb, *curr_skb;
+ struct bpf_prog *xdp_prog;
+ unsigned int truesize;
+
+ head_skb = NULL;
+
+ rcu_read_lock();
+ xdp_prog = rcu_dereference(rq->xdp_prog);
+ if (xdp_prog) {
+ struct page *xdp_page;
+ u32 act;
+
+ /* No known backend devices should send packets with
+ * more than a single buffer when XDP conditions are
+ * met. However, it is not strictly illegal, so the case
+ * is handled as an exception and a warning is emitted.
+ */
+ if (unlikely(num_buf > 1)) {
+ bpf_warn_invalid_xdp_buffer();
+
+ /* linearize data for XDP */
+ xdp_page = xdp_linearize_page(rq, num_buf,
+ page, offset, &len);
+ if (!xdp_page)
+ goto err_xdp;
+ offset = 0;
+ } else {
+ xdp_page = page;
+ }
+
+ /* Transient failure which in theory could occur if
+ * in-flight packets from before XDP was enabled reach
+ * the receive path after XDP is loaded. In practice I
+ * was not able to create this condition.
+ */
+ if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
+ goto err_xdp;
+
+ act = do_xdp_prog(vi, rq, xdp_prog, page, offset, len);
+ switch (act) {
+ case XDP_PASS:
+ if (unlikely(xdp_page != page))
+ __free_pages(xdp_page, 0);
+ break;
+ case XDP_TX:
+ if (unlikely(xdp_page != page))
+ goto err_xdp;
+ rcu_read_unlock();
+ goto xdp_xmit;
+ case XDP_DROP:
+ default:
+ if (unlikely(xdp_page != page))
+ __free_pages(xdp_page, 0);
+ goto err_xdp;
+ }
+ }
+ rcu_read_unlock();
- struct sk_buff *head_skb = page_to_skb(vi, rq, page, offset, len,
- truesize);
- struct sk_buff *curr_skb = head_skb;
+ truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
+ head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
+ curr_skb = head_skb;
if (unlikely(!curr_skb))
goto err_skb;
@@ -423,6 +654,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
return head_skb;
+err_xdp:
+ rcu_read_unlock();
err_skb:
put_page(page);
while (--num_buf) {
@@ -439,6 +672,7 @@ err_skb:
err_buf:
dev->stats.rx_dropped++;
dev_kfree_skb(head_skb);
+xdp_xmit:
return NULL;
}
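
xdp_linearize_page() above handles the multi-buffer corner case by copying every fragment into one freshly allocated page and giving up if the total would overflow PAGE_SIZE. The coalescing loop reduced to plain buffer arithmetic (a sketch, not the driver code):

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	#define PAGE_SZ 4096u	/* stand-in for PAGE_SIZE */

	/* Copy 'nfrags' scattered fragments into one page-sized buffer.
	 * Returns the linearized length, or 0 if the fragments do not fit.
	 */
	static size_t linearize(uint8_t *page, const uint8_t *frags[],
				const size_t lens[], size_t nfrags)
	{
		size_t off = 0, i;

		for (i = 0; i < nfrags; i++) {
			if (off + lens[i] > PAGE_SZ)	/* guard against oversized input */
				return 0;
			memcpy(page + off, frags[i], lens[i]);
			off += lens[i];
		}
		return off;
	}

	int main(void)
	{
		uint8_t a[100] = { 0 }, b[200] = { 0 };
		const uint8_t *frags[] = { a, b };
		const size_t lens[] = { sizeof(a), sizeof(b) };
		uint8_t *page = malloc(PAGE_SZ);
		size_t n;

		if (!page)
			return 1;
		n = linearize(page, frags, lens, 2);
		free(page);
		return n == 300 ? 0 : 1;
	}
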
@@ -1337,6 +1571,13 @@ static int virtnet_set_channels(struct net_device *dev,
if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
return -EINVAL;
+ /* For now we don't support modifying channels while XDP is loaded.
+ * Also, when XDP is loaded all RX queues have XDP programs, so we
+ * only need to check a single RX queue.
+ */
+ if (vi->rq[0].xdp_prog)
+ return -EINVAL;
+
get_online_cpus();
err = virtnet_set_queues(vi, queue_pairs);
if (!err) {
@@ -1428,6 +1669,93 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
.set_settings = virtnet_set_settings,
};
+static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
+{
+ unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct bpf_prog *old_prog;
+ u16 xdp_qp = 0, curr_qp;
+ int i, err;
+
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6)) {
+ netdev_warn(dev, "can't set XDP while host is implementing LRO, disable LRO first\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
+ netdev_warn(dev, "XDP expects header/data in single page, any_header_sg required\n");
+ return -EINVAL;
+ }
+
+ if (dev->mtu > max_sz) {
+ netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
+ return -EINVAL;
+ }
+
+ curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
+ if (prog)
+ xdp_qp = nr_cpu_ids;
+
+ /* XDP requires extra queues for XDP_TX */
+ if (curr_qp + xdp_qp > vi->max_queue_pairs) {
+ netdev_warn(dev, "request %i queues but max is %i\n",
+ curr_qp + xdp_qp, vi->max_queue_pairs);
+ return -ENOMEM;
+ }
+
+ err = virtnet_set_queues(vi, curr_qp + xdp_qp);
+ if (err) {
+ dev_warn(&dev->dev, "XDP Device queue allocation failure.\n");
+ return err;
+ }
+
+ if (prog) {
+ prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
+ if (IS_ERR(prog)) {
+ virtnet_set_queues(vi, curr_qp);
+ return PTR_ERR(prog);
+ }
+ }
+
+ vi->xdp_queue_pairs = xdp_qp;
+ netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
+ rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+ }
+
+ return 0;
+}
+
+static bool virtnet_xdp_query(struct net_device *dev)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ if (vi->rq[i].xdp_prog)
+ return true;
+ }
+ return false;
+}
+
+static int virtnet_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return virtnet_xdp_set(dev, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_attached = virtnet_xdp_query(dev);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops virtnet_netdev = {
.ndo_open = virtnet_open,
.ndo_stop = virtnet_close,
@@ -1444,6 +1772,7 @@ static const struct net_device_ops virtnet_netdev = {
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = virtnet_busy_poll,
#endif
+ .ndo_xdp = virtnet_xdp,
};
static void virtnet_config_changed_work(struct work_struct *work)
@@ -1505,12 +1834,20 @@ static void virtnet_free_queues(struct virtnet_info *vi)
static void free_receive_bufs(struct virtnet_info *vi)
{
+ struct bpf_prog *old_prog;
int i;
+ rtnl_lock();
for (i = 0; i < vi->max_queue_pairs; i++) {
while (vi->rq[i].pages)
__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
+
+ old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
+ RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
+ if (old_prog)
+ bpf_prog_put(old_prog);
}
+ rtnl_unlock();
}
static void free_receive_page_frags(struct virtnet_info *vi)
@@ -1521,6 +1858,16 @@ static void free_receive_page_frags(struct virtnet_info *vi)
put_page(vi->rq[i].alloc_frag.page);
}
+static bool is_xdp_queue(struct virtnet_info *vi, int q)
+{
+ if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
+ return false;
+ else if (q < vi->curr_queue_pairs)
+ return true;
+ else
+ return false;
+}
+
static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
@@ -1528,8 +1875,12 @@ static void free_unused_bufs(struct virtnet_info *vi)
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->sq[i].vq;
- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
- dev_kfree_skb(buf);
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
+ if (!is_xdp_queue(vi, i))
+ dev_kfree_skb(buf);
+ else
+ put_page(virt_to_head_page(buf));
+ }
}
for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -1930,7 +2281,9 @@ static int virtnet_probe(struct virtio_device *vdev)
goto free_unregister_netdev;
}
- virtnet_set_affinity(vi);
+ rtnl_lock();
+ virtnet_set_queues(vi, vi->curr_queue_pairs);
+ rtnl_unlock();
/* Assume link up if device can't report link status,
otherwise get link status from config. */
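
The queue accounting in virtnet_xdp_set() deserves spelling out: XDP_TX wants a dedicated send queue per CPU so its transmits never race the regular stack, so loading a program requests curr_queue_pairs + nr_cpu_ids pairs and fails with -ENOMEM if the device cannot supply them; the XDP_TX path then selects its queue as curr_queue_pairs - xdp_queue_pairs + smp_processor_id(). A standalone rehearsal of that arithmetic (all numbers are examples):

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int max_queue_pairs = 8;	/* device limit (example) */
		unsigned int curr_qp = 4;		/* pairs used by the stack */
		unsigned int nr_cpus = 4;		/* one extra TX queue per CPU */
		unsigned int xdp_qp = nr_cpus;
		unsigned int cpu;

		if (curr_qp + xdp_qp > max_queue_pairs) {
			fprintf(stderr, "request %u queues but max is %u\n",
				curr_qp + xdp_qp, max_queue_pairs);
			return 1;			/* mirrors the -ENOMEM case */
		}

		/* After the switch, curr_queue_pairs == curr_qp + xdp_qp; the queue
		 * that XDP_TX on CPU 'cpu' uses is the cpu-th of the reserved tail.
		 */
		for (cpu = 0; cpu < nr_cpus; cpu++) {
			unsigned int qp = (curr_qp + xdp_qp) - xdp_qp + cpu;

			assert(qp >= curr_qp && qp < curr_qp + xdp_qp);
		}
		printf("reserved queue pairs %u..%u for XDP_TX\n",
		       curr_qp, curr_qp + xdp_qp - 1);
		return 0;
	}
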
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 3bca24651dc0..7532646c3b7b 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -366,6 +366,8 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
struct in6_addr *nexthop;
int ret;
+ nf_reset(skb);
+
skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
@@ -547,6 +549,8 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
u32 nexthop;
int ret = -EINVAL;
+ nf_reset(skb);
+
/* Be paranoid, rather than too clever. */
if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
struct sk_buff *skb2;
@@ -849,8 +853,6 @@ static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
{
struct net *net = dev_net(dev);
- nf_reset(skb);
-
if (NF_HOOK(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) < 0)
skb = NULL; /* kfree_skb(skb) handled by nf code */
diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
index 5920c996fcdf..ff2e4a5654c7 100644
--- a/drivers/net/wan/lmc/lmc_media.c
+++ b/drivers/net/wan/lmc/lmc_media.c
@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
lmc_media_t lmc_ds3_media = {
- lmc_ds3_init, /* special media init stuff */
- lmc_ds3_default, /* reset to default state */
- lmc_ds3_set_status, /* reset status to state provided */
- lmc_dummy_set_1, /* set clock source */
- lmc_dummy_set2_1, /* set line speed */
- lmc_ds3_set_100ft, /* set cable length */
- lmc_ds3_set_scram, /* set scrambler */
- lmc_ds3_get_link_status, /* get link status */
- lmc_dummy_set_1, /* set link status */
- lmc_ds3_set_crc_length, /* set CRC length */
- lmc_dummy_set_1, /* set T1 or E1 circuit type */
- lmc_ds3_watchdog
+ .init = lmc_ds3_init, /* special media init stuff */
+ .defaults = lmc_ds3_default, /* reset to default state */
+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
+ .set_speed = lmc_dummy_set2_1, /* set line speed */
+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
+ .set_link_status = lmc_dummy_set_1, /* set link status */
+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
+ .watchdog = lmc_ds3_watchdog
};
lmc_media_t lmc_hssi_media = {
- lmc_hssi_init, /* special media init stuff */
- lmc_hssi_default, /* reset to default state */
- lmc_hssi_set_status, /* reset status to state provided */
- lmc_hssi_set_clock, /* set clock source */
- lmc_dummy_set2_1, /* set line speed */
- lmc_dummy_set_1, /* set cable length */
- lmc_dummy_set_1, /* set scrambler */
- lmc_hssi_get_link_status, /* get link status */
- lmc_hssi_set_link_status, /* set link status */
- lmc_hssi_set_crc_length, /* set CRC length */
- lmc_dummy_set_1, /* set T1 or E1 circuit type */
- lmc_hssi_watchdog
+ .init = lmc_hssi_init, /* special media init stuff */
+ .defaults = lmc_hssi_default, /* reset to default state */
+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
+ .set_speed = lmc_dummy_set2_1, /* set line speed */
+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
+ .watchdog = lmc_hssi_watchdog
};
-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
- lmc_ssi_default, /* reset to default state */
- lmc_ssi_set_status, /* reset status to state provided */
- lmc_ssi_set_clock, /* set clock source */
- lmc_ssi_set_speed, /* set line speed */
- lmc_dummy_set_1, /* set cable length */
- lmc_dummy_set_1, /* set scrambler */
- lmc_ssi_get_link_status, /* get link status */
- lmc_ssi_set_link_status, /* set link status */
- lmc_ssi_set_crc_length, /* set CRC length */
- lmc_dummy_set_1, /* set T1 or E1 circuit type */
- lmc_ssi_watchdog
+lmc_media_t lmc_ssi_media = {
+ .init = lmc_ssi_init, /* special media init stuff */
+ .defaults = lmc_ssi_default, /* reset to default state */
+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
+ .set_speed = lmc_ssi_set_speed, /* set line speed */
+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
+ .watchdog = lmc_ssi_watchdog
};
lmc_media_t lmc_t1_media = {
- lmc_t1_init, /* special media init stuff */
- lmc_t1_default, /* reset to default state */
- lmc_t1_set_status, /* reset status to state provided */
- lmc_t1_set_clock, /* set clock source */
- lmc_dummy_set2_1, /* set line speed */
- lmc_dummy_set_1, /* set cable length */
- lmc_dummy_set_1, /* set scrambler */
- lmc_t1_get_link_status, /* get link status */
- lmc_dummy_set_1, /* set link status */
- lmc_t1_set_crc_length, /* set CRC length */
- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
- lmc_t1_watchdog
+ .init = lmc_t1_init, /* special media init stuff */
+ .defaults = lmc_t1_default, /* reset to default state */
+ .set_status = lmc_t1_set_status, /* reset status to state provided */
+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
+ .set_speed = lmc_dummy_set2_1, /* set line speed */
+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
+ .get_link_status = lmc_t1_get_link_status, /* get link status */
+ .set_link_status = lmc_dummy_set_1, /* set link status */
+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
+ .watchdog = lmc_t1_watchdog
};
static void
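
The lmc_media conversion, like the lec, mpoa, irnet and vmci_transport ones further down, replaces positional struct initializers with C99 designated initializers, which survive field reordering and make it obvious which callback fills which slot. A self-contained illustration of the idiom:

	#include <stdio.h>

	struct media_ops {
		void (*init)(void);
		void (*watchdog)(void);
		int  (*get_link_status)(void);
	};

	static void example_init(void)        { puts("init"); }
	static void example_watchdog(void)    { puts("watchdog"); }
	static int  example_link_status(void) { return 1; }

	/* A positional initializer breaks silently if 'struct media_ops' is ever
	 * reordered; the designated form below does not, and any member left
	 * unnamed defaults to NULL/0.
	 */
	static const struct media_ops example_media = {
		.init            = example_init,
		.get_link_status = example_link_status,
		.watchdog        = example_watchdog,
	};

	int main(void)
	{
		example_media.init();
		return example_media.get_link_status() ? 0 : 1;
	}
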
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 7b6e5d168c95..92bc89ae7e20 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -20,7 +20,7 @@ struct cgroup_bpf {
* when this cgroup is accessed.
*/
struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
- struct bpf_prog *effective[MAX_BPF_ATTACH_TYPE];
+ struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE];
};
void cgroup_bpf_put(struct cgroup *cgrp);
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 8796ff03f472..f74ae68086dc 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -216,7 +216,7 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
-void bpf_prog_calc_digest(struct bpf_prog *fp);
+int bpf_prog_calc_digest(struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
@@ -238,6 +238,8 @@ struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
+int __bpf_prog_charge(struct user_struct *user, u32 pages);
+void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
@@ -318,6 +320,15 @@ static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog)
{
return ERR_PTR(-EOPNOTSUPP);
}
+
+static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
+{
+ return 0;
+}
+
+static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
+{
+}
#endif /* CONFIG_BPF_SYSCALL */
/* verifier prototypes for helper functions called from eBPF programs */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 6a1658308612..702314253797 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -57,9 +57,6 @@ struct bpf_prog_aux;
/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK 512
-/* Maximum BPF program size in bytes. */
-#define MAX_BPF_SIZE (BPF_MAXINSNS * sizeof(struct bpf_insn))
-
/* Helper macros for filter block array initializers. */
/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
@@ -517,6 +514,17 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
return BPF_PROG_RUN(prog, xdp);
}
+static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
+{
+ return prog->len * sizeof(struct bpf_insn);
+}
+
+static inline u32 bpf_prog_digest_scratch_size(const struct bpf_prog *prog)
+{
+ return round_up(bpf_prog_insn_size(prog) +
+ sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
+}
+
static inline unsigned int bpf_prog_size(unsigned int proglen)
{
return max(sizeof(struct bpf_prog),
@@ -602,6 +610,7 @@ bool bpf_helper_changes_pkt_data(void *func);
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
void bpf_warn_invalid_xdp_action(u32 act);
+void bpf_warn_invalid_xdp_buffer(void);
#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
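
The two new filter.h helpers size the digest scratch buffer per program instead of using the old fixed MAX_BPF_SIZE array: the SHA-1 input is the raw instructions plus the mandatory 0x80 padding byte and the 64-bit bit count, rounded up to the 64-byte message block. The arithmetic checked standalone (8-byte instructions and 64-byte blocks are the real sizes):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define BPF_INSN_SZ		8u	/* sizeof(struct bpf_insn) */
	#define SHA_MESSAGE_BYTES	64u	/* SHA-1 block size */

	static uint32_t round_up_to(uint32_t x, uint32_t align)
	{
		return (x + align - 1) / align * align;
	}

	static uint32_t insn_size(uint32_t len)
	{
		return len * BPF_INSN_SZ;
	}

	static uint32_t digest_scratch_size(uint32_t len)
	{
		/* instructions + 0x80 pad byte + __be64 bit count, block-aligned */
		return round_up_to(insn_size(len) + sizeof(uint64_t) + 1,
				   SHA_MESSAGE_BYTES);
	}

	int main(void)
	{
		assert(digest_scratch_size(1) == 64);	/* 8 + 9  = 17 -> one block  */
		assert(digest_scratch_size(6) == 64);	/* 48 + 9 = 57 -> one block  */
		assert(digest_scratch_size(7) == 128);	/* 56 + 9 = 65 -> two blocks */
		printf("scratch sizes check out\n");
		return 0;
	}
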
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index a426cb55dc43..ed30d5d713e3 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -32,6 +32,7 @@
#define STORE_QUEUE_MINOR 155 /* unused */
#define I2O_MINOR 166
#define MICROCODE_MINOR 184
+#define IRNET_MINOR 187
#define VFIO_MINOR 196
#define TUN_MINOR 200
#define CUSE_MINOR 203
diff --git a/include/linux/platform_data/macb.h b/include/linux/platform_data/macb.h
index 21b15f6fee25..7815d50c26ff 100644
--- a/include/linux/platform_data/macb.h
+++ b/include/linux/platform_data/macb.h
@@ -8,6 +8,8 @@
#ifndef __MACB_PDATA_H__
#define __MACB_PDATA_H__
+#include <linux/clk.h>
+
/**
* struct macb_platform_data - platform data for MACB Ethernet
* @phy_mask: phy mask passed when register the MDIO bus
@@ -15,12 +17,16 @@
* @phy_irq_pin: PHY IRQ
* @is_rmii: using RMII interface?
* @rev_eth_addr: reverse Ethernet address byte order
+ * @pclk: platform clock
+ * @hclk: AHB clock
*/
struct macb_platform_data {
u32 phy_mask;
int phy_irq_pin;
u8 is_rmii;
u8 rev_eth_addr;
+ struct clk *pclk;
+ struct clk *hclk;
};
#endif /* __MACB_PDATA_H__ */
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 954ad6bfb56a..3212b39b5bfc 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -22,7 +22,8 @@ struct sock;
struct sockaddr;
int inet6_csk_bind_conflict(const struct sock *sk,
- const struct inet_bind_bucket *tb, bool relax);
+ const struct inet_bind_bucket *tb, bool relax,
+ bool soreuseport_ok);
struct dst_entry *inet6_csk_route_req(const struct sock *sk, struct flowi6 *fl6,
const struct request_sock *req, u8 proto);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 146054ceea8e..85ee3879499e 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -63,7 +63,8 @@ struct inet_connection_sock_af_ops {
#endif
void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
int (*bind_conflict)(const struct sock *sk,
- const struct inet_bind_bucket *tb, bool relax);
+ const struct inet_bind_bucket *tb,
+ bool relax, bool soreuseport_ok);
void (*mtu_reduced)(struct sock *sk);
};
@@ -261,7 +262,8 @@ inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
int inet_csk_bind_conflict(const struct sock *sk,
- const struct inet_bind_bucket *tb, bool relax);
+ const struct inet_bind_bucket *tb, bool relax,
+ bool soreuseport_ok);
int inet_csk_get_port(struct sock *sk, unsigned short snum);
struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4,
diff --git a/include/net/netlink.h b/include/net/netlink.h
index dd657a33f8c3..d3938f11ae52 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -698,7 +698,8 @@ static inline int nla_len(const struct nlattr *nla)
*/
static inline int nla_ok(const struct nlattr *nla, int remaining)
{
- return nla->nla_len >= sizeof(*nla) &&
+ return remaining >= (int) sizeof(*nla) &&
+ nla->nla_len >= sizeof(*nla) &&
nla->nla_len <= remaining;
}
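
The nla_ok() fix adds the missing bound on 'remaining' before the attribute header is trusted: remaining is a plain int that shrinks as attributes are consumed and can be smaller than the header itself, in which case reading nla->nla_len already overruns the buffer. Checking remaining first makes the short-circuit order safe. The corrected predicate, written standalone with a struct that mimics struct nlattr:

	#include <stdbool.h>
	#include <stdint.h>

	/* Mirrors the layout of struct nlattr (nla_len includes this header). */
	struct attr_hdr {
		uint16_t nla_len;
		uint16_t nla_type;
	};

	/* It is only safe to read the header once 'remaining' is known to cover it. */
	static bool attr_ok(const struct attr_hdr *nla, int remaining)
	{
		return remaining >= (int)sizeof(*nla) &&
		       nla->nla_len >= sizeof(*nla) &&
		       nla->nla_len <= remaining;
	}

	int main(void)
	{
		struct attr_hdr a = { .nla_len = 8, .nla_type = 1 };

		return (attr_ok(&a, 8) && !attr_ok(&a, 2) && !attr_ok(&a, -4)) ? 0 : 1;
	}
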
diff --git a/init/Kconfig b/init/Kconfig
index aafafeb0c117..223b734abccd 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1156,7 +1156,8 @@ config CGROUP_PERF
config CGROUP_BPF
bool "Support for eBPF programs attached to cgroups"
- depends on BPF_SYSCALL && SOCK_CGROUP_DATA
+ depends on BPF_SYSCALL
+ select SOCK_CGROUP_DATA
help
Allow attaching eBPF programs to a cgroup using the bpf(2)
syscall command BPF_PROG_ATTACH.
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 83e0d153b0b4..1eb4f1303756 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -105,19 +105,29 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
gfp_extra_flags;
struct bpf_prog *fp;
+ u32 pages, delta;
+ int ret;
BUG_ON(fp_old == NULL);
size = round_up(size, PAGE_SIZE);
- if (size <= fp_old->pages * PAGE_SIZE)
+ pages = size / PAGE_SIZE;
+ if (pages <= fp_old->pages)
return fp_old;
+ delta = pages - fp_old->pages;
+ ret = __bpf_prog_charge(fp_old->aux->user, delta);
+ if (ret)
+ return NULL;
+
fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
- if (fp != NULL) {
+ if (fp == NULL) {
+ __bpf_prog_uncharge(fp_old->aux->user, delta);
+ } else {
kmemcheck_annotate_bitfield(fp, meta);
memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
- fp->pages = size / PAGE_SIZE;
+ fp->pages = pages;
fp->aux->prog = fp;
/* We keep fp->aux from fp_old around in the new
@@ -136,28 +146,29 @@ void __bpf_prog_free(struct bpf_prog *fp)
vfree(fp);
}
-#define SHA_BPF_RAW_SIZE \
- round_up(MAX_BPF_SIZE + sizeof(__be64) + 1, SHA_MESSAGE_BYTES)
-
-/* Called under verifier mutex. */
-void bpf_prog_calc_digest(struct bpf_prog *fp)
+int bpf_prog_calc_digest(struct bpf_prog *fp)
{
const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
- static u32 ws[SHA_WORKSPACE_WORDS];
- static u8 raw[SHA_BPF_RAW_SIZE];
- struct bpf_insn *dst = (void *)raw;
+ u32 raw_size = bpf_prog_digest_scratch_size(fp);
+ u32 ws[SHA_WORKSPACE_WORDS];
u32 i, bsize, psize, blocks;
+ struct bpf_insn *dst;
bool was_ld_map;
- u8 *todo = raw;
+ u8 *raw, *todo;
__be32 *result;
__be64 *bits;
+ raw = vmalloc(raw_size);
+ if (!raw)
+ return -ENOMEM;
+
sha_init(fp->digest);
memset(ws, 0, sizeof(ws));
/* We need to take out the map fd for the digest calculation
* since they are unstable from user space side.
*/
+ dst = (void *)raw;
for (i = 0, was_ld_map = false; i < fp->len; i++) {
dst[i] = fp->insnsi[i];
if (!was_ld_map &&
@@ -177,12 +188,13 @@ void bpf_prog_calc_digest(struct bpf_prog *fp)
}
}
- psize = fp->len * sizeof(struct bpf_insn);
- memset(&raw[psize], 0, sizeof(raw) - psize);
+ psize = bpf_prog_insn_size(fp);
+ memset(&raw[psize], 0, raw_size - psize);
raw[psize++] = 0x80;
bsize = round_up(psize, SHA_MESSAGE_BYTES);
blocks = bsize / SHA_MESSAGE_BYTES;
+ todo = raw;
if (bsize - psize >= sizeof(__be64)) {
bits = (__be64 *)(todo + bsize - sizeof(__be64));
} else {
@@ -199,6 +211,9 @@ void bpf_prog_calc_digest(struct bpf_prog *fp)
result = (__force __be32 *)fp->digest;
for (i = 0; i < SHA_DIGEST_WORDS; i++)
result[i] = cpu_to_be32(fp->digest[i]);
+
+ vfree(raw);
+ return 0;
}
static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 4819ec9d95f6..e89acea22ecf 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -615,19 +615,39 @@ static void free_used_maps(struct bpf_prog_aux *aux)
kfree(aux->used_maps);
}
+int __bpf_prog_charge(struct user_struct *user, u32 pages)
+{
+ unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ unsigned long user_bufs;
+
+ if (user) {
+ user_bufs = atomic_long_add_return(pages, &user->locked_vm);
+ if (user_bufs > memlock_limit) {
+ atomic_long_sub(pages, &user->locked_vm);
+ return -EPERM;
+ }
+ }
+
+ return 0;
+}
+
+void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
+{
+ if (user)
+ atomic_long_sub(pages, &user->locked_vm);
+}
+
static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
struct user_struct *user = get_current_user();
- unsigned long memlock_limit;
-
- memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ int ret;
- atomic_long_add(prog->pages, &user->locked_vm);
- if (atomic_long_read(&user->locked_vm) > memlock_limit) {
- atomic_long_sub(prog->pages, &user->locked_vm);
+ ret = __bpf_prog_charge(user, prog->pages);
+ if (ret) {
free_uid(user);
- return -EPERM;
+ return ret;
}
+
prog->aux->user = user;
return 0;
}
@@ -636,7 +656,7 @@ static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
struct user_struct *user = prog->aux->user;
- atomic_long_sub(prog->pages, &user->locked_vm);
+ __bpf_prog_uncharge(user, prog->pages);
free_uid(user);
}
@@ -811,7 +831,7 @@ static int bpf_prog_load(union bpf_attr *attr)
err = -EFAULT;
if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
- prog->len * sizeof(struct bpf_insn)) != 0)
+ bpf_prog_insn_size(prog)) != 0)
goto free_prog;
prog->orig_prog = NULL;
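
__bpf_prog_charge()/__bpf_prog_uncharge() pull the RLIMIT_MEMLOCK bookkeeping out of bpf_prog_charge_memlock() so that bpf_prog_realloc() can charge only the page delta and undo it if the allocation fails. The pattern is add, compare against the limit, subtract on failure; a userspace sketch of the same shape (C11 atomics stand in for atomic_long_t, the limit is an example value):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_long locked_pages;		/* per-user counter */
	static const long memlock_limit = 16;		/* RLIMIT_MEMLOCK >> PAGE_SHIFT (example) */

	static int charge(long pages)
	{
		long now = atomic_fetch_add(&locked_pages, pages) + pages;

		if (now > memlock_limit) {
			atomic_fetch_sub(&locked_pages, pages);	/* undo on failure */
			return -1;				/* kernel returns -EPERM */
		}
		return 0;
	}

	static void uncharge(long pages)
	{
		atomic_fetch_sub(&locked_pages, pages);
	}

	int main(void)
	{
		if (charge(10))
			return 1;
		if (!charge(10))	/* 20 > 16: must be rejected and rolled back */
			return 1;
		uncharge(10);
		return atomic_load(&locked_pages) == 0 ? 0 : 1;
	}
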
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d28f9a3380a9..83ed2f8f6f22 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -462,14 +462,19 @@ static void init_reg_state(struct bpf_reg_state *regs)
regs[BPF_REG_1].type = PTR_TO_CTX;
}
-static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno)
+static void __mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno)
{
- BUG_ON(regno >= MAX_BPF_REG);
regs[regno].type = UNKNOWN_VALUE;
regs[regno].id = 0;
regs[regno].imm = 0;
}
+static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno)
+{
+ BUG_ON(regno >= MAX_BPF_REG);
+ __mark_reg_unknown_value(regs, regno);
+}
+
static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno)
{
regs[regno].min_value = BPF_REGISTER_MIN_RANGE;
@@ -1970,8 +1975,13 @@ static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) {
reg->type = type;
+ /* We don't need the id from this point onwards anymore, so
+ * reset it to give state pruning a chance to take effect.
+ */
+ reg->id = 0;
if (type == UNKNOWN_VALUE)
- mark_reg_unknown_value(regs, regno);
+ __mark_reg_unknown_value(regs, regno);
}
}
@@ -1982,16 +1992,16 @@ static void mark_map_regs(struct bpf_verifier_state *state, u32 regno,
enum bpf_reg_type type)
{
struct bpf_reg_state *regs = state->regs;
+ u32 id = regs[regno].id;
int i;
for (i = 0; i < MAX_BPF_REG; i++)
- mark_map_reg(regs, i, regs[regno].id, type);
+ mark_map_reg(regs, i, id, type);
for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
if (state->stack_slot_type[i] != STACK_SPILL)
continue;
- mark_map_reg(state->spilled_regs, i / BPF_REG_SIZE,
- regs[regno].id, type);
+ mark_map_reg(state->spilled_regs, i / BPF_REG_SIZE, id, type);
}
}
@@ -2926,6 +2936,10 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
int insn_cnt = env->prog->len;
int i, j, err;
+ err = bpf_prog_calc_digest(env->prog);
+ if (err)
+ return err;
+
for (i = 0; i < insn_cnt; i++, insn++) {
if (BPF_CLASS(insn->code) == BPF_LDX &&
(BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
@@ -3173,8 +3187,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
log_level = 0;
}
- bpf_prog_calc_digest(env->prog);
-
ret = replace_map_fd_with_map_ptr(env);
if (ret < 0)
goto skip_full_check;
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 779b3fa6052d..019557d0a11d 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
}
static struct lane2_ops lane2_ops = {
- lane2_resolve, /* resolve, spec 3.1.3 */
- lane2_associate_req, /* associate_req, spec 3.1.4 */
- NULL /* associate indicator, spec 3.1.5 */
+ .resolve = lane2_resolve, /* spec 3.1.3 */
+ .associate_req = lane2_associate_req, /* spec 3.1.4 */
+ .associate_indicator = NULL /* spec 3.1.5 */
};
static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
index 9e60e74c807d..a89fdebeffda 100644
--- a/net/atm/mpoa_caches.c
+++ b/net/atm/mpoa_caches.c
@@ -535,33 +535,32 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
static const struct in_cache_ops ingress_ops = {
- in_cache_add_entry, /* add_entry */
- in_cache_get, /* get */
- in_cache_get_with_mask, /* get_with_mask */
- in_cache_get_by_vcc, /* get_by_vcc */
- in_cache_put, /* put */
- in_cache_remove_entry, /* remove_entry */
- cache_hit, /* cache_hit */
- clear_count_and_expired, /* clear_count */
- check_resolving_entries, /* check_resolving */
- refresh_entries, /* refresh */
- in_destroy_cache /* destroy_cache */
+ .add_entry = in_cache_add_entry,
+ .get = in_cache_get,
+ .get_with_mask = in_cache_get_with_mask,
+ .get_by_vcc = in_cache_get_by_vcc,
+ .put = in_cache_put,
+ .remove_entry = in_cache_remove_entry,
+ .cache_hit = cache_hit,
+ .clear_count = clear_count_and_expired,
+ .check_resolving = check_resolving_entries,
+ .refresh = refresh_entries,
+ .destroy_cache = in_destroy_cache
};
static const struct eg_cache_ops egress_ops = {
- eg_cache_add_entry, /* add_entry */
- eg_cache_get_by_cache_id, /* get_by_cache_id */
- eg_cache_get_by_tag, /* get_by_tag */
- eg_cache_get_by_vcc, /* get_by_vcc */
- eg_cache_get_by_src_ip, /* get_by_src_ip */
- eg_cache_put, /* put */
- eg_cache_remove_entry, /* remove_entry */
- update_eg_cache_entry, /* update */
- clear_expired, /* clear_expired */
- eg_destroy_cache /* destroy_cache */
+ .add_entry = eg_cache_add_entry,
+ .get_by_cache_id = eg_cache_get_by_cache_id,
+ .get_by_tag = eg_cache_get_by_tag,
+ .get_by_vcc = eg_cache_get_by_vcc,
+ .get_by_src_ip = eg_cache_get_by_src_ip,
+ .put = eg_cache_put,
+ .remove_entry = eg_cache_remove_entry,
+ .update = update_eg_cache_entry,
+ .clear_expired = clear_expired,
+ .destroy_cache = eg_destroy_cache
};
-
void atm_mpoa_init_cache(struct mpoa_client *mpc)
{
mpc->in_ops = &ingress_ops;
diff --git a/net/core/filter.c b/net/core/filter.c
index b1461708a977..7190bd648154 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2972,6 +2972,12 @@ void bpf_warn_invalid_xdp_action(u32 act)
}
EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
+void bpf_warn_invalid_xdp_buffer(void)
+{
+ WARN_ONCE(1, "Illegal XDP buffer encountered, expect throughput degradation\n");
+}
+EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_buffer);
+
static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg,
int src_reg, int ctx_off,
struct bpf_insn *insn_buf,
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index b2c26b081134..41f803e35da3 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -201,7 +201,7 @@ static struct dn_dev_sysctl_table {
.extra1 = &min_t3,
.extra2 = &max_t3
},
- {0}
+ { }
},
};
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index d5d3ead0a6c3..19ea045c50ed 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -45,11 +45,12 @@ void inet_get_local_port_range(struct net *net, int *low, int *high)
EXPORT_SYMBOL(inet_get_local_port_range);
int inet_csk_bind_conflict(const struct sock *sk,
- const struct inet_bind_bucket *tb, bool relax)
+ const struct inet_bind_bucket *tb, bool relax,
+ bool reuseport_ok)
{
struct sock *sk2;
- int reuse = sk->sk_reuse;
- int reuseport = sk->sk_reuseport;
+ bool reuse = sk->sk_reuse;
+ bool reuseport = !!sk->sk_reuseport && reuseport_ok;
kuid_t uid = sock_i_uid((struct sock *)sk);
/*
@@ -105,6 +106,7 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
struct inet_bind_bucket *tb;
kuid_t uid = sock_i_uid(sk);
u32 remaining, offset;
+ bool reuseport_ok = !!snum;
if (port) {
have_port:
@@ -165,7 +167,8 @@ other_parity_scan:
smallest_size = tb->num_owners;
smallest_port = port;
}
- if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false))
+ if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false,
+ reuseport_ok))
goto tb_found;
goto next_port;
}
@@ -206,13 +209,14 @@ tb_found:
sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
smallest_size == -1)
goto success;
- if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) {
+ if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true,
+ reuseport_ok)) {
if ((reuse ||
(tb->fastreuseport > 0 &&
sk->sk_reuseport &&
!rcu_access_pointer(sk->sk_reuseport_cb) &&
uid_eq(tb->fastuid, uid))) &&
- smallest_size != -1 && --attempts >= 0) {
+ !snum && smallest_size != -1 && --attempts >= 0) {
spin_unlock_bh(&head->lock);
goto again;
}
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 1c86c478f578..7396e75e161b 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -29,11 +29,12 @@
#include <net/sock_reuseport.h>
int inet6_csk_bind_conflict(const struct sock *sk,
- const struct inet_bind_bucket *tb, bool relax)
+ const struct inet_bind_bucket *tb, bool relax,
+ bool reuseport_ok)
{
const struct sock *sk2;
- int reuse = sk->sk_reuse;
- int reuseport = sk->sk_reuseport;
+ bool reuse = !!sk->sk_reuse;
+ bool reuseport = !!sk->sk_reuseport && reuseport_ok;
kuid_t uid = sock_i_uid((struct sock *)sk);
/* We must walk the whole port owner list in this case. -DaveM */
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 2413a0637d99..890acace01d0 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2174,6 +2174,8 @@ static int ip6_route_del(struct fib6_config *cfg)
continue;
if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
continue;
+ if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
+ continue;
dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
diff --git a/net/irda/irnet/irnet.h b/net/irda/irnet/irnet.h
index 8d65bb9477fc..c69f0f38f566 100644
--- a/net/irda/irnet/irnet.h
+++ b/net/irda/irnet/irnet.h
@@ -245,7 +245,6 @@
#include <linux/tty.h>
#include <linux/proc_fs.h>
#include <linux/netdevice.h>
-#include <linux/miscdevice.h>
#include <linux/poll.h>
#include <linux/capability.h>
#include <linux/ctype.h> /* isspace() */
diff --git a/net/irda/irnet/irnet_ppp.h b/net/irda/irnet/irnet_ppp.h
index 940225866da0..32061442cc8e 100644
--- a/net/irda/irnet/irnet_ppp.h
+++ b/net/irda/irnet/irnet_ppp.h
@@ -15,13 +15,10 @@
/***************************** INCLUDES *****************************/
#include "irnet.h" /* Module global include */
+#include <linux/miscdevice.h>
/************************ CONSTANTS & MACROS ************************/
-/* /dev/irnet file constants */
-#define IRNET_MAJOR 10 /* Misc range */
-#define IRNET_MINOR 187 /* Official allocation */
-
/* IrNET control channel stuff */
#define IRNET_MAX_COMMAND 256 /* Max length of a command line */
@@ -111,9 +108,9 @@ static const struct file_operations irnet_device_fops =
/* Structure so that the misc major (drivers/char/misc.c) take care of us... */
static struct miscdevice irnet_misc_device =
{
- IRNET_MINOR,
- "irnet",
- &irnet_device_fops
+ .minor = IRNET_MINOR,
+ .name = "irnet",
+ .fops = &irnet_device_fops
};
#endif /* IRNET_PPP_H */
diff --git a/net/irda/irproc.c b/net/irda/irproc.c
index b9ac598e2116..77cfdde9d82f 100644
--- a/net/irda/irproc.c
+++ b/net/irda/irproc.c
@@ -23,7 +23,6 @@
*
********************************************************************/
-#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index edd6f2945f69..a98fc2b5e0dc 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -265,7 +265,8 @@ static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata,
if (uni) {
rcu_assign_pointer(sdata->default_unicast_key, key);
ieee80211_check_fast_xmit_iface(sdata);
- drv_set_default_unicast_key(sdata->local, sdata, idx);
+ if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
+ drv_set_default_unicast_key(sdata->local, sdata, idx);
}
if (multi)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index eeab7250f4b9..3e289a64ed43 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2472,7 +2472,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
if (!ifmsh->mshcfg.dot11MeshForwarding)
goto out;
- fwd_skb = skb_copy(skb, GFP_ATOMIC);
+ fwd_skb = skb_copy_expand(skb, local->tx_headroom, 0, GFP_ATOMIC);
if (!fwd_skb) {
net_info_ratelimited("%s: failed to clone mesh frame\n",
sdata->name);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 1711bae4abf2..b6cfcf038c11 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1972,6 +1972,7 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u16 rate,
u16 brate;
unsigned int shift;
+ rinfo->flags = 0;
sband = local->hw.wiphy->bands[(rate >> 4) & 0xf];
brate = sband->bitrates[rate & 0xf].bitrate;
if (rinfo->bw == RATE_INFO_BW_5)
@@ -1987,14 +1988,15 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u16 rate,
rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
}
-static void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
+static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
{
u16 rate = ACCESS_ONCE(sta_get_last_rx_stats(sta)->last_rate);
if (rate == STA_STATS_RATE_INVALID)
- rinfo->flags = 0;
- else
- sta_stats_decode_rate(sta->local, rate, rinfo);
+ return -EINVAL;
+
+ sta_stats_decode_rate(sta->local, rate, rinfo);
+ return 0;
}
static void sta_set_tidstats(struct sta_info *sta,
@@ -2199,8 +2201,8 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
}
if (!(sinfo->filled & BIT(NL80211_STA_INFO_RX_BITRATE))) {
- sta_set_rate_info_rx(sta, &sinfo->rxrate);
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE);
+ if (sta_set_rate_info_rx(sta, &sinfo->rxrate) == 0)
+ sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE);
}
sinfo->filled |= BIT(NL80211_STA_INFO_TID_STATS);
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index e040c5140f61..35ac28d0720c 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -252,7 +252,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
offload.cookie = (unsigned long)f;
offload.dissector = dissector;
offload.mask = mask;
- offload.key = &f->key;
+ offload.key = &f->mkey;
offload.exts = &f->exts;
tc->type = TC_SETUP_CLSFLOWER;
@@ -509,6 +509,7 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ mask->control.addr_type = ~0;
fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
&mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
sizeof(key->ipv4.src));
@@ -517,6 +518,7 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
sizeof(key->ipv4.dst));
} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ mask->control.addr_type = ~0;
fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
&mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
sizeof(key->ipv6.src));
@@ -571,6 +573,7 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ mask->enc_control.addr_type = ~0;
fl_set_key_val(tb, &key->enc_ipv4.src,
TCA_FLOWER_KEY_ENC_IPV4_SRC,
&mask->enc_ipv4.src,
@@ -586,6 +589,7 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ mask->enc_control.addr_type = ~0;
fl_set_key_val(tb, &key->enc_ipv6.src,
TCA_FLOWER_KEY_ENC_IPV6_SRC,
&mask->enc_ipv6.src,
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 1f03065686fe..410ddc1e3443 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -331,7 +331,9 @@ struct sctp_association *sctp_endpoint_lookup_assoc(
* on this endpoint.
*/
if (!ep->base.bind_addr.port)
- goto out;
+ return NULL;
+
+ rcu_read_lock();
t = sctp_epaddr_lookup_transport(ep, paddr);
if (!t)
goto out;
@@ -339,6 +341,7 @@ struct sctp_association *sctp_endpoint_lookup_assoc(
*transport = t;
asoc = t->asoc;
out:
+ rcu_read_unlock();
return asoc;
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index d5f4b4a8369b..318c6786d653 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4472,18 +4472,17 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
const union sctp_addr *paddr, void *p)
{
struct sctp_transport *transport;
- int err = -ENOENT;
+ int err;
rcu_read_lock();
transport = sctp_addrs_lookup_transport(net, laddr, paddr);
+ rcu_read_unlock();
if (!transport)
- goto out;
+ return -ENOENT;
- rcu_read_unlock();
err = cb(transport, p);
sctp_transport_put(transport);
-out:
return err;
}
EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
diff --git a/net/vmw_vsock/vmci_transport_notify.c b/net/vmw_vsock/vmci_transport_notify.c
index fd8cf0214d51..1406db4d97d1 100644
--- a/net/vmw_vsock/vmci_transport_notify.c
+++ b/net/vmw_vsock/vmci_transport_notify.c
@@ -662,19 +662,19 @@ static void vmci_transport_notify_pkt_process_negotiate(struct sock *sk)
/* Socket control packet based operations. */
const struct vmci_transport_notify_ops vmci_transport_notify_pkt_ops = {
- vmci_transport_notify_pkt_socket_init,
- vmci_transport_notify_pkt_socket_destruct,
- vmci_transport_notify_pkt_poll_in,
- vmci_transport_notify_pkt_poll_out,
- vmci_transport_notify_pkt_handle_pkt,
- vmci_transport_notify_pkt_recv_init,
- vmci_transport_notify_pkt_recv_pre_block,
- vmci_transport_notify_pkt_recv_pre_dequeue,
- vmci_transport_notify_pkt_recv_post_dequeue,
- vmci_transport_notify_pkt_send_init,
- vmci_transport_notify_pkt_send_pre_block,
- vmci_transport_notify_pkt_send_pre_enqueue,
- vmci_transport_notify_pkt_send_post_enqueue,
- vmci_transport_notify_pkt_process_request,
- vmci_transport_notify_pkt_process_negotiate,
+ .socket_init = vmci_transport_notify_pkt_socket_init,
+ .socket_destruct = vmci_transport_notify_pkt_socket_destruct,
+ .poll_in = vmci_transport_notify_pkt_poll_in,
+ .poll_out = vmci_transport_notify_pkt_poll_out,
+ .handle_notify_pkt = vmci_transport_notify_pkt_handle_pkt,
+ .recv_init = vmci_transport_notify_pkt_recv_init,
+ .recv_pre_block = vmci_transport_notify_pkt_recv_pre_block,
+ .recv_pre_dequeue = vmci_transport_notify_pkt_recv_pre_dequeue,
+ .recv_post_dequeue = vmci_transport_notify_pkt_recv_post_dequeue,
+ .send_init = vmci_transport_notify_pkt_send_init,
+ .send_pre_block = vmci_transport_notify_pkt_send_pre_block,
+ .send_pre_enqueue = vmci_transport_notify_pkt_send_pre_enqueue,
+ .send_post_enqueue = vmci_transport_notify_pkt_send_post_enqueue,
+ .process_request = vmci_transport_notify_pkt_process_request,
+ .process_negotiate = vmci_transport_notify_pkt_process_negotiate,
};
diff --git a/net/vmw_vsock/vmci_transport_notify_qstate.c b/net/vmw_vsock/vmci_transport_notify_qstate.c
index 21e591dafb03..f3a0afc46208 100644
--- a/net/vmw_vsock/vmci_transport_notify_qstate.c
+++ b/net/vmw_vsock/vmci_transport_notify_qstate.c
@@ -420,19 +420,19 @@ vmci_transport_notify_pkt_send_pre_enqueue(
/* Socket always on control packet based operations. */
const struct vmci_transport_notify_ops vmci_transport_notify_pkt_q_state_ops = {
- vmci_transport_notify_pkt_socket_init,
- vmci_transport_notify_pkt_socket_destruct,
- vmci_transport_notify_pkt_poll_in,
- vmci_transport_notify_pkt_poll_out,
- vmci_transport_notify_pkt_handle_pkt,
- vmci_transport_notify_pkt_recv_init,
- vmci_transport_notify_pkt_recv_pre_block,
- vmci_transport_notify_pkt_recv_pre_dequeue,
- vmci_transport_notify_pkt_recv_post_dequeue,
- vmci_transport_notify_pkt_send_init,
- vmci_transport_notify_pkt_send_pre_block,
- vmci_transport_notify_pkt_send_pre_enqueue,
- vmci_transport_notify_pkt_send_post_enqueue,
- vmci_transport_notify_pkt_process_request,
- vmci_transport_notify_pkt_process_negotiate,
+ .socket_init = vmci_transport_notify_pkt_socket_init,
+ .socket_destruct = vmci_transport_notify_pkt_socket_destruct,
+ .poll_in = vmci_transport_notify_pkt_poll_in,
+ .poll_out = vmci_transport_notify_pkt_poll_out,
+ .handle_notify_pkt = vmci_transport_notify_pkt_handle_pkt,
+ .recv_init = vmci_transport_notify_pkt_recv_init,
+ .recv_pre_block = vmci_transport_notify_pkt_recv_pre_block,
+ .recv_pre_dequeue = vmci_transport_notify_pkt_recv_pre_dequeue,
+ .recv_post_dequeue = vmci_transport_notify_pkt_recv_post_dequeue,
+ .send_init = vmci_transport_notify_pkt_send_init,
+ .send_pre_block = vmci_transport_notify_pkt_send_pre_block,
+ .send_pre_enqueue = vmci_transport_notify_pkt_send_pre_enqueue,
+ .send_post_enqueue = vmci_transport_notify_pkt_send_post_enqueue,
+ .process_request = vmci_transport_notify_pkt_process_request,
+ .process_negotiate = vmci_transport_notify_pkt_process_negotiate,
};
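Both vmci_transport notify ops tables above are converted from positional to C99 designated initializers: each callback is bound to its field by name, so the tables no longer break silently if struct vmci_transport_notify_ops gains or reorders members, and any member left out is zero-initialized. A minimal standalone sketch of the idiom with a made-up ops struct (not the vsock types):

	#include <stdio.h>

	struct notify_ops {
		void (*socket_init)(void);
		int  (*poll_in)(void);
		int  (*poll_out)(void);
	};

	static void my_socket_init(void) { puts("init"); }
	static int  my_poll_in(void)     { return 0; }

	static const struct notify_ops ops = {
		.socket_init = my_socket_init,
		.poll_in     = my_poll_in,
		/* .poll_out deliberately omitted: unnamed members are zero-filled */
	};

	int main(void)
	{
		ops.socket_init();
		return ops.poll_in();
	}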
diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c
index 43239527a205..a06dfe143c67 100644
--- a/net/x25/sysctl_net_x25.c
+++ b/net/x25/sysctl_net_x25.c
@@ -70,7 +70,7 @@ static struct ctl_table x25_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
- { 0, },
+ { },
};
void __init x25_register_sysctl(void)
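The x25 hunk only changes the spelling of the terminating entry: "{ 0, }" and "{ }" both zero-initialize the sentinel that ends the ctl_table array, with the empty-brace form being the preferred kernel style. A small standalone sketch of why a zeroed sentinel is needed at all, using a hypothetical entry type rather than the real ctl_table:

	#include <stdio.h>

	struct entry {
		const char *name;
		int value;
	};

	static const struct entry table[] = {
		{ "example_a", 120 },
		{ "example_b", 180 },
		{ },	/* zeroed sentinel: the walk below stops here */
	};

	int main(void)
	{
		for (const struct entry *e = table; e->name; e++)
			printf("%s = %d\n", e->name, e->value);
		return 0;
	}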
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 0103bf2e0c0d..853d7e43434a 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -1059,7 +1059,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "unknown func 6",
+ .errstr_unpriv = "unknown func bpf_trace_printk#6",
.result_unpriv = REJECT,
.result = ACCEPT,
},
@@ -2661,6 +2661,34 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS
},
{
+ "multiple registers share map_lookup_elem bad reg type",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_1, 3),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 4 },
+ .result = REJECT,
+ .errstr = "R3 invalid mem access 'inv'",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+ },
+ {
"invalid map access from else condition",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
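The new "multiple registers share map_lookup_elem bad reg type" test copies the maybe-NULL return value of bpf_map_lookup_elem() from R0 into R2-R5, performs checks on some of the copies, and then stores through R3; the verifier is expected to reject that store with "R3 invalid mem access 'inv'". A loose userspace analogy of the hazard being probed (hypothetical lookup helper, not BPF), where several aliases of one maybe-NULL pointer exist and a check on one alias says nothing by itself about the others:

	#include <stdio.h>

	static int table[4];

	/* May return NULL, loosely like map_lookup_elem. */
	static int *lookup(long key)
	{
		return (key >= 0 && key < 4) ? &table[key] : NULL;
	}

	int main(void)
	{
		int *a = lookup(10);	/* NULL here */
		int *b = a;		/* alias of the maybe-NULL result */

		if (a != NULL)
			*a = 1;		/* safe: guarded by the check on 'a' */

		/* Dereferencing 'b' without its own check would crash when the
		 * lookup failed; a static analyser such as the BPF verifier has
		 * to decide whether a check on one copy proves anything about
		 * the others. */
		if (b != NULL)
			*b = 2;

		printf("%d %d\n", table[0], table[1]);
		return 0;
	}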