Diffstat (limited to 'drivers/net/ethernet/cortina/gemini.c')
-rw-r--r--  drivers/net/ethernet/cortina/gemini.c  115
1 file changed, 84 insertions(+), 31 deletions(-)
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 705c3eb19cd3..6a2004bbe87f 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -40,6 +40,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <net/gro.h>
#include "gemini.h"
@@ -79,7 +80,8 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT)
#define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \
- NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM)
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
/**
* struct gmac_queue_page - page buffer per-page info
@@ -287,13 +289,13 @@ static void gmac_set_flow_control(struct net_device *netdev, bool tx, bool rx)
spin_unlock_irqrestore(&port->config_lock, flags);
}
-static void gmac_speed_set(struct net_device *netdev)
+static void gmac_adjust_link(struct net_device *netdev)
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
struct phy_device *phydev = netdev->phydev;
union gmac_status status, old_status;
- int pause_tx = 0;
- int pause_rx = 0;
+ bool pause_tx = false;
+ bool pause_rx = false;
status.bits32 = readl(port->gmac_base + GMAC_STATUS);
old_status.bits32 = status.bits32;
@@ -328,14 +330,9 @@ static void gmac_speed_set(struct net_device *netdev)
}
if (phydev->duplex == DUPLEX_FULL) {
- u16 lcladv = phy_read(phydev, MII_ADVERTISE);
- u16 rmtadv = phy_read(phydev, MII_LPA);
- u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
-
- if (cap & FLOW_CTRL_RX)
- pause_rx = 1;
- if (cap & FLOW_CTRL_TX)
- pause_tx = 1;
+ phy_get_pause(phydev, &pause_tx, &pause_rx);
+ netdev_dbg(netdev, "set negotiated pause params pause TX = %s, pause RX = %s\n",
+ pause_tx ? "ON" : "OFF", pause_rx ? "ON" : "OFF");
}
gmac_set_flow_control(netdev, pause_tx, pause_rx);
@@ -366,7 +363,7 @@ static int gmac_setup_phy(struct net_device *netdev)
phy = of_phy_get_and_connect(netdev,
dev->of_node,
- gmac_speed_set);
+ gmac_adjust_link);
if (!phy)
return -ENODEV;
netdev->phydev = phy;
@@ -1107,10 +1104,13 @@ static void gmac_tx_irq_enable(struct net_device *netdev,
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
struct gemini_ethernet *geth = port->geth;
+ unsigned long flags;
u32 val, mask;
netdev_dbg(netdev, "%s device %d\n", __func__, netdev->dev_id);
+ spin_lock_irqsave(&geth->irq_lock, flags);
+
mask = GMAC0_IRQ0_TXQ0_INTS << (6 * netdev->dev_id + txq);
if (en)
@@ -1119,6 +1119,8 @@ static void gmac_tx_irq_enable(struct net_device *netdev,
val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
val = en ? val | mask : val & ~mask;
writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
}
static void gmac_tx_irq(struct net_device *netdev, unsigned int txq_num)
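
The hunk above (and the matching ones in gmac_rx() and gmac_irq() further down) closes a race on the shared GLOBAL_INTERRUPT_ENABLE_0_REG: enabling or disabling a queue interrupt is a read-modify-write, so two contexts touching the register concurrently could drop each other's bit. A minimal sketch of the locked pattern, using a hypothetical helper name but the register and lock taken from the driver:

static void geth_rmw_irq_enable(struct gemini_ethernet *geth, u32 mask, bool en)
{
	unsigned long flags;
	u32 val;

	/* geth->irq_lock serializes the read-modify-write; without it a
	 * concurrent caller can read the stale value and write it back,
	 * undoing the bit we just set or cleared.
	 */
	spin_lock_irqsave(&geth->irq_lock, flags);
	val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
	val = en ? (val | mask) : (val & ~mask);
	writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
	spin_unlock_irqrestore(&geth->irq_lock, flags);
}
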
@@ -1142,15 +1144,53 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
struct gmac_txdesc *txd;
skb_frag_t *skb_frag;
dma_addr_t mapping;
+ bool tcp = false;
void *buffer;
+ u16 mss;
int ret;
- /* TODO: implement proper TSO using MTU in word3 */
word1 = skb->len;
word3 = SOF_BIT;
- if (skb->len >= ETH_FRAME_LEN) {
- /* Hardware offloaded checksumming isn't working on frames
+ /* Determine if we are doing TCP */
+ if (skb->protocol == htons(ETH_P_IP))
+ tcp = (ip_hdr(skb)->protocol == IPPROTO_TCP);
+ else
+ /* IPv6 */
+ tcp = (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP);
+
+ mss = skb_shinfo(skb)->gso_size;
+ if (mss) {
+ /* This means we are dealing with TCP and skb->len is the
+ * sum total of all the segments. The TSO will deal with
+ * chopping this up for us.
+ */
+ /* The accelerator needs the full frame size here */
+ mss += skb_tcp_all_headers(skb);
+ netdev_dbg(netdev, "segment offloading mss = %04x len=%04x\n",
+ mss, skb->len);
+ word1 |= TSS_MTU_ENABLE_BIT;
+ word3 |= mss;
+ } else if (tcp) {
+ /* Even if we are not using TSO, use the hardware offloader
+ * for transferring the TCP frame: this hardware has partial
+ * TCP awareness (called TOE - TCP Offload Engine) and will
+ * according to the datasheet put packets belonging to the
+ * same TCP connection in the same queue for the TOE/TSO
+ * engine to process. The engine will deal with chopping
+ * up frames that exceed ETH_DATA_LEN which the
+ * checksumming engine cannot handle (see below) into
+ * manageable chunks. It flawlessly deals with quite big
+ * frames and frames containing custom DSA EtherTypes.
+ */
+ mss = netdev->mtu + skb_tcp_all_headers(skb);
+ mss = min(mss, skb->len);
+ netdev_dbg(netdev, "TOE/TSO len %04x mtu %04x mss %04x\n",
+ skb->len, netdev->mtu, mss);
+ word1 |= TSS_MTU_ENABLE_BIT;
+ word3 |= mss;
+ } else if (skb->len >= ETH_FRAME_LEN) {
+ /* Hardware offloaded checksumming isn't working on non-TCP frames
* bigger than 1514 bytes. A hypothesis about this is that the
* checksum buffer is only 1518 bytes, so when the frames get
* bigger they get truncated, or the last few bytes get
@@ -1164,22 +1204,19 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
return ret;
}
word1 |= TSS_BYPASS_BIT;
- } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- int tcp = 0;
+ }
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
/* We do not switch off the checksumming on non TCP/UDP
* frames: as is shown from tests, the checksumming engine
* is smart enough to see that a frame is not actually TCP
* or UDP and then just pass it through without any changes
* to the frame.
*/
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (skb->protocol == htons(ETH_P_IP))
word1 |= TSS_IP_CHKSUM_BIT;
- tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
- } else { /* IPv6 */
+ else
word1 |= TSS_IPV6_ENABLE_BIT;
- tcp = ipv6_hdr(skb)->nexthdr == IPPROTO_TCP;
- }
word1 |= tcp ? TSS_TCP_CHKSUM_BIT : TSS_UDP_CHKSUM_BIT;
}
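
For the TSO path the hardware wants the full per-segment frame size in the word3 MTU field, not the bare gso_size: with a standard 1500-byte MTU, gso_size is typically 1460 (IPv4, no options), and the 14 + 20 + 20 bytes of Ethernet/IP/TCP headers bring the field to 1514. A condensed sketch of the computation in the hunk above (hypothetical helper name, same fields as the patch):

/* Illustrative only: mirrors the word3 MTU computation in gmac_map_tx_bufs().
 * The driver only takes the second (TOE) branch for TCP frames.
 */
static u32 gemini_tx_mtu_field(const struct sk_buff *skb, unsigned int dev_mtu)
{
	u32 mss = skb_shinfo(skb)->gso_size;

	if (mss)	/* TSO: per-segment payload plus all headers */
		return mss + skb_tcp_all_headers(skb);

	/* TOE: full frame size, capped at what the skb actually holds */
	return min_t(u32, dev_mtu + skb_tcp_all_headers(skb), skb->len);
}
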
@@ -1415,15 +1452,19 @@ static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget)
union gmac_rxdesc_3 word3;
struct page *page = NULL;
unsigned int page_offs;
+ unsigned long flags;
unsigned short r, w;
union dma_rwptr rw;
dma_addr_t mapping;
int frag_nr = 0;
+ spin_lock_irqsave(&geth->irq_lock, flags);
rw.bits32 = readl(ptr_reg);
/* Reset interrupt as all packages until here are taken into account */
writel(DEFAULT_Q0_INT_BIT << netdev->dev_id,
geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+
r = rw.bits.rptr;
w = rw.bits.wptr;
@@ -1726,10 +1767,9 @@ static irqreturn_t gmac_irq(int irq, void *data)
gmac_update_hw_stats(netdev);
if (val & (GMAC0_RX_OVERRUN_INT_BIT << (netdev->dev_id * 8))) {
+ spin_lock(&geth->irq_lock);
writel(GMAC0_RXDERR_INT_BIT << (netdev->dev_id * 8),
geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
-
- spin_lock(&geth->irq_lock);
u64_stats_update_begin(&port->ir_stats_syncp);
++port->stats.rx_fifo_errors;
u64_stats_update_end(&port->ir_stats_syncp);
@@ -1815,9 +1855,8 @@ static int gmac_open(struct net_device *netdev)
gmac_enable_tx_rx(netdev);
netif_tx_start_all_queues(netdev);
- hrtimer_init(&port->rx_coalesce_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired;
+ hrtimer_setup(&port->rx_coalesce_timer, &gmac_coalesce_delay_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
netdev_dbg(netdev, "opened\n");
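
hrtimer_setup() binds the callback at init time instead of assigning .function separately; the handler itself keeps the usual hrtimer shape. Roughly what the driver's gmac_coalesce_delay_expired() looks like (reconstructed sketch, not part of this diff):

static enum hrtimer_restart gmac_coalesce_delay_expired(struct hrtimer *timer)
{
	struct gemini_ethernet_port *port =
		container_of(timer, struct gemini_ethernet_port,
			     rx_coalesce_timer);

	/* The RX coalescing delay has elapsed: poll the queue now */
	napi_schedule(&port->napi);
	return HRTIMER_NORESTART;
}
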
@@ -1978,7 +2017,7 @@ static int gmac_change_mtu(struct net_device *netdev, int new_mtu)
gmac_disable_tx_rx(netdev);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
gmac_update_config0_reg(netdev, max_len << CONFIG0_MAXLEN_SHIFT,
CONFIG0_MAXLEN_MASK);
@@ -2108,6 +2147,19 @@ static void gmac_get_pauseparam(struct net_device *netdev,
pparam->autoneg = true;
}
+static int gmac_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pparam)
+{
+ struct phy_device *phydev = netdev->phydev;
+
+ if (!pparam->autoneg)
+ return -EOPNOTSUPP;
+
+ phy_set_asym_pause(phydev, pparam->rx_pause, pparam->tx_pause);
+
+ return 0;
+}
+
static void gmac_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *rp,
struct kernel_ethtool_ringparam *kernel_rp,
@@ -2228,6 +2280,7 @@ static const struct ethtool_ops gmac_351x_ethtool_ops = {
.set_link_ksettings = gmac_set_ksettings,
.nway_reset = gmac_nway_reset,
.get_pauseparam = gmac_get_pauseparam,
+ .set_pauseparam = gmac_set_pauseparam,
.get_ringparam = gmac_get_ringparam,
.set_ringparam = gmac_set_ringparam,
.get_coalesce = gmac_get_coalesce,
@@ -2541,7 +2594,7 @@ static struct platform_driver gemini_ethernet_port_driver = {
.of_match_table = gemini_ethernet_port_of_match,
},
.probe = gemini_ethernet_port_probe,
- .remove_new = gemini_ethernet_port_remove,
+ .remove = gemini_ethernet_port_remove,
};
static int gemini_ethernet_probe(struct platform_device *pdev)
@@ -2605,7 +2658,7 @@ static struct platform_driver gemini_ethernet_driver = {
.of_match_table = gemini_ethernet_of_match,
},
.probe = gemini_ethernet_probe,
- .remove_new = gemini_ethernet_remove,
+ .remove = gemini_ethernet_remove,
};
static int __init gemini_ethernet_module_init(void)