Diffstat (limited to 'drivers/net/ethernet/renesas')
-rw-r--r--  drivers/net/ethernet/renesas/Kconfig      |   12
-rw-r--r--  drivers/net/ethernet/renesas/Makefile     |    2
-rw-r--r--  drivers/net/ethernet/renesas/ravb.h       |   80
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c  | 1712
-rw-r--r--  drivers/net/ethernet/renesas/rswitch.c    |  284
-rw-r--r--  drivers/net/ethernet/renesas/rswitch.h    |   75
-rw-r--r--  drivers/net/ethernet/renesas/rtsn.c       | 1388
-rw-r--r--  drivers/net/ethernet/renesas/rtsn.h       |  464
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c     |   10
9 files changed, 3101 insertions(+), 926 deletions(-)
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index d6136fe5c206..9b7559c88bee 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -33,7 +33,9 @@ config RAVB
select CRC32
select MII
select MDIO_BITBANG
+ select PAGE_POOL
select PHYLIB
+ select RESET_CONTROLLER
help
Renesas Ethernet AVB device driver.
@@ -57,4 +59,14 @@ config RENESAS_GEN4_PTP
help
Renesas R-Car Gen4 gPTP device driver.
+config RTSN
+ tristate "Renesas Ethernet-TSN support"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on PTP_1588_CLOCK
+ select CRC32
+ select PHYLIB
+ select RENESAS_GEN4_PTP
+ help
+ Renesas Ethernet-TSN device driver.
+
endif # NET_VENDOR_RENESAS
diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile
index 9070acfd6aaf..f65fc76f8b4d 100644
--- a/drivers/net/ethernet/renesas/Makefile
+++ b/drivers/net/ethernet/renesas/Makefile
@@ -11,3 +11,5 @@ obj-$(CONFIG_RAVB) += ravb.o
obj-$(CONFIG_RENESAS_ETHER_SWITCH) += rswitch.o
obj-$(CONFIG_RENESAS_GEN4_PTP) += rcar_gen4_ptp.o
+
+obj-$(CONFIG_RTSN) += rtsn.o
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index e0f8276cffed..7b48060c250b 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -19,6 +19,7 @@
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/ptp_clock_kernel.h>
+#include <net/page_pool/types.h>
#define BE_TX_RING_SIZE 64 /* TX ring size for Best Effort */
#define BE_RX_RING_SIZE 1024 /* RX ring size for Best Effort */
@@ -205,7 +206,11 @@ enum ravb_reg {
TLFRCR = 0x0758,
RFCR = 0x0760,
MAFCR = 0x0778,
- CSR0 = 0x0800, /* RZ/G2L only */
+
+ /* TOE registers (RZ/G2L only) */
+ CSR0 = 0x0800,
+ CSR1 = 0x0804,
+ CSR2 = 0x0808,
};
@@ -253,6 +258,7 @@ enum APSR_BIT {
APSR_CMSW = 0x00000010,
APSR_RDM = 0x00002000,
APSR_TDM = 0x00004000,
+ APSR_MIISELECT = 0x01000000, /* R-Car V4M only */
};
/* RCR */
@@ -978,16 +984,44 @@ enum CSR0_BIT {
CSR0_RPE = 0x00000020,
};
+enum CSR1_BIT {
+ CSR1_TIP4 = 0x00000001,
+ CSR1_TTCP4 = 0x00000010,
+ CSR1_TUDP4 = 0x00000020,
+ CSR1_TICMP4 = 0x00000040,
+ CSR1_TTCP6 = 0x00100000,
+ CSR1_TUDP6 = 0x00200000,
+ CSR1_TICMP6 = 0x00400000,
+ CSR1_THOP = 0x01000000,
+ CSR1_TROUT = 0x02000000,
+ CSR1_TAHD = 0x04000000,
+ CSR1_TDHD = 0x08000000,
+};
+
+#define CSR1_CSUM_ENABLE (CSR1_TTCP4 | CSR1_TUDP4 | CSR1_TTCP6 | CSR1_TUDP6)
+
+enum CSR2_BIT {
+ CSR2_RIP4 = 0x00000001,
+ CSR2_RTCP4 = 0x00000010,
+ CSR2_RUDP4 = 0x00000020,
+ CSR2_RICMP4 = 0x00000040,
+ CSR2_RTCP6 = 0x00100000,
+ CSR2_RUDP6 = 0x00200000,
+ CSR2_RICMP6 = 0x00400000,
+ CSR2_RHOP = 0x01000000,
+ CSR2_RROUT = 0x02000000,
+ CSR2_RAHD = 0x04000000,
+ CSR2_RDHD = 0x08000000,
+};
+
+#define CSR2_CSUM_ENABLE (CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4 | \
+ CSR2_RTCP6 | CSR2_RUDP6 | CSR2_RICMP6)
+
#define DBAT_ENTRY_NUM 22
#define RX_QUEUE_OFFSET 4
#define NUM_RX_QUEUE 2
#define NUM_TX_QUEUE 2
-#define RX_BUF_SZ (2048 - ETH_FCS_LEN + sizeof(__sum16))
-
-#define GBETH_RX_BUFF_MAX 8192
-#define GBETH_RX_DESC_DATA_SIZE 4080
-
struct ravb_tstamp_skb {
struct list_head list;
struct sk_buff *skb;
@@ -1012,10 +1046,7 @@ struct ravb_ptp {
};
struct ravb_hw_info {
- void (*rx_ring_free)(struct net_device *ndev, int q);
- void (*rx_ring_format)(struct net_device *ndev, int q);
- void *(*alloc_rx_desc)(struct net_device *ndev, int q);
- bool (*receive)(struct net_device *ndev, int *quota, int q);
+ int (*receive)(struct net_device *ndev, int budget, int q);
void (*set_rate)(struct net_device *ndev);
int (*set_feature)(struct net_device *ndev, netdev_features_t features);
int (*dmac_init)(struct net_device *ndev);
@@ -1024,11 +1055,15 @@ struct ravb_hw_info {
size_t gstrings_size;
netdev_features_t net_hw_features;
netdev_features_t net_features;
+ netdev_features_t vlan_features;
int stats_len;
- size_t max_rx_len;
u32 tccr_mask;
- u32 rx_max_buf_size;
+ u32 tx_max_frame_size;
+ u32 rx_max_frame_size;
+ u32 rx_buffer_size;
+ u32 rx_desc_size;
unsigned aligned_tx: 1;
+ unsigned coalesce_irqs:1; /* Needs software IRQ coalescing */
/* hardware features */
unsigned internal_delay:1; /* AVB-DMAC has internal delays */
@@ -1045,6 +1080,11 @@ struct ravb_hw_info {
unsigned half_duplex:1; /* E-MAC supports half duplex mode */
};
+struct ravb_rx_buffer {
+ struct page *page;
+ unsigned int offset;
+};
+
struct ravb_private {
struct net_device *ndev;
struct platform_device *pdev;
@@ -1060,12 +1100,16 @@ struct ravb_private {
struct ravb_desc *desc_bat;
dma_addr_t rx_desc_dma[NUM_RX_QUEUE];
dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
- struct ravb_rx_desc *gbeth_rx_ring;
- struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
+ union {
+ struct ravb_rx_desc *desc;
+ struct ravb_ex_rx_desc *ex_desc;
+ void *raw;
+ } rx_ring[NUM_RX_QUEUE];
struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
void *tx_align[NUM_TX_QUEUE];
struct sk_buff *rx_1st_skb;
- struct sk_buff **rx_skb[NUM_RX_QUEUE];
+ struct page_pool *rx_pool[NUM_RX_QUEUE];
+ struct ravb_rx_buffer *rx_buffers[NUM_RX_QUEUE];
struct sk_buff **tx_skb[NUM_TX_QUEUE];
u32 rx_over_errors;
u32 rx_fifo_errors;
@@ -1089,10 +1133,6 @@ struct ravb_private {
int msg_enable;
int speed;
int emac_irq;
- int erra_irq;
- int mgmta_irq;
- int rx_irqs[NUM_RX_QUEUE];
- int tx_irqs[NUM_TX_QUEUE];
unsigned no_avb_link:1;
unsigned avb_link_active_low:1;
@@ -1106,6 +1146,8 @@ struct ravb_private {
const struct ravb_hw_info *info;
struct reset_control *rstc;
+
+ u32 gti_tiv;
};
static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index f7566cfa45ca..c9f4976a3527 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -29,6 +29,8 @@
#include <linux/spinlock.h>
#include <linux/reset.h>
#include <linux/math64.h>
+#include <net/ip.h>
+#include <net/page_pool/helpers.h>
#include "ravb.h"
@@ -38,16 +40,6 @@
NETIF_MSG_RX_ERR | \
NETIF_MSG_TX_ERR)
-static const char *ravb_rx_irqs[NUM_RX_QUEUE] = {
- "ch0", /* RAVB_BE */
- "ch1", /* RAVB_NC */
-};
-
-static const char *ravb_tx_irqs[NUM_TX_QUEUE] = {
- "ch18", /* RAVB_BE */
- "ch19", /* RAVB_NC */
-};
-
void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
u32 set)
{
@@ -96,13 +88,13 @@ static void ravb_set_rate_gbeth(struct net_device *ndev)
struct ravb_private *priv = netdev_priv(ndev);
switch (priv->speed) {
- case 10: /* 10BASE */
+ case 10: /* 10BASE */
ravb_write(ndev, GBETH_GECMR_SPEED_10, GECMR);
break;
- case 100: /* 100BASE */
+ case 100: /* 100BASE */
ravb_write(ndev, GBETH_GECMR_SPEED_100, GECMR);
break;
- case 1000: /* 1000BASE */
+ case 1000: /* 1000BASE */
ravb_write(ndev, GBETH_GECMR_SPEED_1000, GECMR);
break;
}
@@ -122,14 +114,6 @@ static void ravb_set_rate_rcar(struct net_device *ndev)
}
}
-static void ravb_set_buffer_align(struct sk_buff *skb)
-{
- u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
-
- if (reserve)
- skb_reserve(skb, RAVB_ALIGN - reserve);
-}
-
/* Get MAC address from the MAC address registers
*
* Ethernet AVB device doesn't have ROM for MAC address.
@@ -200,6 +184,13 @@ static const struct mdiobb_ops bb_ops = {
.get_mdio_data = ravb_get_mdio_data,
};
+static struct ravb_rx_desc *
+ravb_rx_get_desc(struct ravb_private *priv, unsigned int q,
+ unsigned int i)
+{
+ return priv->rx_ring[q].raw + priv->info->rx_desc_size * i;
+}
+
/* Free TX skb function for AVB-IP */
static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
{
@@ -244,67 +235,29 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
return free_num;
}
-static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- unsigned int ring_size;
- unsigned int i;
-
- if (!priv->gbeth_rx_ring)
- return;
-
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];
-
- if (!dma_mapping_error(ndev->dev.parent,
- le32_to_cpu(desc->dptr)))
- dma_unmap_single(ndev->dev.parent,
- le32_to_cpu(desc->dptr),
- GBETH_RX_BUFF_MAX,
- DMA_FROM_DEVICE);
- }
- ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
- dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
- priv->rx_desc_dma[q]);
- priv->gbeth_rx_ring = NULL;
-}
-
-static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
+static void ravb_rx_ring_free(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int ring_size;
- unsigned int i;
- if (!priv->rx_ring[q])
+ if (!priv->rx_ring[q].raw)
return;
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
-
- if (!dma_mapping_error(ndev->dev.parent,
- le32_to_cpu(desc->dptr)))
- dma_unmap_single(ndev->dev.parent,
- le32_to_cpu(desc->dptr),
- RX_BUF_SZ,
- DMA_FROM_DEVICE);
- }
- ring_size = sizeof(struct ravb_ex_rx_desc) *
- (priv->num_rx_ring[q] + 1);
- dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
+ ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
+ dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].raw,
priv->rx_desc_dma[q]);
- priv->rx_ring[q] = NULL;
+ priv->rx_ring[q].raw = NULL;
}
/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
unsigned int num_tx_desc = priv->num_tx_desc;
unsigned int ring_size;
unsigned int i;
- info->rx_ring_free(ndev, q);
+ ravb_rx_ring_free(ndev, q);
if (priv->tx_ring[q]) {
ravb_tx_free(ndev, q, false);
@@ -316,13 +269,16 @@ static void ravb_ring_free(struct net_device *ndev, int q)
priv->tx_ring[q] = NULL;
}
- /* Free RX skb ringbuffer */
- if (priv->rx_skb[q]) {
- for (i = 0; i < priv->num_rx_ring[q]; i++)
- dev_kfree_skb(priv->rx_skb[q][i]);
+ /* Free RX buffers */
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ if (priv->rx_buffers[q][i].page)
+ page_pool_put_page(priv->rx_pool[q],
+ priv->rx_buffers[q][i].page,
+ 0, true);
}
- kfree(priv->rx_skb[q]);
- priv->rx_skb[q] = NULL;
+ kfree(priv->rx_buffers[q]);
+ priv->rx_buffers[q] = NULL;
+ page_pool_destroy(priv->rx_pool[q]);
/* Free aligned TX buffers */
kfree(priv->tx_align[q]);
@@ -335,73 +291,72 @@ static void ravb_ring_free(struct net_device *ndev, int q)
priv->tx_skb[q] = NULL;
}
-static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
+static int
+ravb_alloc_rx_buffer(struct net_device *ndev, int q, u32 entry, gfp_t gfp_mask,
+ struct ravb_rx_desc *rx_desc)
{
struct ravb_private *priv = netdev_priv(ndev);
- struct ravb_rx_desc *rx_desc;
- unsigned int rx_ring_size;
+ const struct ravb_hw_info *info = priv->info;
+ struct ravb_rx_buffer *rx_buff;
dma_addr_t dma_addr;
- unsigned int i;
+ unsigned int size;
- rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
- memset(priv->gbeth_rx_ring, 0, rx_ring_size);
- /* Build RX ring buffer */
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- /* RX descriptor */
- rx_desc = &priv->gbeth_rx_ring[i];
- rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
- dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
- GBETH_RX_BUFF_MAX,
- DMA_FROM_DEVICE);
+ rx_buff = &priv->rx_buffers[q][entry];
+ size = info->rx_buffer_size;
+ rx_buff->page = page_pool_alloc(priv->rx_pool[q], &rx_buff->offset,
+ &size, gfp_mask);
+ if (unlikely(!rx_buff->page)) {
/* We just set the data size to 0 for a failed mapping which
* should prevent DMA from happening...
*/
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- rx_desc->ds_cc = cpu_to_le16(0);
- rx_desc->dptr = cpu_to_le32(dma_addr);
- rx_desc->die_dt = DT_FEMPTY;
+ rx_desc->ds_cc = cpu_to_le16(0);
+ return -ENOMEM;
}
- rx_desc = &priv->gbeth_rx_ring[i];
- rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
- rx_desc->die_dt = DT_LINKFIX; /* type */
+
+ dma_addr = page_pool_get_dma_addr(rx_buff->page) + rx_buff->offset;
+ dma_sync_single_for_device(ndev->dev.parent, dma_addr,
+ info->rx_buffer_size, DMA_FROM_DEVICE);
+ rx_desc->dptr = cpu_to_le32(dma_addr);
+
+ /* The end of the RX buffer is used to store skb shared data, so we need
+ * to ensure that the hardware leaves enough space for this.
+ */
+ rx_desc->ds_cc = cpu_to_le16(info->rx_buffer_size -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) -
+ ETH_FCS_LEN + sizeof(__sum16));
+ return 0;
}
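A worked example may help here (editorial sketch, not part of the patch): with the Gen3/Gen4 values set later in ravb_gen3_hw_info, rx_buffer_size already includes headroom for the shared info, so the data size handed to the hardware comes out as follows.

/* Editorial sketch: ds_cc arithmetic for the R-Car Gen3/Gen4 case, where
 * rx_buffer_size = SZ_2K + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
 */
unsigned int shinfo_sz = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
unsigned int rx_buffer_size = SZ_2K + shinfo_sz;
unsigned int ds_cc = rx_buffer_size - shinfo_sz	/* room for shared info */
		   - ETH_FCS_LEN		/* FCS is not written to memory */
		   + sizeof(__sum16);		/* 2-byte checksum trailer */
/* ds_cc == 2048 - 4 + 2 == 2046; for GbEth, rx_buffer_size is a plain
 * SZ_2K, so the shared-info headroom is carved out of the 2048 bytes.
 */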
-static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
+static u32
+ravb_rx_ring_refill(struct net_device *ndev, int q, u32 count, gfp_t gfp_mask)
{
struct ravb_private *priv = netdev_priv(ndev);
- struct ravb_ex_rx_desc *rx_desc;
- unsigned int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
- dma_addr_t dma_addr;
- unsigned int i;
+ struct ravb_rx_desc *rx_desc;
+ u32 i, entry;
- memset(priv->rx_ring[q], 0, rx_ring_size);
- /* Build RX ring buffer */
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- /* RX descriptor */
- rx_desc = &priv->rx_ring[q][i];
- rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
- dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
- RX_BUF_SZ,
- DMA_FROM_DEVICE);
- /* We just set the data size to 0 for a failed mapping which
- * should prevent DMA from happening...
- */
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- rx_desc->ds_cc = cpu_to_le16(0);
- rx_desc->dptr = cpu_to_le32(dma_addr);
+ for (i = 0; i < count; i++) {
+ entry = (priv->dirty_rx[q] + i) % priv->num_rx_ring[q];
+ rx_desc = ravb_rx_get_desc(priv, q, entry);
+
+ if (!priv->rx_buffers[q][entry].page) {
+ if (unlikely(ravb_alloc_rx_buffer(ndev, q, entry,
+ gfp_mask, rx_desc)))
+ break;
+ }
+ /* Descriptor type must be set after all the above writes */
+ dma_wmb();
rx_desc->die_dt = DT_FEMPTY;
}
- rx_desc = &priv->rx_ring[q][i];
- rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
- rx_desc->die_dt = DT_LINKFIX; /* type */
+
+ return i;
}
/* Format skb and descriptor buffer for Ethernet AVB */
static void ravb_ring_format(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
unsigned int num_tx_desc = priv->num_tx_desc;
+ struct ravb_rx_desc *rx_desc;
struct ravb_tx_desc *tx_desc;
struct ravb_desc *desc;
unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
@@ -413,7 +368,13 @@ static void ravb_ring_format(struct net_device *ndev, int q)
priv->dirty_rx[q] = 0;
priv->dirty_tx[q] = 0;
- info->rx_ring_format(ndev, q);
+ /* Regular RX descriptors have already been initialized by
+ * ravb_rx_ring_refill(), we just need to initialize the final link
+ * descriptor.
+ */
+ rx_desc = ravb_rx_get_desc(priv, q, priv->num_rx_ring[q]);
+ rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
+ rx_desc->die_dt = DT_LINKFIX; /* type */
memset(priv->tx_ring[q], 0, tx_ring_size);
/* Build TX ring buffer */
@@ -439,57 +400,65 @@ static void ravb_ring_format(struct net_device *ndev, int q)
desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
}
-static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
+static void *ravb_alloc_rx_desc(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int ring_size;
- ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
-
- priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size,
- &priv->rx_desc_dma[q],
- GFP_KERNEL);
- return priv->gbeth_rx_ring;
-}
-
-static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- unsigned int ring_size;
+ ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
- ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
+ priv->rx_ring[q].raw = dma_alloc_coherent(ndev->dev.parent, ring_size,
+ &priv->rx_desc_dma[q],
+ GFP_KERNEL);
- priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
- &priv->rx_desc_dma[q],
- GFP_KERNEL);
- return priv->rx_ring[q];
+ return priv->rx_ring[q].raw;
}
/* Init skb and descriptor buffer for Ethernet AVB */
static int ravb_ring_init(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
unsigned int num_tx_desc = priv->num_tx_desc;
+ struct page_pool_params params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP,
+ .pool_size = priv->num_rx_ring[q],
+ .nid = NUMA_NO_NODE,
+ .dev = ndev->dev.parent,
+ .dma_dir = DMA_FROM_DEVICE,
+ };
unsigned int ring_size;
- struct sk_buff *skb;
- unsigned int i;
+ u32 num_filled;
+
+ /* Allocate RX page pool and buffers */
+ priv->rx_pool[q] = page_pool_create(&params);
+ if (IS_ERR(priv->rx_pool[q]))
+ goto error;
+
+ /* Allocate RX buffers */
+ priv->rx_buffers[q] = kcalloc(priv->num_rx_ring[q],
+ sizeof(*priv->rx_buffers[q]), GFP_KERNEL);
+ if (!priv->rx_buffers[q])
+ goto error;
- /* Allocate RX and TX skb rings */
- priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
- sizeof(*priv->rx_skb[q]), GFP_KERNEL);
+ /* Allocate TX skb rings */
priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
sizeof(*priv->tx_skb[q]), GFP_KERNEL);
- if (!priv->rx_skb[q] || !priv->tx_skb[q])
+ if (!priv->tx_skb[q])
goto error;
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- skb = __netdev_alloc_skb(ndev, info->max_rx_len, GFP_KERNEL);
- if (!skb)
- goto error;
- ravb_set_buffer_align(skb);
- priv->rx_skb[q][i] = skb;
- }
+ /* Allocate all RX descriptors. */
+ if (!ravb_alloc_rx_desc(ndev, q))
+ goto error;
+
+ /* Populate RX ring buffer. */
+ priv->dirty_rx[q] = 0;
+ ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q];
+ memset(priv->rx_ring[q].raw, 0, ring_size);
+ num_filled = ravb_rx_ring_refill(ndev, q, priv->num_rx_ring[q],
+ GFP_KERNEL);
+ if (num_filled != priv->num_rx_ring[q])
+ goto error;
if (num_tx_desc > 1) {
/* Allocate rings for the aligned buffers */
@@ -499,12 +468,6 @@ static int ravb_ring_init(struct net_device *ndev, int q)
goto error;
}
- /* Allocate all RX descriptors. */
- if (!info->alloc_rx_desc(ndev, q))
- goto error;
-
- priv->dirty_rx[q] = 0;
-
/* Allocate all TX descriptors. */
ring_size = sizeof(struct ravb_tx_desc) *
(priv->num_tx_ring[q] * num_tx_desc + 1);
@@ -522,6 +485,35 @@ error:
return -ENOMEM;
}
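For readers less familiar with the page_pool API this patch moves to, the buffer lifecycle is: pages come out of the pool at refill time (already DMA-mapped, since the pool is created with PP_FLAG_DMA_MAP), are handed to the stack with recycling marked, and are returned to the pool by hand only on error paths. A minimal sketch of that flow (hypothetical helper, assuming a pool set up as in ravb_ring_init() above):

/* Editorial sketch of the page_pool lifecycle used by this driver. */
static void pp_lifecycle_sketch(struct page_pool *pool)
{
	unsigned int offset, size = SZ_2K;
	struct sk_buff *skb;
	struct page *page;
	dma_addr_t dma;

	page = page_pool_alloc(pool, &offset, &size, GFP_ATOMIC);
	if (!page)
		return;

	/* PP_FLAG_DMA_MAP means the pool mapped the page already. */
	dma = page_pool_get_dma_addr(page) + offset;
	/* ... point an RX descriptor at "dma" and let the NIC fill it ... */

	skb = napi_build_skb(page_address(page) + offset, size);
	if (skb)
		skb_mark_for_recycle(skb);	/* stack recycles into pool */
	else
		page_pool_put_page(pool, page, 0, true);  /* manual return */
}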
+static void ravb_csum_init_gbeth(struct net_device *ndev)
+{
+ bool tx_enable = ndev->features & NETIF_F_HW_CSUM;
+ bool rx_enable = ndev->features & NETIF_F_RXCSUM;
+
+ if (!(tx_enable || rx_enable))
+ goto done;
+
+ ravb_write(ndev, 0, CSR0);
+ if (ravb_wait(ndev, CSR0, CSR0_TPE | CSR0_RPE, 0)) {
+ netdev_err(ndev, "Timeout enabling hardware checksum\n");
+
+ if (tx_enable)
+ ndev->features &= ~NETIF_F_HW_CSUM;
+
+ if (rx_enable)
+ ndev->features &= ~NETIF_F_RXCSUM;
+ } else {
+ if (tx_enable)
+ ravb_write(ndev, CSR1_CSUM_ENABLE, CSR1);
+
+ if (rx_enable)
+ ravb_write(ndev, CSR2_CSUM_ENABLE, CSR2);
+ }
+
+done:
+ ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);
+}
+
static void ravb_emac_init_gbeth(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
@@ -536,7 +528,7 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
}
/* Receive frame limit set register */
- ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);
+ ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR);
/* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */
ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) |
@@ -553,7 +545,8 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
/* E-MAC status register clear */
ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR);
- ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);
+
+ ravb_csum_init_gbeth(ndev);
/* E-MAC interrupt enable register */
ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
@@ -561,8 +554,16 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
static void ravb_emac_init_rcar(struct net_device *ndev)
{
- /* Receive frame limit set register */
- ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ /* Set receive frame length
+ *
+ * The length set here describes the frame from the destination address
+ * up to and including the CRC data. However only the frame data,
+ * excluding the CRC, are transferred to memory. To allow for the
+ * largest frames add the CRC length to the maximum Rx descriptor size.
+ */
+ ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR);
/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
ravb_write(ndev, ECMR_ZPF | ECMR_DM |
@@ -585,6 +586,16 @@ static void ravb_emac_init_rcar(struct net_device *ndev)
ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
}
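Putting numbers on the comment above (editorial example): the Gen3/Gen4 configurations below use rx_max_frame_size = SZ_2K, so the limit programmed into RFLR is 2048 + 4 = 2052 bytes; frames up to that length are accepted, but at most 2048 bytes (the frame data without the CRC) are actually transferred to memory.

/* Editorial example: Gen3/Gen4 set rx_max_frame_size = SZ_2K */
ravb_write(ndev, SZ_2K + ETH_FCS_LEN, RFLR);	/* 2048 + 4 = 2052 */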
+static void ravb_emac_init_rcar_gen4(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ bool mii = priv->phy_interface == PHY_INTERFACE_MODE_MII;
+
+ ravb_modify(ndev, APSR, APSR_MIISELECT, mii ? APSR_MIISELECT : 0);
+
+ ravb_emac_init_rcar(ndev);
+}
+
/* E-MAC init function */
static void ravb_emac_init(struct net_device *ndev)
{
@@ -596,6 +607,7 @@ static void ravb_emac_init(struct net_device *ndev)
static int ravb_dmac_init_gbeth(struct net_device *ndev)
{
+ struct ravb_private *priv = netdev_priv(ndev);
int error;
error = ravb_ring_init(ndev, RAVB_BE);
@@ -609,7 +621,7 @@ static int ravb_dmac_init_gbeth(struct net_device *ndev)
ravb_write(ndev, 0x60000000, RCR);
/* Set Max Frame Length (RTC) */
- ravb_write(ndev, 0x7ffc0000 | GBETH_RX_BUFF_MAX, RTC);
+ ravb_write(ndev, 0x7ffc0000 | priv->info->rx_max_frame_size, RTC);
/* Set FIFO size */
ravb_write(ndev, 0x00222200, TGC);
@@ -734,6 +746,40 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
}
}
+static void ravb_rx_csum_gbeth(struct sk_buff *skb)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ size_t csum_len;
+ u16 *hw_csum;
+
+ /* The hardware checksum status is contained in 4 bytes appended to
+ * packet data.
+ *
+ * For ipv4, the first 2 bytes are the ip header checksum status. We can
+ * ignore this as it will always be re-checked in inet_gro_receive().
+ *
+ * The last 2 bytes are the protocol checksum status which will be zero
+ * if the checksum has been validated.
+ */
+ csum_len = sizeof(*hw_csum) * 2;
+ if (unlikely(skb->len < csum_len))
+ return;
+
+ if (skb_is_nonlinear(skb)) {
+ skb_frag_t *last_frag = &shinfo->frags[shinfo->nr_frags - 1];
+
+ hw_csum = (u16 *)(skb_frag_address(last_frag) +
+ skb_frag_size(last_frag));
+ skb_frag_size_sub(last_frag, csum_len);
+ } else {
+ hw_csum = (u16 *)skb_tail_pointer(skb);
+ skb_trim(skb, skb->len - csum_len);
+ }
+
+ if (!get_unaligned(--hw_csum))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
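The 4-byte trailer the function above walks backwards through can be pictured like this (editorial diagram of the layout described in the comment, not additional hardware documentation):

/* Editorial diagram of the GbEth RX checksum trailer:
 *
 *   | packet data ... | ip csum status (2B) | proto csum status (2B) |
 *                                                                    ^
 *                          hw_csum starts one u16 past the end ------+
 *
 * After the trim/skb_frag_size_sub() the trailer is no longer part of
 * the packet, and get_unaligned(--hw_csum) reads the final two bytes:
 * zero there means the hardware already validated the L4 checksum.
 */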
+
static void ravb_rx_csum(struct sk_buff *skb)
{
u8 *hw_csum;
@@ -749,50 +795,38 @@ static void ravb_rx_csum(struct sk_buff *skb)
skb_trim(skb, skb->len - sizeof(__sum16));
}
-static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry,
- struct ravb_rx_desc *desc)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- struct sk_buff *skb;
-
- skb = priv->rx_skb[RAVB_BE][entry];
- priv->rx_skb[RAVB_BE][entry] = NULL;
- dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- ALIGN(GBETH_RX_BUFF_MAX, 16), DMA_FROM_DEVICE);
-
- return skb;
-}
-
/* Packet receive function for Gigabit Ethernet */
-static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
+static int ravb_rx_gbeth(struct net_device *ndev, int budget, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
struct net_device_stats *stats;
struct ravb_rx_desc *desc;
struct sk_buff *skb;
- dma_addr_t dma_addr;
int rx_packets = 0;
u8 desc_status;
- u16 pkt_len;
+ u16 desc_len;
u8 die_dt;
int entry;
int limit;
int i;
- entry = priv->cur_rx[q] % priv->num_rx_ring[q];
limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
stats = &priv->stats[q];
- desc = &priv->gbeth_rx_ring[entry];
- for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
+ for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+ entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+ desc = &priv->rx_ring[q].desc[entry];
+ if (rx_packets == budget || desc->die_dt == DT_FEMPTY)
+ break;
+
/* Descriptor type must be checked before all other reads */
dma_rmb();
desc_status = desc->msc;
- pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
+ desc_len = le16_to_cpu(desc->ds_cc) & RX_DS;
/* We use 0-byte descriptors to mark the DMA mapping errors */
- if (!pkt_len)
+ if (!desc_len)
continue;
if (desc_status & MSC_MC)
@@ -809,114 +843,135 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
if (desc_status & MSC_CEEF)
stats->rx_missed_errors++;
} else {
+ struct ravb_rx_buffer *rx_buff;
+ void *rx_addr;
+
+ rx_buff = &priv->rx_buffers[q][entry];
+ rx_addr = page_address(rx_buff->page) + rx_buff->offset;
die_dt = desc->die_dt & 0xF0;
+ dma_sync_single_for_cpu(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ desc_len, DMA_FROM_DEVICE);
+
switch (die_dt) {
case DT_FSINGLE:
- skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_put(skb, pkt_len);
- skb->protocol = eth_type_trans(skb, ndev);
- napi_gro_receive(&priv->napi[q], skb);
- rx_packets++;
- stats->rx_bytes += pkt_len;
- break;
case DT_FSTART:
- priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_put(priv->rx_1st_skb, pkt_len);
+ /* Start of packet: Set initial data length. */
+ skb = napi_build_skb(rx_addr,
+ info->rx_buffer_size);
+ if (unlikely(!skb)) {
+ stats->rx_errors++;
+ page_pool_put_page(priv->rx_pool[q],
+ rx_buff->page, 0,
+ true);
+ goto refill;
+ }
+ skb_mark_for_recycle(skb);
+ skb_put(skb, desc_len);
+
+ /* Save this skb if the packet spans multiple
+ * descriptors.
+ */
+ if (die_dt == DT_FSTART)
+ priv->rx_1st_skb = skb;
break;
+
case DT_FMID:
- skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_copy_to_linear_data_offset(priv->rx_1st_skb,
- priv->rx_1st_skb->len,
- skb->data,
- pkt_len);
- skb_put(priv->rx_1st_skb, pkt_len);
- dev_kfree_skb(skb);
- break;
case DT_FEND:
- skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_copy_to_linear_data_offset(priv->rx_1st_skb,
- priv->rx_1st_skb->len,
- skb->data,
- pkt_len);
- skb_put(priv->rx_1st_skb, pkt_len);
- dev_kfree_skb(skb);
- priv->rx_1st_skb->protocol =
- eth_type_trans(priv->rx_1st_skb, ndev);
- napi_gro_receive(&priv->napi[q],
- priv->rx_1st_skb);
+ /* Continuing a packet: Add this buffer as an RX
+ * frag.
+ */
+
+ /* rx_1st_skb will be NULL if napi_build_skb()
+ * failed for the first descriptor of a
+ * multi-descriptor packet.
+ */
+ if (unlikely(!priv->rx_1st_skb)) {
+ stats->rx_errors++;
+ page_pool_put_page(priv->rx_pool[q],
+ rx_buff->page, 0,
+ true);
+
+ /* We may find a DT_FSINGLE or DT_FSTART
+ * descriptor in the queue which we can
+ * process, so don't give up yet.
+ */
+ continue;
+ }
+ skb_add_rx_frag(priv->rx_1st_skb,
+ skb_shinfo(priv->rx_1st_skb)->nr_frags,
+ rx_buff->page, rx_buff->offset,
+ desc_len, info->rx_buffer_size);
+
+ /* Set skb to point at the whole packet so that
+ * we only need one code path for finishing a
+ * packet.
+ */
+ skb = priv->rx_1st_skb;
+ }
+
+ switch (die_dt) {
+ case DT_FSINGLE:
+ case DT_FEND:
+ /* Finishing a packet: Determine protocol &
+ * checksum, hand off to NAPI and update our
+ * stats.
+ */
+ skb->protocol = eth_type_trans(skb, ndev);
+ if (ndev->features & NETIF_F_RXCSUM)
+ ravb_rx_csum_gbeth(skb);
+ stats->rx_bytes += skb->len;
+ napi_gro_receive(&priv->napi[q], skb);
rx_packets++;
- stats->rx_bytes += pkt_len;
- break;
+
+ /* Clear rx_1st_skb so that it will only be
+ * non-NULL when valid.
+ */
+ priv->rx_1st_skb = NULL;
}
- }
- entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
- desc = &priv->gbeth_rx_ring[entry];
+ /* Mark this RX buffer as consumed. */
+ rx_buff->page = NULL;
+ }
}
+refill:
/* Refill the RX ring buffers. */
- for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
- entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
- desc = &priv->gbeth_rx_ring[entry];
- desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
-
- if (!priv->rx_skb[q][entry]) {
- skb = netdev_alloc_skb(ndev, info->max_rx_len);
- if (!skb)
- break;
- ravb_set_buffer_align(skb);
- dma_addr = dma_map_single(ndev->dev.parent,
- skb->data,
- GBETH_RX_BUFF_MAX,
- DMA_FROM_DEVICE);
- skb_checksum_none_assert(skb);
- /* We just set the data size to 0 for a failed mapping
- * which should prevent DMA from happening...
- */
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- desc->ds_cc = cpu_to_le16(0);
- desc->dptr = cpu_to_le32(dma_addr);
- priv->rx_skb[q][entry] = skb;
- }
- /* Descriptor type must be set after all the above writes */
- dma_wmb();
- desc->die_dt = DT_FEMPTY;
- }
+ priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q,
+ priv->cur_rx[q] - priv->dirty_rx[q],
+ GFP_ATOMIC);
stats->rx_packets += rx_packets;
- *quota -= rx_packets;
- return *quota == 0;
+ return rx_packets;
}
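In outline, the descriptor-type handling above forms a small state machine (editorial summary):

/* Editorial summary of the RX reassembly flow in ravb_rx_gbeth():
 *
 *   DT_FSINGLE: napi_build_skb()             -> finish packet
 *   DT_FSTART : napi_build_skb()             -> save as rx_1st_skb
 *   DT_FMID   : skb_add_rx_frag(rx_1st_skb)  -> keep collecting
 *   DT_FEND   : skb_add_rx_frag(rx_1st_skb)  -> finish packet
 *
 * "Finish" (the second switch) sets the protocol, applies RX checksum
 * offload, hands the skb to napi_gro_receive() and clears rx_1st_skb,
 * so single- and multi-descriptor packets share one completion path.
 */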
/* Packet receive function for Ethernet AVB */
-static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
+static int ravb_rx_rcar(struct net_device *ndev, int budget, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
- int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
- priv->cur_rx[q];
struct net_device_stats *stats = &priv->stats[q];
struct ravb_ex_rx_desc *desc;
+ unsigned int limit, i;
struct sk_buff *skb;
- dma_addr_t dma_addr;
struct timespec64 ts;
+ int rx_packets = 0;
u8 desc_status;
u16 pkt_len;
- int limit;
+ int entry;
+
+ limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
+ for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+ entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+ desc = &priv->rx_ring[q].ex_desc[entry];
+ if (rx_packets == budget || desc->die_dt == DT_FEMPTY)
+ break;
- boguscnt = min(boguscnt, *quota);
- limit = boguscnt;
- desc = &priv->rx_ring[q][entry];
- while (desc->die_dt != DT_FEMPTY) {
/* Descriptor type must be checked before all other reads */
dma_rmb();
desc_status = desc->msc;
pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
- if (--boguscnt < 0)
- break;
-
/* We use 0-byte descriptors to mark the DMA mapping errors */
if (!pkt_len)
continue;
@@ -937,12 +992,23 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
stats->rx_missed_errors++;
} else {
u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;
-
- skb = priv->rx_skb[q][entry];
- priv->rx_skb[q][entry] = NULL;
- dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- RX_BUF_SZ,
- DMA_FROM_DEVICE);
+ struct ravb_rx_buffer *rx_buff;
+ void *rx_addr;
+
+ rx_buff = &priv->rx_buffers[q][entry];
+ rx_addr = page_address(rx_buff->page) + rx_buff->offset;
+ dma_sync_single_for_cpu(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ pkt_len, DMA_FROM_DEVICE);
+
+ skb = napi_build_skb(rx_addr, info->rx_buffer_size);
+ if (unlikely(!skb)) {
+ stats->rx_errors++;
+ page_pool_put_page(priv->rx_pool[q],
+ rx_buff->page, 0, true);
+ break;
+ }
+ skb_mark_for_recycle(skb);
get_ts &= (q == RAVB_NC) ?
RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
@@ -962,54 +1028,30 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
if (ndev->features & NETIF_F_RXCSUM)
ravb_rx_csum(skb);
napi_gro_receive(&priv->napi[q], skb);
- stats->rx_packets++;
+ rx_packets++;
stats->rx_bytes += pkt_len;
- }
-
- entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q][entry];
- }
- /* Refill the RX ring buffers. */
- for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
- entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q][entry];
- desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
-
- if (!priv->rx_skb[q][entry]) {
- skb = netdev_alloc_skb(ndev, info->max_rx_len);
- if (!skb)
- break; /* Better luck next round. */
- ravb_set_buffer_align(skb);
- dma_addr = dma_map_single(ndev->dev.parent, skb->data,
- le16_to_cpu(desc->ds_cc),
- DMA_FROM_DEVICE);
- skb_checksum_none_assert(skb);
- /* We just set the data size to 0 for a failed mapping
- * which should prevent DMA from happening...
- */
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- desc->ds_cc = cpu_to_le16(0);
- desc->dptr = cpu_to_le32(dma_addr);
- priv->rx_skb[q][entry] = skb;
+ /* Mark this RX buffer as consumed. */
+ rx_buff->page = NULL;
}
- /* Descriptor type must be set after all the above writes */
- dma_wmb();
- desc->die_dt = DT_FEMPTY;
}
- *quota -= limit - (++boguscnt);
+ /* Refill the RX ring buffers. */
+ priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q,
+ priv->cur_rx[q] - priv->dirty_rx[q],
+ GFP_ATOMIC);
- return boguscnt <= 0;
+ stats->rx_packets += rx_packets;
+ return rx_packets;
}
/* Packet receive function for Ethernet AVB */
-static bool ravb_rx(struct net_device *ndev, int *quota, int q)
+static int ravb_rx(struct net_device *ndev, int budget, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- return info->receive(ndev, quota, q);
+ return info->receive(ndev, budget, q);
}
static void ravb_rcv_snd_disable(struct net_device *ndev)
@@ -1088,11 +1130,23 @@ static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
+ struct device *dev = &priv->pdev->dev;
+ irqreturn_t result = IRQ_HANDLED;
+
+ pm_runtime_get_noresume(dev);
+
+ if (unlikely(!pm_runtime_active(dev))) {
+ result = IRQ_NONE;
+ goto out_rpm_put;
+ }
spin_lock(&priv->lock);
ravb_emac_interrupt_unlocked(ndev);
spin_unlock(&priv->lock);
- return IRQ_HANDLED;
+
+out_rpm_put:
+ pm_runtime_put_noidle(dev);
+ return result;
}
/* Error interrupt handler */
@@ -1172,9 +1226,15 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ struct device *dev = &priv->pdev->dev;
irqreturn_t result = IRQ_NONE;
u32 iss;
+ pm_runtime_get_noresume(dev);
+
+ if (unlikely(!pm_runtime_active(dev)))
+ goto out_rpm_put;
+
spin_lock(&priv->lock);
/* Get interrupt status */
iss = ravb_read(ndev, ISS);
@@ -1218,6 +1278,9 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
}
spin_unlock(&priv->lock);
+
+out_rpm_put:
+ pm_runtime_put_noidle(dev);
return result;
}
@@ -1226,9 +1289,15 @@ static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
+ struct device *dev = &priv->pdev->dev;
irqreturn_t result = IRQ_NONE;
u32 iss;
+ pm_runtime_get_noresume(dev);
+
+ if (unlikely(!pm_runtime_active(dev)))
+ goto out_rpm_put;
+
spin_lock(&priv->lock);
/* Get interrupt status */
iss = ravb_read(ndev, ISS);
@@ -1250,6 +1319,9 @@ static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
}
spin_unlock(&priv->lock);
+
+out_rpm_put:
+ pm_runtime_put_noidle(dev);
return result;
}
@@ -1257,8 +1329,14 @@ static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
{
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
+ struct device *dev = &priv->pdev->dev;
irqreturn_t result = IRQ_NONE;
+ pm_runtime_get_noresume(dev);
+
+ if (unlikely(!pm_runtime_active(dev)))
+ goto out_rpm_put;
+
spin_lock(&priv->lock);
/* Network control/Best effort queue RX/TX */
@@ -1266,6 +1344,9 @@ static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
result = IRQ_HANDLED;
spin_unlock(&priv->lock);
+
+out_rpm_put:
+ pm_runtime_put_noidle(dev);
return result;
}
@@ -1284,25 +1365,15 @@ static int ravb_poll(struct napi_struct *napi, int budget)
struct net_device *ndev = napi->dev;
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- bool gptp = info->gptp || info->ccc_gac;
- struct ravb_rx_desc *desc;
unsigned long flags;
int q = napi - priv->napi;
int mask = BIT(q);
- int quota = budget;
- unsigned int entry;
+ int work_done;
- if (!gptp) {
- entry = priv->cur_rx[q] % priv->num_rx_ring[q];
- desc = &priv->gbeth_rx_ring[entry];
- }
/* Processing RX Descriptor Ring */
/* Clear RX interrupt */
ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
- if (gptp || desc->die_dt != DT_FEMPTY) {
- if (ravb_rx(ndev, &quota, q))
- goto out;
- }
+ work_done = ravb_rx(ndev, budget, q);
/* Processing TX Descriptor Ring */
spin_lock_irqsave(&priv->lock, flags);
@@ -1312,29 +1383,29 @@ static int ravb_poll(struct napi_struct *napi, int budget)
netif_wake_subqueue(ndev, q);
spin_unlock_irqrestore(&priv->lock, flags);
- napi_complete(napi);
-
- /* Re-enable RX/TX interrupts */
- spin_lock_irqsave(&priv->lock, flags);
- if (!info->irq_en_dis) {
- ravb_modify(ndev, RIC0, mask, mask);
- ravb_modify(ndev, TIC, mask, mask);
- } else {
- ravb_write(ndev, mask, RIE0);
- ravb_write(ndev, mask, TIE);
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
/* Receive error message handling */
- priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
+ priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
if (info->nc_queues)
priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
if (priv->rx_over_errors != ndev->stats.rx_over_errors)
ndev->stats.rx_over_errors = priv->rx_over_errors;
if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
-out:
- return budget - quota;
+
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ /* Re-enable RX/TX interrupts */
+ spin_lock_irqsave(&priv->lock, flags);
+ if (!info->irq_en_dis) {
+ ravb_modify(ndev, RIC0, mask, mask);
+ ravb_modify(ndev, TIC, mask, mask);
+ } else {
+ ravb_write(ndev, mask, RIE0);
+ ravb_write(ndev, mask, TIE);
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ return work_done;
}
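The restructured poll function now follows the canonical NAPI pattern: report how much of the budget was consumed, and re-arm interrupts only once napi_complete_done() confirms the instance really left polling mode. A condensed sketch of that contract (editorial only; the helpers are hypothetical placeholders):

/* Editorial sketch of the NAPI contract ravb_poll() now follows.
 * do_rx()/complete_tx()/enable_irqs() are hypothetical placeholders.
 */
static int poll_sketch(struct napi_struct *napi, int budget)
{
	int work_done = do_rx(napi, budget);	/* bounded by budget */

	complete_tx(napi);

	/* work_done == budget keeps NAPI scheduled with IRQs masked;
	 * otherwise ask to stop, and re-enable IRQs only if granted.
	 */
	if (work_done < budget && napi_complete_done(napi, work_done))
		enable_irqs(napi);

	return work_done;
}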
static void ravb_set_duplex_gbeth(struct net_device *ndev)
@@ -1669,25 +1740,24 @@ static int ravb_set_ringparam(struct net_device *ndev,
}
static int ravb_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *hw_info = priv->info;
- info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
- info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
- info->rx_filters =
- (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- (1 << HWTSTAMP_FILTER_ALL);
- if (hw_info->gptp || hw_info->ccc_gac)
+ if (hw_info->gptp || hw_info->ccc_gac) {
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_ALL);
info->phc_index = ptp_clock_index(priv->ptp.clock);
+ }
return 0;
}
@@ -1732,89 +1802,159 @@ static const struct ethtool_ops ravb_ethtool_ops = {
.set_wol = ravb_set_wol,
};
-static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
- struct net_device *ndev, struct device *dev,
- const char *ch)
+static int ravb_set_config_mode(struct net_device *ndev)
{
- char *name;
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
int error;
- name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
- if (!name)
- return -ENOMEM;
- error = request_irq(irq, handler, 0, name, ndev);
- if (error)
- netdev_err(ndev, "cannot request IRQ %s\n", name);
+ if (info->gptp) {
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ if (error)
+ return error;
+ /* Set CSEL value */
+ ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
+ } else if (info->ccc_gac) {
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB);
+ } else {
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ }
return error;
}
+static void ravb_set_gti(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+
+ if (!(info->gptp || info->ccc_gac))
+ return;
+
+ ravb_write(ndev, priv->gti_tiv, GTI);
+
+ /* Request GTI loading */
+ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+}
+
+static int ravb_compute_gti(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+ struct device *dev = ndev->dev.parent;
+ unsigned long rate;
+ u64 inc;
+
+ if (!(info->gptp || info->ccc_gac))
+ return 0;
+
+ if (info->gptp_ref_clk)
+ rate = clk_get_rate(priv->gptp_clk);
+ else
+ rate = clk_get_rate(priv->clk);
+ if (!rate)
+ return -EINVAL;
+
+ inc = div64_ul(1000000000ULL << 20, rate);
+
+ if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
+ dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
+ inc, GTI_TIV_MIN, GTI_TIV_MAX);
+ return -EINVAL;
+ }
+ priv->gti_tiv = inc;
+
+ return 0;
+}
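The increment computed above is the gPTP clock period in nanoseconds, expressed as a fixed-point value with 20 fractional bits. A worked example with an assumed 125 MHz clock (editorial):

/* Editorial example: GTI.TIV for an assumed 125 MHz gPTP clock. */
unsigned long rate = 125000000;			/* assumed */
u64 inc = div64_ul(1000000000ULL << 20, rate);	/* period in 2^-20 ns units */
/* inc == 8 << 20 == 0x00800000, i.e. 8 ns per cycle, which lies within
 * GTI_TIV_MIN..GTI_TIV_MAX, so ravb_compute_gti() accepts it.
 */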
+
+/* Set tx and rx clock internal delay modes */
+static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ bool explicit_delay = false;
+ u32 delay;
+
+ if (!priv->info->internal_delay)
+ return;
+
+ if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
+ /* Valid values are 0 and 1800, according to DT bindings */
+ priv->rxcidm = !!delay;
+ explicit_delay = true;
+ }
+ if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
+ /* Valid values are 0 and 2000, according to DT bindings */
+ priv->txcidm = !!delay;
+ explicit_delay = true;
+ }
+
+ if (explicit_delay)
+ return;
+
+ /* Fall back to legacy rgmii-*id behavior */
+ if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+ priv->rxcidm = 1;
+ priv->rgmii_override = 1;
+ }
+
+ if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ priv->txcidm = 1;
+ priv->rgmii_override = 1;
+ }
+}
+
+static void ravb_set_delay_mode(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 set = 0;
+
+ if (!priv->info->internal_delay)
+ return;
+
+ if (priv->rxcidm)
+ set |= APSR_RDM;
+ if (priv->txcidm)
+ set |= APSR_TDM;
+ ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
+}
+
/* Network device open function for Ethernet AVB */
static int ravb_open(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- struct platform_device *pdev = priv->pdev;
- struct device *dev = &pdev->dev;
+ struct device *dev = &priv->pdev->dev;
int error;
napi_enable(&priv->napi[RAVB_BE]);
if (info->nc_queues)
napi_enable(&priv->napi[RAVB_NC]);
- if (!info->multi_irqs) {
- error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
- ndev->name, ndev);
- if (error) {
- netdev_err(ndev, "cannot request IRQ\n");
- goto out_napi_off;
- }
- } else {
- error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev,
- dev, "ch22:multi");
- if (error)
- goto out_napi_off;
- error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev,
- dev, "ch24:emac");
- if (error)
- goto out_free_irq;
- error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt,
- ndev, dev, "ch0:rx_be");
- if (error)
- goto out_free_irq_emac;
- error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt,
- ndev, dev, "ch18:tx_be");
- if (error)
- goto out_free_irq_be_rx;
- error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt,
- ndev, dev, "ch1:rx_nc");
- if (error)
- goto out_free_irq_be_tx;
- error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt,
- ndev, dev, "ch19:tx_nc");
- if (error)
- goto out_free_irq_nc_rx;
-
- if (info->err_mgmt_irqs) {
- error = ravb_hook_irq(priv->erra_irq, ravb_multi_interrupt,
- ndev, dev, "err_a");
- if (error)
- goto out_free_irq_nc_tx;
- error = ravb_hook_irq(priv->mgmta_irq, ravb_multi_interrupt,
- ndev, dev, "mgmt_a");
- if (error)
- goto out_free_irq_erra;
- }
- }
+ error = pm_runtime_resume_and_get(dev);
+ if (error < 0)
+ goto out_napi_off;
+
+ /* Set AVB config mode */
+ error = ravb_set_config_mode(ndev);
+ if (error)
+ goto out_rpm_put;
+
+ ravb_set_delay_mode(ndev);
+ ravb_write(ndev, priv->desc_bat_dma, DBAT);
/* Device init */
error = ravb_dmac_init(ndev);
if (error)
- goto out_free_irq_mgmta;
+ goto out_set_reset;
+
ravb_emac_init(ndev);
+ ravb_set_gti(ndev);
+
/* Initialise PTP Clock driver */
- if (info->gptp)
+ if (info->gptp || info->ccc_gac)
ravb_ptp_init(ndev, priv->pdev);
/* PHY control start */
@@ -1828,29 +1968,14 @@ static int ravb_open(struct net_device *ndev)
out_ptp_stop:
/* Stop PTP Clock driver */
- if (info->gptp)
+ if (info->gptp || info->ccc_gac)
ravb_ptp_stop(ndev);
ravb_stop_dma(ndev);
-out_free_irq_mgmta:
- if (!info->multi_irqs)
- goto out_free_irq;
- if (info->err_mgmt_irqs)
- free_irq(priv->mgmta_irq, ndev);
-out_free_irq_erra:
- if (info->err_mgmt_irqs)
- free_irq(priv->erra_irq, ndev);
-out_free_irq_nc_tx:
- free_irq(priv->tx_irqs[RAVB_NC], ndev);
-out_free_irq_nc_rx:
- free_irq(priv->rx_irqs[RAVB_NC], ndev);
-out_free_irq_be_tx:
- free_irq(priv->tx_irqs[RAVB_BE], ndev);
-out_free_irq_be_rx:
- free_irq(priv->rx_irqs[RAVB_BE], ndev);
-out_free_irq_emac:
- free_irq(priv->emac_irq, ndev);
-out_free_irq:
- free_irq(ndev->irq, ndev);
+out_set_reset:
+ ravb_set_opmode(ndev, CCC_OPC_RESET);
+out_rpm_put:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
out_napi_off:
if (info->nc_queues)
napi_disable(&priv->napi[RAVB_NC]);
@@ -1935,6 +2060,48 @@ out_unlock:
rtnl_unlock();
}
+static bool ravb_can_tx_csum_gbeth(struct sk_buff *skb)
+{
+ u16 net_protocol = ntohs(skb->protocol);
+ u8 inner_protocol;
+
+ /* GbEth IP can calculate the checksum if:
+ * - there are zero or one VLAN headers with TPID=0x8100
+ * - the network protocol is IPv4 or IPv6
+ * - the transport protocol is TCP, UDP or ICMP
+ * - the packet is not fragmented
+ */
+
+ if (net_protocol == ETH_P_8021Q) {
+ struct vlan_hdr vhdr, *vh;
+
+ vh = skb_header_pointer(skb, ETH_HLEN, sizeof(vhdr), &vhdr);
+ if (!vh)
+ return false;
+
+ net_protocol = ntohs(vh->h_vlan_encapsulated_proto);
+ }
+
+ switch (net_protocol) {
+ case ETH_P_IP:
+ inner_protocol = ip_hdr(skb)->protocol;
+ break;
+ case ETH_P_IPV6:
+ inner_protocol = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return false;
+ }
+
+ switch (inner_protocol) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ return true;
+ default:
+ return false;
+ }
+}
+
/* Packet transmit function for Ethernet AVB */
static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
@@ -1950,6 +2117,9 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
u32 entry;
u32 len;
+ if (skb->ip_summed == CHECKSUM_PARTIAL && !ravb_can_tx_csum_gbeth(skb))
+ skb_checksum_help(skb);
+
spin_lock_irqsave(&priv->lock, flags);
if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
num_tx_desc) {
@@ -2084,8 +2254,15 @@ static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
struct net_device_stats *nstats, *stats0, *stats1;
+ struct device *dev = &priv->pdev->dev;
nstats = &ndev->stats;
+
+ pm_runtime_get_noresume(dev);
+
+ if (!pm_runtime_active(dev))
+ goto out_rpm_put;
+
stats0 = &priv->stats[RAVB_BE];
if (info->tx_counters) {
@@ -2127,6 +2304,8 @@ static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
nstats->rx_over_errors += stats1->rx_over_errors;
}
+out_rpm_put:
+ pm_runtime_put_noidle(dev);
return nstats;
}
@@ -2149,6 +2328,8 @@ static int ravb_close(struct net_device *ndev)
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
struct ravb_tstamp_skb *ts_skb, *ts_skb2;
+ struct device *dev = &priv->pdev->dev;
+ int error;
netif_tx_stop_all_queues(ndev);
@@ -2157,8 +2338,16 @@ static int ravb_close(struct net_device *ndev)
ravb_write(ndev, 0, RIC2);
ravb_write(ndev, 0, TIC);
+ /* PHY disconnect */
+ if (ndev->phydev) {
+ phy_stop(ndev->phydev);
+ phy_disconnect(ndev->phydev);
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+ }
+
/* Stop PTP Clock driver */
- if (info->gptp)
+ if (info->gptp || info->ccc_gac)
ravb_ptp_stop(ndev);
/* Set the config mode to stop the AVB-DMAC's processes */
@@ -2175,29 +2364,8 @@ static int ravb_close(struct net_device *ndev)
}
}
- /* PHY disconnect */
- if (ndev->phydev) {
- phy_stop(ndev->phydev);
- phy_disconnect(ndev->phydev);
- if (of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
- }
-
cancel_work_sync(&priv->work);
- if (info->multi_irqs) {
- free_irq(priv->tx_irqs[RAVB_NC], ndev);
- free_irq(priv->rx_irqs[RAVB_NC], ndev);
- free_irq(priv->tx_irqs[RAVB_BE], ndev);
- free_irq(priv->rx_irqs[RAVB_BE], ndev);
- free_irq(priv->emac_irq, ndev);
- if (info->err_mgmt_irqs) {
- free_irq(priv->erra_irq, ndev);
- free_irq(priv->mgmta_irq, ndev);
- }
- }
- free_irq(ndev->irq, ndev);
-
if (info->nc_queues)
napi_disable(&priv->napi[RAVB_NC]);
napi_disable(&priv->napi[RAVB_BE]);
@@ -2207,6 +2375,17 @@ static int ravb_close(struct net_device *ndev)
if (info->nc_queues)
ravb_ring_free(ndev, RAVB_NC);
+ /* Update statistics. */
+ ravb_get_stats(ndev);
+
+ /* Set reset mode. */
+ error = ravb_set_opmode(ndev, CCC_OPC_RESET);
+ if (error)
+ return error;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
return 0;
}
@@ -2299,7 +2478,7 @@ static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
{
struct ravb_private *priv = netdev_priv(ndev);
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
if (netif_running(ndev)) {
synchronize_irq(priv->emac_irq);
@@ -2330,11 +2509,58 @@ static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
spin_unlock_irqrestore(&priv->lock, flags);
}
+static int ravb_endisable_csum_gbeth(struct net_device *ndev, enum ravb_reg reg,
+ u32 val, u32 mask)
+{
+ u32 csr0 = CSR0_TPE | CSR0_RPE;
+ int ret;
+
+ ravb_write(ndev, csr0 & ~mask, CSR0);
+ ret = ravb_wait(ndev, CSR0, mask, 0);
+ if (!ret)
+ ravb_write(ndev, val, reg);
+
+ ravb_write(ndev, csr0, CSR0);
+
+ return ret;
+}
+
static int ravb_set_features_gbeth(struct net_device *ndev,
netdev_features_t features)
{
- /* Place holder */
- return 0;
+ netdev_features_t changed = ndev->features ^ features;
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned long flags;
+ int ret = 0;
+ u32 val;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (changed & NETIF_F_RXCSUM) {
+ if (features & NETIF_F_RXCSUM)
+ val = CSR2_CSUM_ENABLE;
+ else
+ val = 0;
+
+ ret = ravb_endisable_csum_gbeth(ndev, CSR2, val, CSR0_RPE);
+ if (ret)
+ goto done;
+ }
+
+ if (changed & NETIF_F_HW_CSUM) {
+ if (features & NETIF_F_HW_CSUM)
+ val = CSR1_CSUM_ENABLE;
+ else
+ val = 0;
+
+ ret = ravb_endisable_csum_gbeth(ndev, CSR1, val, CSR0_TPE);
+ if (ret)
+ goto done;
+ }
+
+done:
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
}
static int ravb_set_features_rcar(struct net_device *ndev,
@@ -2345,8 +2571,6 @@ static int ravb_set_features_rcar(struct net_device *ndev,
if (changed & NETIF_F_RXCSUM)
ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM);
- ndev->features = features;
-
return 0;
}
@@ -2355,8 +2579,24 @@ static int ravb_set_features(struct net_device *ndev,
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ struct device *dev = &priv->pdev->dev;
+ int ret;
+
+ pm_runtime_get_noresume(dev);
+
+ if (pm_runtime_active(dev))
+ ret = info->set_feature(ndev, features);
+ else
+ ret = 0;
- return info->set_feature(ndev, features);
+ pm_runtime_put_noidle(dev);
+
+ if (ret)
+ return ret;
+
+ ndev->features = features;
+
+ return 0;
}
static const struct net_device_ops ravb_netdev_ops = {
@@ -2379,6 +2619,7 @@ static int ravb_mdio_init(struct ravb_private *priv)
{
struct platform_device *pdev = priv->pdev;
struct device *dev = &pdev->dev;
+ struct device_node *mdio_node;
struct phy_device *phydev;
struct device_node *pn;
int error;
@@ -2398,7 +2639,13 @@ static int ravb_mdio_init(struct ravb_private *priv)
pdev->name, pdev->id);
/* Register MDIO bus */
- error = of_mdiobus_register(priv->mii_bus, dev->of_node);
+ mdio_node = of_get_child_by_name(dev->of_node, "mdio");
+ if (!mdio_node) {
+ /* backwards compatibility for DT lacking mdio subnode */
+ mdio_node = of_node_get(dev->of_node);
+ }
+ error = of_mdiobus_register(priv->mii_bus, mdio_node);
+ of_node_put(mdio_node);
if (error)
goto out_free_bus;
@@ -2429,10 +2676,30 @@ static int ravb_mdio_release(struct ravb_private *priv)
return 0;
}
+static const struct ravb_hw_info ravb_gen2_hw_info = {
+ .receive = ravb_rx_rcar,
+ .set_rate = ravb_set_rate_rcar,
+ .set_feature = ravb_set_features_rcar,
+ .dmac_init = ravb_dmac_init_rcar,
+ .emac_init = ravb_emac_init_rcar,
+ .gstrings_stats = ravb_gstrings_stats,
+ .gstrings_size = sizeof(ravb_gstrings_stats),
+ .net_hw_features = NETIF_F_RXCSUM,
+ .net_features = NETIF_F_RXCSUM,
+ .stats_len = ARRAY_SIZE(ravb_gstrings_stats),
+ .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .tx_max_frame_size = SZ_2K,
+ .rx_max_frame_size = SZ_2K,
+ .rx_buffer_size = SZ_2K +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+ .rx_desc_size = sizeof(struct ravb_ex_rx_desc),
+ .aligned_tx = 1,
+ .gptp = 1,
+ .nc_queues = 1,
+ .magic_pkt = 1,
+};
+
static const struct ravb_hw_info ravb_gen3_hw_info = {
- .rx_ring_free = ravb_rx_ring_free_rcar,
- .rx_ring_format = ravb_rx_ring_format_rcar,
- .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
.set_feature = ravb_set_features_rcar,
@@ -2443,9 +2710,12 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
.net_hw_features = NETIF_F_RXCSUM,
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
- .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
- .rx_max_buf_size = SZ_2K,
+ .tx_max_frame_size = SZ_2K,
+ .rx_max_frame_size = SZ_2K,
+ .rx_buffer_size = SZ_2K +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+ .rx_desc_size = sizeof(struct ravb_ex_rx_desc),
.internal_delay = 1,
.tx_counters = 1,
.multi_irqs = 1,
@@ -2455,33 +2725,33 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
.magic_pkt = 1,
};
-static const struct ravb_hw_info ravb_gen2_hw_info = {
- .rx_ring_free = ravb_rx_ring_free_rcar,
- .rx_ring_format = ravb_rx_ring_format_rcar,
- .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
+static const struct ravb_hw_info ravb_gen4_hw_info = {
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
.set_feature = ravb_set_features_rcar,
.dmac_init = ravb_dmac_init_rcar,
- .emac_init = ravb_emac_init_rcar,
+ .emac_init = ravb_emac_init_rcar_gen4,
.gstrings_stats = ravb_gstrings_stats,
.gstrings_size = sizeof(ravb_gstrings_stats),
.net_hw_features = NETIF_F_RXCSUM,
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
- .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
- .rx_max_buf_size = SZ_2K,
- .aligned_tx = 1,
- .gptp = 1,
+ .tx_max_frame_size = SZ_2K,
+ .rx_max_frame_size = SZ_2K,
+ .rx_buffer_size = SZ_2K +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+ .rx_desc_size = sizeof(struct ravb_ex_rx_desc),
+ .internal_delay = 1,
+ .tx_counters = 1,
+ .multi_irqs = 1,
+ .irq_en_dis = 1,
+ .ccc_gac = 1,
.nc_queues = 1,
.magic_pkt = 1,
};
static const struct ravb_hw_info ravb_rzv2m_hw_info = {
- .rx_ring_free = ravb_rx_ring_free_rcar,
- .rx_ring_format = ravb_rx_ring_format_rcar,
- .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
.set_feature = ravb_set_features_rcar,
@@ -2492,9 +2762,12 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
.net_hw_features = NETIF_F_RXCSUM,
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
- .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
- .rx_max_buf_size = SZ_2K,
+ .tx_max_frame_size = SZ_2K,
+ .rx_max_frame_size = SZ_2K,
+ .rx_buffer_size = SZ_2K +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+ .rx_desc_size = sizeof(struct ravb_ex_rx_desc),
.multi_irqs = 1,
.err_mgmt_irqs = 1,
.gptp = 1,
@@ -2504,9 +2777,6 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
};
static const struct ravb_hw_info gbeth_hw_info = {
- .rx_ring_free = ravb_rx_ring_free_gbeth,
- .rx_ring_format = ravb_rx_ring_format_gbeth,
- .alloc_rx_desc = ravb_alloc_rx_desc_gbeth,
.receive = ravb_rx_gbeth,
.set_rate = ravb_set_rate_gbeth,
.set_feature = ravb_set_features_gbeth,
@@ -2514,11 +2784,17 @@ static const struct ravb_hw_info gbeth_hw_info = {
.emac_init = ravb_emac_init_gbeth,
.gstrings_stats = ravb_gstrings_stats_gbeth,
.gstrings_size = sizeof(ravb_gstrings_stats_gbeth),
+ .net_hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
+ .net_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
+ .vlan_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
- .max_rx_len = ALIGN(GBETH_RX_BUFF_MAX, RAVB_ALIGN),
.tccr_mask = TCCR_TSRQ0,
- .rx_max_buf_size = SZ_8K,
+ .tx_max_frame_size = 1522,
+ .rx_max_frame_size = SZ_8K,
+ .rx_buffer_size = SZ_2K,
+ .rx_desc_size = sizeof(struct ravb_rx_desc),
.aligned_tx = 1,
+ .coalesce_irqs = 1,
.tx_counters = 1,
.carrier_counters = 1,
.half_duplex = 1,
@@ -2530,107 +2806,97 @@ static const struct of_device_id ravb_match_table[] = {
{ .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
{ .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
{ .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
- { .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen3_hw_info },
+ { .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen4_hw_info },
{ .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info },
{ .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
{ }
};
MODULE_DEVICE_TABLE(of, ravb_match_table);
-static int ravb_set_gti(struct net_device *ndev)
+static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name,
+ const char *ch, int *irq, irq_handler_t handler)
{
- struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
- struct device *dev = ndev->dev.parent;
- unsigned long rate;
- uint64_t inc;
-
- if (info->gptp_ref_clk)
- rate = clk_get_rate(priv->gptp_clk);
- else
- rate = clk_get_rate(priv->clk);
- if (!rate)
- return -EINVAL;
+ struct platform_device *pdev = priv->pdev;
+ struct net_device *ndev = priv->ndev;
+ struct device *dev = &pdev->dev;
+ const char *devname = dev_name(dev);
+ unsigned long flags;
+ int error, irq_num;
- inc = div64_ul(1000000000ULL << 20, rate);
+ if (irq_name) {
+ devname = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", devname, ch);
+ if (!devname)
+ return -ENOMEM;
- if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
- dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
- inc, GTI_TIV_MIN, GTI_TIV_MAX);
- return -EINVAL;
+ irq_num = platform_get_irq_byname(pdev, irq_name);
+ flags = 0;
+ } else {
+ irq_num = platform_get_irq(pdev, 0);
+ flags = IRQF_SHARED;
}
+ if (irq_num < 0)
+ return irq_num;
- ravb_write(ndev, inc, GTI);
+ if (irq)
+ *irq = irq_num;
- return 0;
+ error = devm_request_irq(dev, irq_num, handler, flags, devname, ndev);
+ if (error)
+ netdev_err(ndev, "cannot request IRQ %s\n", devname);
+
+ return error;
}
-static int ravb_set_config_mode(struct net_device *ndev)
+static int ravb_setup_irqs(struct ravb_private *priv)
{
- struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ struct net_device *ndev = priv->ndev;
+ const char *irq_name, *emac_irq_name;
int error;
- if (info->gptp) {
- error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
- if (error)
- return error;
- /* Set CSEL value */
- ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
- } else if (info->ccc_gac) {
- error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB);
+ if (!info->multi_irqs)
+ return ravb_setup_irq(priv, NULL, NULL, &ndev->irq, ravb_interrupt);
+
+ if (info->err_mgmt_irqs) {
+ irq_name = "dia";
+ emac_irq_name = "line3";
} else {
- error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ irq_name = "ch22";
+ emac_irq_name = "ch24";
}
- return error;
-}
-
-/* Set tx and rx clock internal delay modes */
-static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- bool explicit_delay = false;
- u32 delay;
+ error = ravb_setup_irq(priv, irq_name, "ch22:multi", &ndev->irq, ravb_multi_interrupt);
+ if (error)
+ return error;
- if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
- /* Valid values are 0 and 1800, according to DT bindings */
- priv->rxcidm = !!delay;
- explicit_delay = true;
- }
- if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
- /* Valid values are 0 and 2000, according to DT bindings */
- priv->txcidm = !!delay;
- explicit_delay = true;
- }
+ error = ravb_setup_irq(priv, emac_irq_name, "ch24:emac", &priv->emac_irq,
+ ravb_emac_interrupt);
+ if (error)
+ return error;
- if (explicit_delay)
- return;
+ if (info->err_mgmt_irqs) {
+ error = ravb_setup_irq(priv, "err_a", "err_a", NULL, ravb_multi_interrupt);
+ if (error)
+ return error;
- /* Fall back to legacy rgmii-*id behavior */
- if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
- priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
- priv->rxcidm = 1;
- priv->rgmii_override = 1;
+ error = ravb_setup_irq(priv, "mgmt_a", "mgmt_a", NULL, ravb_multi_interrupt);
+ if (error)
+ return error;
}
- if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
- priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
- priv->txcidm = 1;
- priv->rgmii_override = 1;
- }
-}
+ error = ravb_setup_irq(priv, "ch0", "ch0:rx_be", NULL, ravb_be_interrupt);
+ if (error)
+ return error;
-static void ravb_set_delay_mode(struct net_device *ndev)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- u32 set = 0;
+ error = ravb_setup_irq(priv, "ch1", "ch1:rx_nc", NULL, ravb_nc_interrupt);
+ if (error)
+ return error;
- if (priv->rxcidm)
- set |= APSR_RDM;
- if (priv->txcidm)
- set |= APSR_TDM;
- ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
+ error = ravb_setup_irq(priv, "ch18", "ch18:tx_be", NULL, ravb_be_interrupt);
+ if (error)
+ return error;
+
+ return ravb_setup_irq(priv, "ch19", "ch19:tx_nc", NULL, ravb_nc_interrupt);
}
static int ravb_probe(struct platform_device *pdev)
@@ -2640,9 +2906,8 @@ static int ravb_probe(struct platform_device *pdev)
struct reset_control *rstc;
struct ravb_private *priv;
struct net_device *ndev;
- int error, irq, q;
struct resource *res;
- int i;
+ int error, q;
if (!np) {
dev_err(&pdev->dev,
@@ -2650,7 +2915,7 @@ static int ravb_probe(struct platform_device *pdev)
return -EINVAL;
}
- rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(rstc))
return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
"failed to get cpg reset\n");
@@ -2664,30 +2929,12 @@ static int ravb_probe(struct platform_device *pdev)
ndev->features = info->net_features;
ndev->hw_features = info->net_hw_features;
+ ndev->vlan_features = info->vlan_features;
error = reset_control_deassert(rstc);
if (error)
goto out_free_netdev;
- pm_runtime_enable(&pdev->dev);
- error = pm_runtime_resume_and_get(&pdev->dev);
- if (error < 0)
- goto out_rpm_disable;
-
- if (info->multi_irqs) {
- if (info->err_mgmt_irqs)
- irq = platform_get_irq_byname(pdev, "dia");
- else
- irq = platform_get_irq_byname(pdev, "ch22");
- } else {
- irq = platform_get_irq(pdev, 0);
- }
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- ndev->irq = irq;
-
SET_NETDEV_DEV(ndev, &pdev->dev);
priv = netdev_priv(ndev);
@@ -2702,10 +2949,43 @@ static int ravb_probe(struct platform_device *pdev)
priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
}
+ error = ravb_setup_irqs(priv);
+ if (error)
+ goto out_reset_assert;
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ error = PTR_ERR(priv->clk);
+ goto out_reset_assert;
+ }
+
+ if (info->gptp_ref_clk) {
+ priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp");
+ if (IS_ERR(priv->gptp_clk)) {
+ error = PTR_ERR(priv->gptp_clk);
+ goto out_reset_assert;
+ }
+ }
+
+ priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
+ if (IS_ERR(priv->refclk)) {
+ error = PTR_ERR(priv->refclk);
+ goto out_reset_assert;
+ }
+ clk_prepare(priv->refclk);
+
+ platform_set_drvdata(pdev, ndev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ error = pm_runtime_resume_and_get(&pdev->dev);
+ if (error < 0)
+ goto out_rpm_disable;
+
priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->addr)) {
error = PTR_ERR(priv->addr);
- goto out_release;
+ goto out_rpm_put;
}
/* The Ether-specific entries in the device structure. */
@@ -2716,79 +2996,14 @@ static int ravb_probe(struct platform_device *pdev)
error = of_get_phy_mode(np, &priv->phy_interface);
if (error && error != -ENODEV)
- goto out_release;
+ goto out_rpm_put;
priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
priv->avb_link_active_low =
of_property_read_bool(np, "renesas,ether-link-active-low");
- if (info->multi_irqs) {
- if (info->err_mgmt_irqs)
- irq = platform_get_irq_byname(pdev, "line3");
- else
- irq = platform_get_irq_byname(pdev, "ch24");
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- priv->emac_irq = irq;
- for (i = 0; i < NUM_RX_QUEUE; i++) {
- irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]);
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- priv->rx_irqs[i] = irq;
- }
- for (i = 0; i < NUM_TX_QUEUE; i++) {
- irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]);
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- priv->tx_irqs[i] = irq;
- }
-
- if (info->err_mgmt_irqs) {
- irq = platform_get_irq_byname(pdev, "err_a");
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- priv->erra_irq = irq;
-
- irq = platform_get_irq_byname(pdev, "mgmt_a");
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- priv->mgmta_irq = irq;
- }
- }
-
- priv->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(priv->clk)) {
- error = PTR_ERR(priv->clk);
- goto out_release;
- }
-
- priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
- if (IS_ERR(priv->refclk)) {
- error = PTR_ERR(priv->refclk);
- goto out_release;
- }
- clk_prepare_enable(priv->refclk);
-
- if (info->gptp_ref_clk) {
- priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp");
- if (IS_ERR(priv->gptp_clk)) {
- error = PTR_ERR(priv->gptp_clk);
- goto out_disable_refclk;
- }
- clk_prepare_enable(priv->gptp_clk);
- }
-
- ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
+ ndev->max_mtu = info->tx_max_frame_size -
+ (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
ndev->min_mtu = ETH_MIN_MTU;
	/* FIXME: R-Car Gen2 has 4-byte alignment restriction for tx buffer
@@ -2802,25 +3017,11 @@ static int ravb_probe(struct platform_device *pdev)
ndev->netdev_ops = &ravb_netdev_ops;
ndev->ethtool_ops = &ravb_ethtool_ops;
- /* Set AVB config mode */
- error = ravb_set_config_mode(ndev);
+ error = ravb_compute_gti(ndev);
if (error)
- goto out_disable_gptp_clk;
+ goto out_rpm_put;
- if (info->gptp || info->ccc_gac) {
- /* Set GTI value */
- error = ravb_set_gti(ndev);
- if (error)
- goto out_disable_gptp_clk;
-
- /* Request GTI loading */
- ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
- }
-
- if (info->internal_delay) {
- ravb_parse_delay_mode(np, ndev);
- ravb_set_delay_mode(ndev);
- }
+ ravb_parse_delay_mode(np, ndev);
/* Allocate descriptor base address table */
priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
@@ -2831,22 +3032,22 @@ static int ravb_probe(struct platform_device *pdev)
"Cannot allocate desc base address table (size %d bytes)\n",
priv->desc_bat_size);
error = -ENOMEM;
- goto out_disable_gptp_clk;
+ goto out_rpm_put;
}
for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
priv->desc_bat[q].die_dt = DT_EOS;
- ravb_write(ndev, priv->desc_bat_dma, DBAT);
/* Initialise HW timestamp list */
INIT_LIST_HEAD(&priv->ts_skb_list);
- /* Initialise PTP Clock driver */
- if (info->ccc_gac)
- ravb_ptp_init(ndev, pdev);
-
/* Debug message level */
priv->msg_enable = RAVB_DEF_MSG_ENABLE;
+ /* Set config mode as this is needed for PHY initialization. */
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ if (error)
+ goto out_rpm_put;
+
/* Read and set MAC address */
ravb_read_mac_address(np, ndev);
if (!is_valid_ether_addr(ndev->dev_addr)) {
@@ -2859,13 +3060,24 @@ static int ravb_probe(struct platform_device *pdev)
error = ravb_mdio_init(priv);
if (error) {
dev_err(&pdev->dev, "failed to initialize MDIO\n");
- goto out_dma_free;
+ goto out_reset_mode;
}
+ /* Undo previous switch to config opmode. */
+ error = ravb_set_opmode(ndev, CCC_OPC_RESET);
+ if (error)
+ goto out_mdio_release;
+
netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll);
if (info->nc_queues)
netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll);
+ if (info->coalesce_irqs) {
+ netdev_sw_irq_coalesce_default_on(ndev);
+ if (num_present_cpus() == 1)
+ dev_set_threaded(ndev, true);
+ }
+
/* Network device register */
error = register_netdev(ndev);
if (error)
@@ -2877,7 +3089,8 @@ static int ravb_probe(struct platform_device *pdev)
netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
(u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
- platform_set_drvdata(pdev, ndev);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
return 0;
@@ -2886,22 +3099,19 @@ out_napi_del:
netif_napi_del(&priv->napi[RAVB_NC]);
netif_napi_del(&priv->napi[RAVB_BE]);
+out_mdio_release:
ravb_mdio_release(priv);
-out_dma_free:
+out_reset_mode:
+ ravb_set_opmode(ndev, CCC_OPC_RESET);
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
-
- /* Stop PTP Clock driver */
- if (info->ccc_gac)
- ravb_ptp_stop(ndev);
-out_disable_gptp_clk:
- clk_disable_unprepare(priv->gptp_clk);
-out_disable_refclk:
- clk_disable_unprepare(priv->refclk);
-out_release:
+out_rpm_put:
pm_runtime_put(&pdev->dev);
out_rpm_disable:
pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ clk_unprepare(priv->refclk);
+out_reset_assert:
reset_control_assert(rstc);
out_free_netdev:
free_netdev(ndev);
@@ -2913,6 +3123,12 @@ static void ravb_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ struct device *dev = &priv->pdev->dev;
+ int error;
+
+ error = pm_runtime_resume_and_get(dev);
+ if (error < 0)
+ return;
unregister_netdev(ndev);
if (info->nc_queues)
@@ -2921,20 +3137,13 @@ static void ravb_remove(struct platform_device *pdev)
ravb_mdio_release(priv);
- /* Stop PTP Clock driver */
- if (info->ccc_gac)
- ravb_ptp_stop(ndev);
-
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
- ravb_set_opmode(ndev, CCC_OPC_RESET);
-
- clk_disable_unprepare(priv->gptp_clk);
- clk_disable_unprepare(priv->refclk);
-
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_put_sync_suspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(dev);
+ clk_unprepare(priv->refclk);
reset_control_assert(priv->rstc);
free_netdev(ndev);
platform_set_drvdata(pdev, NULL);
@@ -2960,6 +3169,9 @@ static int ravb_wol_setup(struct net_device *ndev)
/* Enable MagicPacket */
ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
+ if (priv->info->ccc_gac)
+ ravb_ptp_stop(ndev);
+
return enable_irq_wake(priv->emac_irq);
}
@@ -2967,6 +3179,20 @@ static int ravb_wol_restore(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ int error;
+
+ /* Set reset mode to rearm the WoL logic. */
+ error = ravb_set_opmode(ndev, CCC_OPC_RESET);
+ if (error)
+ return error;
+
+ /* Set AVB config mode. */
+ error = ravb_set_config_mode(ndev);
+ if (error)
+ return error;
+
+ if (priv->info->ccc_gac)
+ ravb_ptp_init(ndev, priv->pdev);
if (info->nc_queues)
napi_enable(&priv->napi[RAVB_NC]);
@@ -2980,110 +3206,110 @@ static int ravb_wol_restore(struct net_device *ndev)
return disable_irq_wake(priv->emac_irq);
}
-static int __maybe_unused ravb_suspend(struct device *dev)
+static int ravb_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct ravb_private *priv = netdev_priv(ndev);
int ret;
if (!netif_running(ndev))
- return 0;
+ goto reset_assert;
netif_device_detach(ndev);
- if (priv->wol_enabled)
+ rtnl_lock();
+ if (priv->wol_enabled) {
ret = ravb_wol_setup(ndev);
- else
- ret = ravb_close(ndev);
+ rtnl_unlock();
+ return ret;
+ }
- if (priv->info->ccc_gac)
- ravb_ptp_stop(ndev);
+ ret = ravb_close(ndev);
+ rtnl_unlock();
+ if (ret)
+ return ret;
- return ret;
+ ret = pm_runtime_force_suspend(&priv->pdev->dev);
+ if (ret)
+ return ret;
+
+reset_assert:
+ return reset_control_assert(priv->rstc);
}
-static int __maybe_unused ravb_resume(struct device *dev)
+static int ravb_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
- int ret = 0;
-
- /* If WoL is enabled set reset mode to rearm the WoL logic */
- if (priv->wol_enabled) {
- ret = ravb_set_opmode(ndev, CCC_OPC_RESET);
- if (ret)
- return ret;
- }
-
- /* All register have been reset to default values.
- * Restore all registers which where setup at probe time and
- * reopen device if it was running before system suspended.
- */
+ int ret;
- /* Set AVB config mode */
- ret = ravb_set_config_mode(ndev);
+ ret = reset_control_deassert(priv->rstc);
if (ret)
return ret;
- if (info->gptp || info->ccc_gac) {
- /* Set GTI value */
- ret = ravb_set_gti(ndev);
- if (ret)
- return ret;
+ if (!netif_running(ndev))
+ return 0;
- /* Request GTI loading */
- ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+ rtnl_lock();
+	/* If WoL is enabled, restore the interface. */
+ if (priv->wol_enabled)
+ ret = ravb_wol_restore(ndev);
+ else
+ ret = pm_runtime_force_resume(dev);
+ if (ret) {
+ rtnl_unlock();
+ return ret;
}
- if (info->internal_delay)
- ravb_set_delay_mode(ndev);
+ /* Reopening the interface will restore the device to the working state. */
+ ret = ravb_open(ndev);
+ rtnl_unlock();
+ if (ret < 0)
+ goto out_rpm_put;
- /* Restore descriptor base address table */
- ravb_write(ndev, priv->desc_bat_dma, DBAT);
+ ravb_set_rx_mode(ndev);
+ netif_device_attach(ndev);
- if (priv->info->ccc_gac)
- ravb_ptp_init(ndev, priv->pdev);
+ return 0;
- if (netif_running(ndev)) {
- if (priv->wol_enabled) {
- ret = ravb_wol_restore(ndev);
- if (ret)
- return ret;
- }
- ret = ravb_open(ndev);
- if (ret < 0)
- return ret;
- ravb_set_rx_mode(ndev);
- netif_device_attach(ndev);
+out_rpm_put:
+ if (!priv->wol_enabled) {
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
}
return ret;
}
-static int __maybe_unused ravb_runtime_nop(struct device *dev)
+static int ravb_runtime_suspend(struct device *dev)
{
- /* Runtime PM callback shared between ->runtime_suspend()
- * and ->runtime_resume(). Simply returns success.
- *
- * This driver re-initializes all registers after
- * pm_runtime_get_sync() anyway so there is no need
- * to save and restore registers here.
- */
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ clk_disable(priv->refclk);
+
return 0;
}
+static int ravb_runtime_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ return clk_enable(priv->refclk);
+}
+
static const struct dev_pm_ops ravb_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
- SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
+ SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
+ RUNTIME_PM_OPS(ravb_runtime_suspend, ravb_runtime_resume, NULL)
};
static struct platform_driver ravb_driver = {
.probe = ravb_probe,
- .remove_new = ravb_remove,
+ .remove = ravb_remove,
.driver = {
.name = "ravb",
- .pm = &ravb_dev_pm_ops,
+ .pm = pm_ptr(&ravb_dev_pm_ops),
.of_match_table = ravb_match_table,
},
};
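
The new rx_buffer_size entries above add SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) of tailroom on top of the 2K frame, which is the room needed when RX buffers are turned into skbs in place (the driver now selects PAGE_POOL), and max_mtu is now derived from tx_max_frame_size. A minimal userspace sketch of that arithmetic, with an assumed cache-line size and a placeholder for the skb_shared_info size:

#include <stdio.h>

#define SZ_2K			2048u
#define SMP_CACHE_BYTES		64u	/* assumed; architecture dependent */
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define SKB_DATA_ALIGN(x)	ALIGN_UP(x, SMP_CACHE_BYTES)

int main(void)
{
	/* Placeholder for sizeof(struct skb_shared_info); not the real value. */
	unsigned int shinfo_size = 320;
	unsigned int overhead = 14 + 4 + 4;	/* ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN */

	printf("rx_buffer_size: %u\n", SZ_2K + SKB_DATA_ALIGN(shinfo_size));
	printf("max_mtu: %u\n", SZ_2K - overhead);
	return 0;
}
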
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index dcab638c57fe..84d09a8973b7 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -111,25 +111,35 @@ static void rswitch_top_init(struct rswitch_private *priv)
/* Forwarding engine block (MFWD) */
static void rswitch_fwd_init(struct rswitch_private *priv)
{
+ u32 all_ports_mask = GENMASK(RSWITCH_NUM_AGENTS - 1, 0);
unsigned int i;
- /* For ETHA */
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
- iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(i));
+ /* Start with empty configuration */
+ for (i = 0; i < RSWITCH_NUM_AGENTS; i++) {
+ /* Disable all port features */
+ iowrite32(0, priv->addr + FWPC0(i));
+ /* Disallow L3 forwarding and direct descriptor forwarding */
+		iowrite32(FIELD_PREP(FWPC1_LTHFW, all_ports_mask),
+ priv->addr + FWPC1(i));
+ /* Disallow L2 forwarding */
+		iowrite32(FIELD_PREP(FWPC2_LTWFW, all_ports_mask),
+ priv->addr + FWPC2(i));
+		/* Disallow port-based forwarding */
iowrite32(0, priv->addr + FWPBFC(i));
}
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+	/* For enabled ETHA ports, set up port-based forwarding */
+	rswitch_for_each_enabled_port(priv, i) {
+		/* Port-based forwarding from port i to the GWCA port */
+ rswitch_modify(priv->addr, FWPBFC(i), FWPBFC_PBDV,
+ FIELD_PREP(FWPBFC_PBDV, BIT(priv->gwca.index)));
+ /* Within GWCA port, forward to Rx queue for port i */
iowrite32(priv->rdev[i]->rx_queue->index,
priv->addr + FWPBFCSDC(GWCA_INDEX, i));
- iowrite32(BIT(priv->gwca.index), priv->addr + FWPBFC(i));
}
- /* For GWCA */
- iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(priv->gwca.index));
- iowrite32(FWPC1_DDE, priv->addr + FWPC1(priv->gwca.index));
- iowrite32(0, priv->addr + FWPBFC(priv->gwca.index));
- iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index));
+ /* For GWCA port, allow direct descriptor forwarding */
+ rswitch_modify(priv->addr, FWPC1(priv->gwca.index), FWPC1_DDE, FWPC1_DDE);
}
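
/* A worked example of the destination vector written above, under the
 * assumption (for illustration only) that the three ETHA ports sit at
 * agents 0..2 and the GWCA at agent 3 of the RSWITCH_NUM_AGENTS = 5
 * agents.  Standalone userspace sketch:
 */
#include <stdio.h>

#define RSWITCH_NUM_AGENTS	5
#define BIT(n)			(1u << (n))
#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FWPBFC_PBDV		GENMASK(RSWITCH_NUM_AGENTS - 1, 0)

int main(void)
{
	unsigned int gwca_index = 3;	/* assumed agent index */

	/* Each enabled ETHA port forwards only to the GWCA agent. */
	unsigned int pbdv = BIT(gwca_index) & FWPBFC_PBDV;

	printf("FWPBFC destination vector: 0x%02x\n", pbdv);	    /* 0x08 */
	printf("mask covering all agents:  0x%02x\n", FWPBFC_PBDV); /* 0x1f */
	return 0;
}
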
/* Gateway CPU agent block (GWCA) */
@@ -547,7 +557,6 @@ static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
desc = &gq->ts_ring[gq->ring_size];
desc->desc.die_dt = DT_LINKFIX;
rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
- INIT_LIST_HEAD(&priv->gwca.ts_info_list);
return 0;
}
@@ -862,24 +871,24 @@ static void rswitch_tx_free(struct net_device *ndev)
struct rswitch_ext_desc *desc;
struct sk_buff *skb;
- for (; rswitch_get_num_cur_queues(gq) > 0;
- gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
- desc = &gq->tx_ring[gq->dirty];
- if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
- break;
-
+ desc = &gq->tx_ring[gq->dirty];
+ while ((desc->desc.die_dt & DT_MASK) == DT_FEMPTY) {
dma_rmb();
+
skb = gq->skbs[gq->dirty];
if (skb) {
+ rdev->ndev->stats.tx_packets++;
+ rdev->ndev->stats.tx_bytes += skb->len;
dma_unmap_single(ndev->dev.parent,
gq->unmap_addrs[gq->dirty],
skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(gq->skbs[gq->dirty]);
gq->skbs[gq->dirty] = NULL;
- rdev->ndev->stats.tx_packets++;
- rdev->ndev->stats.tx_bytes += skb->len;
}
+
desc->desc.die_dt = DT_EEMPTY;
+ gq->dirty = rswitch_next_queue_index(gq, false, 1);
+ desc = &gq->tx_ring[gq->dirty];
}
}
@@ -908,8 +917,10 @@ retry:
if (napi_complete_done(napi, budget - quota)) {
spin_lock_irqsave(&priv->lock, flags);
- rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
- rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
+ if (test_bit(rdev->port, priv->opened_ports)) {
+ rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
+ rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
+ }
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -1001,9 +1012,10 @@ static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
static void rswitch_ts(struct rswitch_private *priv)
{
struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
- struct rswitch_gwca_ts_info *ts_info, *ts_info2;
struct skb_shared_hwtstamps shhwtstamps;
struct rswitch_ts_desc *desc;
+ struct rswitch_device *rdev;
+ struct sk_buff *ts_skb;
struct timespec64 ts;
unsigned int num;
u32 tag, port;
@@ -1013,23 +1025,28 @@ static void rswitch_ts(struct rswitch_private *priv)
dma_rmb();
port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl));
- tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));
-
- list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) {
- if (!(ts_info->port == port && ts_info->tag == tag))
- continue;
-
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
- ts.tv_sec = __le32_to_cpu(desc->ts_sec);
- ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
- shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
- skb_tstamp_tx(ts_info->skb, &shhwtstamps);
- dev_consume_skb_irq(ts_info->skb);
- list_del(&ts_info->list);
- kfree(ts_info);
- break;
- }
+ if (unlikely(port >= RSWITCH_NUM_PORTS))
+ goto next;
+ rdev = priv->rdev[port];
+ tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));
+ if (unlikely(tag >= TS_TAGS_PER_PORT))
+ goto next;
+ ts_skb = xchg(&rdev->ts_skb[tag], NULL);
+ smp_mb(); /* order rdev->ts_skb[] read before bitmap update */
+ clear_bit(tag, rdev->ts_skb_used);
+
+ if (unlikely(!ts_skb))
+ goto next;
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ ts.tv_sec = __le32_to_cpu(desc->ts_sec);
+ ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
+ shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
+ skb_tstamp_tx(ts_skb, &shhwtstamps);
+ dev_consume_skb_irq(ts_skb);
+
+next:
gq->cur = rswitch_next_queue_index(gq, true, 1);
desc = &gq->ts_ring[gq->cur];
}
@@ -1114,32 +1131,47 @@ static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha)
static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
{
- u32 val;
+ u32 pis, lsc;
rswitch_etha_write_mac_address(etha, mac);
+ switch (etha->phy_interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ pis = MPIC_PIS_GMII;
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_5GBASER:
+ pis = MPIC_PIS_XGMII;
+ break;
+ default:
+ pis = FIELD_GET(MPIC_PIS, ioread32(etha->addr + MPIC));
+ break;
+ }
+
switch (etha->speed) {
case 100:
- val = MPIC_LSC_100M;
+ lsc = MPIC_LSC_100M;
break;
case 1000:
- val = MPIC_LSC_1G;
+ lsc = MPIC_LSC_1G;
break;
case 2500:
- val = MPIC_LSC_2_5G;
+ lsc = MPIC_LSC_2_5G;
break;
default:
- return;
+ lsc = FIELD_GET(MPIC_LSC, ioread32(etha->addr + MPIC));
+ break;
}
- iowrite32(MPIC_PIS_GMII | val, etha->addr + MPIC);
+ rswitch_modify(etha->addr, MPIC, MPIC_PIS | MPIC_LSC,
+ FIELD_PREP(MPIC_PIS, pis) | FIELD_PREP(MPIC_LSC, lsc));
}
static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
{
- rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
- MPIC_PSMCS(etha->psmcs) | MPIC_PSMHT(0x06));
- rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45);
+ rswitch_modify(etha->addr, MPIC, MPIC_PSMCS | MPIC_PSMHT,
+ FIELD_PREP(MPIC_PSMCS, etha->psmcs) |
+ FIELD_PREP(MPIC_PSMHT, 0x06));
}
static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
@@ -1168,42 +1200,29 @@ static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
}
-static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read,
- int phyad, int devad, int regad, int data)
+static int rswitch_etha_mpsm_op(struct rswitch_etha *etha, bool read,
+ unsigned int mmf, unsigned int pda,
+ unsigned int pra, unsigned int pop,
+ unsigned int prd)
{
- int pop = read ? MDIO_READ_C45 : MDIO_WRITE_C45;
u32 val;
int ret;
- if (devad == 0xffffffff)
- return -ENODEV;
-
- writel(MMIS1_CLEAR_FLAGS, etha->addr + MMIS1);
-
- val = MPSM_PSME | MPSM_MFF_C45;
- iowrite32((regad << 16) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);
+ val = MPSM_PSME |
+ FIELD_PREP(MPSM_MFF, mmf) |
+ FIELD_PREP(MPSM_PDA, pda) |
+ FIELD_PREP(MPSM_PRA, pra) |
+ FIELD_PREP(MPSM_POP, pop) |
+ FIELD_PREP(MPSM_PRD, prd);
+ iowrite32(val, etha->addr + MPSM);
- ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
+ ret = rswitch_reg_wait(etha->addr, MPSM, MPSM_PSME, 0);
if (ret)
return ret;
- rswitch_modify(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
-
if (read) {
- writel((pop << 13) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);
-
- ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
- if (ret)
- return ret;
-
- ret = (ioread32(etha->addr + MPSM) & MPSM_PRD_MASK) >> 16;
-
- rswitch_modify(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
- } else {
- iowrite32((data << 16) | (pop << 13) | (devad << 8) | (phyad << 3) | val,
- etha->addr + MPSM);
-
- ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PWACS, MMIS1_PWACS);
+ val = ioread32(etha->addr + MPSM);
+ ret = FIELD_GET(MPSM_PRD, val);
}
return ret;
@@ -1213,16 +1232,47 @@ static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad,
int regad)
{
struct rswitch_etha *etha = bus->priv;
+ int ret;
+
+ ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_ADDRESS, regad);
+ if (ret)
+ return ret;
- return rswitch_etha_set_access(etha, true, addr, devad, regad, 0);
+ return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_READ_C45, 0);
}
static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad,
int regad, u16 val)
{
struct rswitch_etha *etha = bus->priv;
+ int ret;
- return rswitch_etha_set_access(etha, false, addr, devad, regad, val);
+ ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_ADDRESS, regad);
+ if (ret)
+ return ret;
+
+ return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_WRITE, val);
+}
+
+static int rswitch_etha_mii_read_c22(struct mii_bus *bus, int phyad, int regad)
+{
+ struct rswitch_etha *etha = bus->priv;
+
+ return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C22, phyad, regad,
+ MPSM_POP_READ_C22, 0);
+}
+
+static int rswitch_etha_mii_write_c22(struct mii_bus *bus, int phyad,
+ int regad, u16 val)
+{
+ struct rswitch_etha *etha = bus->priv;
+
+ return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C22, phyad, regad,
+ MPSM_POP_WRITE, val);
}
/* Call of_node_put(port) after done */
@@ -1307,6 +1357,8 @@ static int rswitch_mii_register(struct rswitch_device *rdev)
mii_bus->priv = rdev->etha;
mii_bus->read_c45 = rswitch_etha_mii_read_c45;
mii_bus->write_c45 = rswitch_etha_mii_write_c45;
+ mii_bus->read = rswitch_etha_mii_read_c22;
+ mii_bus->write = rswitch_etha_mii_write_c22;
mii_bus->parent = &rdev->priv->pdev->dev;
mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
@@ -1527,7 +1579,7 @@ static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
{
unsigned int i;
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ rswitch_for_each_enabled_port(priv, i) {
phy_exit(priv->rdev[i]->serdes);
rswitch_ether_port_deinit_one(priv->rdev[i]);
}
@@ -1538,20 +1590,20 @@ static int rswitch_open(struct net_device *ndev)
struct rswitch_device *rdev = netdev_priv(ndev);
unsigned long flags;
- phy_start(ndev->phydev);
+ if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
+ iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
napi_enable(&rdev->napi);
- netif_start_queue(ndev);
spin_lock_irqsave(&rdev->priv->lock, flags);
+ bitmap_set(rdev->priv->opened_ports, rdev->port, 1);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
spin_unlock_irqrestore(&rdev->priv->lock, flags);
- if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
- iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
+ phy_start(ndev->phydev);
- bitmap_set(rdev->priv->opened_ports, rdev->port, 1);
+ netif_start_queue(ndev);
return 0;
};
@@ -1559,31 +1611,34 @@ static int rswitch_open(struct net_device *ndev)
static int rswitch_stop(struct net_device *ndev)
{
struct rswitch_device *rdev = netdev_priv(ndev);
- struct rswitch_gwca_ts_info *ts_info, *ts_info2;
+ struct sk_buff *ts_skb;
unsigned long flags;
+ unsigned int tag;
netif_tx_stop_all_queues(ndev);
- bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
-
- if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
- iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
- list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) {
- if (ts_info->port != rdev->port)
- continue;
- dev_kfree_skb_irq(ts_info->skb);
- list_del(&ts_info->list);
- kfree(ts_info);
- }
+ phy_stop(ndev->phydev);
spin_lock_irqsave(&rdev->priv->lock, flags);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
+ bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
spin_unlock_irqrestore(&rdev->priv->lock, flags);
- phy_stop(ndev->phydev);
napi_disable(&rdev->napi);
+ if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
+ iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
+
+ for (tag = find_first_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
+ tag < TS_TAGS_PER_PORT;
+ tag = find_next_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT, tag + 1)) {
+ ts_skb = xchg(&rdev->ts_skb[tag], NULL);
+ clear_bit(tag, rdev->ts_skb_used);
+ if (ts_skb)
+ dev_kfree_skb(ts_skb);
+ }
+
return 0;
};
@@ -1594,20 +1649,17 @@ static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev,
desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) |
INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT);
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
- struct rswitch_gwca_ts_info *ts_info;
+ unsigned int tag;
- ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
- if (!ts_info)
+ tag = find_first_zero_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
+ if (tag == TS_TAGS_PER_PORT)
return false;
+ smp_mb(); /* order bitmap read before rdev->ts_skb[] write */
+ rdev->ts_skb[tag] = skb_get(skb);
+ set_bit(tag, rdev->ts_skb_used);
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- rdev->ts_tag++;
- desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC);
-
- ts_info->skb = skb_get(skb);
- ts_info->port = rdev->port;
- ts_info->tag = rdev->ts_tag;
- list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list);
+ desc->info1 |= cpu_to_le64(INFO1_TSUN(tag) | INFO1_TXC);
skb_tx_timestamp(skb);
}
@@ -1681,8 +1733,11 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
if (dma_mapping_error(ndev->dev.parent, dma_addr_orig))
goto err_kfree;
- gq->skbs[gq->cur] = skb;
- gq->unmap_addrs[gq->cur] = dma_addr_orig;
+	/* Store the skb at the last descriptor so it is not freed before the
+	 * hardware has finished sending all descriptors of the frame.
+	 */
+ gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = skb;
+ gq->unmap_addrs[(gq->cur + nr_desc - 1) % gq->ring_size] = dma_addr_orig;
+
+ dma_wmb();
/* DT_FSTART should be set at last. So, this is reverse order. */
for (i = nr_desc; i-- > 0; ) {
@@ -1694,14 +1749,13 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
goto err_unmap;
}
- wmb(); /* gq->cur must be incremented after die_dt was set */
-
gq->cur = rswitch_next_queue_index(gq, true, nr_desc);
rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
return ret;
err_unmap:
+ gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = NULL;
dma_unmap_single(ndev->dev.parent, dma_addr_orig, skb->len, DMA_TO_DEVICE);
err_kfree:
@@ -1809,14 +1863,12 @@ static const struct net_device_ops rswitch_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static int rswitch_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
+static int rswitch_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info)
{
struct rswitch_device *rdev = netdev_priv(ndev);
info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -1891,7 +1943,6 @@ static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index
rdev->np_port = rswitch_get_port_node(rdev);
rdev->disabled = !rdev->np_port;
err = of_get_ethdev_address(rdev->np_port, ndev);
- of_node_put(rdev->np_port);
if (err) {
if (is_valid_ether_addr(rdev->etha->mac_addr))
eth_hw_addr_set(ndev, rdev->etha->mac_addr);
@@ -1903,9 +1954,6 @@ static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index
if (err < 0)
goto out_get_params;
- if (rdev->priv->gwca.speed < rdev->etha->speed)
- rdev->priv->gwca.speed = rdev->etha->speed;
-
err = rswitch_rxdmac_alloc(ndev);
if (err < 0)
goto out_rxdmac;
@@ -1921,6 +1969,7 @@ out_txdmac:
out_rxdmac:
out_get_params:
+ of_node_put(rdev->np_port);
netif_napi_del(&rdev->napi);
free_netdev(ndev);
@@ -1934,6 +1983,7 @@ static void rswitch_device_free(struct rswitch_private *priv, unsigned int index
rswitch_txdmac_free(ndev);
rswitch_rxdmac_free(ndev);
+ of_node_put(rdev->np_port);
netif_napi_del(&rdev->napi);
free_netdev(ndev);
}
@@ -2190,7 +2240,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(renesas_eth_sw_pm_ops, renesas_eth_sw_suspend,
static struct platform_driver renesas_eth_sw_driver_platform = {
.probe = renesas_eth_sw_probe,
- .remove_new = renesas_eth_sw_remove,
+ .remove = renesas_eth_sw_remove,
.driver = {
.name = "renesas_eth_sw",
.pm = pm_sleep_ptr(&renesas_eth_sw_pm_ops),
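
The timestamp rework above replaces the per-switch ts_info list (one kzalloc per timestamped frame) with a fixed per-port ts_skb[] array indexed by a tag plus a ts_skb_used bitmap, making completion lookup O(1). A simplified, single-threaded userspace model of the claim/complete flow; the driver itself uses find_first_zero_bit(), xchg() and smp_mb() for the concurrent version:

#include <stdbool.h>
#include <stdio.h>

#define TS_TAGS_PER_PORT 256

struct sk_buff;				/* opaque stand-in */

static struct sk_buff *ts_skb[TS_TAGS_PER_PORT];
static bool ts_skb_used[TS_TAGS_PER_PORT];

/* TX path: claim the first free tag; -1 means no timestamp slot free. */
static int ts_tag_alloc(struct sk_buff *skb)
{
	for (int tag = 0; tag < TS_TAGS_PER_PORT; tag++) {
		if (!ts_skb_used[tag]) {
			ts_skb[tag] = skb;
			ts_skb_used[tag] = true;
			return tag;
		}
	}
	return -1;
}

/* Completion path: O(1) lookup by the tag echoed in the TS descriptor. */
static struct sk_buff *ts_tag_complete(unsigned int tag)
{
	struct sk_buff *skb;

	if (tag >= TS_TAGS_PER_PORT)
		return NULL;
	skb = ts_skb[tag];
	ts_skb[tag] = NULL;
	ts_skb_used[tag] = false;
	return skb;
}

int main(void)
{
	/* Any non-NULL pointer works for the sketch. */
	int tag = ts_tag_alloc((struct sk_buff *)&ts_skb);

	printf("claimed tag %d, completed %p\n", tag,
	       (void *)ts_tag_complete(tag));
	return 0;
}
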
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index 72e3ff596d31..532192cbca4b 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -12,6 +12,7 @@
#define RSWITCH_MAX_NUM_QUEUES 128
+#define RSWITCH_NUM_AGENTS 5
#define RSWITCH_NUM_PORTS 3
#define rswitch_for_each_enabled_port(priv, i) \
for (i = 0; i < RSWITCH_NUM_PORTS; i++) \
@@ -724,35 +725,28 @@ enum rswitch_etha_mode {
#define EAVCC_VEM_SC_TAG (0x3 << 16)
-#define MPIC_PIS_MII 0x00
-#define MPIC_PIS_GMII 0x02
-#define MPIC_PIS_XGMII 0x04
-#define MPIC_LSC_SHIFT 3
-#define MPIC_LSC_100M (1 << MPIC_LSC_SHIFT)
-#define MPIC_LSC_1G (2 << MPIC_LSC_SHIFT)
-#define MPIC_LSC_2_5G (3 << MPIC_LSC_SHIFT)
-
-#define MDIO_READ_C45 0x03
-#define MDIO_WRITE_C45 0x01
+#define MPIC_PIS GENMASK(2, 0)
+#define MPIC_PIS_GMII 2
+#define MPIC_PIS_XGMII 4
+#define MPIC_LSC GENMASK(5, 3)
+#define MPIC_LSC_100M 1
+#define MPIC_LSC_1G 2
+#define MPIC_LSC_2_5G 3
+#define MPIC_PSMCS GENMASK(22, 16)
+#define MPIC_PSMHT GENMASK(26, 24)
#define MPSM_PSME BIT(0)
-#define MPSM_MFF_C45 BIT(2)
-#define MPSM_PRD_SHIFT 16
-#define MPSM_PRD_MASK GENMASK(31, MPSM_PRD_SHIFT)
-
-/* Completion flags */
-#define MMIS1_PAACS BIT(2) /* Address */
-#define MMIS1_PWACS BIT(1) /* Write */
-#define MMIS1_PRACS BIT(0) /* Read */
-#define MMIS1_CLEAR_FLAGS 0xf
-
-#define MPIC_PSMCS_SHIFT 16
-#define MPIC_PSMCS_MASK GENMASK(22, MPIC_PSMCS_SHIFT)
-#define MPIC_PSMCS(val) ((val) << MPIC_PSMCS_SHIFT)
-
-#define MPIC_PSMHT_SHIFT 24
-#define MPIC_PSMHT_MASK GENMASK(26, MPIC_PSMHT_SHIFT)
-#define MPIC_PSMHT(val) ((val) << MPIC_PSMHT_SHIFT)
+#define MPSM_MFF BIT(2)
+#define MPSM_MMF_C22 0
+#define MPSM_MMF_C45 1
+#define MPSM_PDA GENMASK(7, 3)
+#define MPSM_PRA GENMASK(12, 8)
+#define MPSM_POP GENMASK(14, 13)
+#define MPSM_POP_ADDRESS 0
+#define MPSM_POP_WRITE 1
+#define MPSM_POP_READ_C22 2
+#define MPSM_POP_READ_C45 3
+#define MPSM_PRD GENMASK(31, 16)
#define MLVC_PLV BIT(16)
@@ -806,6 +800,7 @@ enum rswitch_gwca_mode {
#define CABPPFLC_INIT_VALUE 0x00800080
/* MFWD */
+#define FWPC0(i) (FWPC00 + (i) * 0x10)
#define FWPC0_LTHTA BIT(0)
#define FWPC0_IP4UE BIT(3)
#define FWPC0_IP4TE BIT(4)
@@ -819,15 +814,15 @@ enum rswitch_gwca_mode {
#define FWPC0_MACHMA BIT(27)
#define FWPC0_VLANSA BIT(28)
-#define FWPC0(i) (FWPC00 + (i) * 0x10)
-#define FWPC0_DEFAULT (FWPC0_LTHTA | FWPC0_IP4UE | FWPC0_IP4TE | \
- FWPC0_IP4OE | FWPC0_L2SE | FWPC0_IP4EA | \
- FWPC0_IPDSA | FWPC0_IPHLA | FWPC0_MACSDA | \
- FWPC0_MACHLA | FWPC0_MACHMA | FWPC0_VLANSA)
#define FWPC1(i) (FWPC10 + (i) * 0x10)
+#define FWPC1_LTHFW		GENMASK(16 + (RSWITCH_NUM_AGENTS - 1), 16)
#define FWPC1_DDE BIT(0)
-#define FWPBFC(i) (FWPBFC0 + (i) * 0x10)
+#define FWPC2(i) (FWPC20 + (i) * 0x10)
+#define FWPC2_LTWFW		GENMASK(16 + (RSWITCH_NUM_AGENTS - 1), 16)
+
+#define FWPBFC(i) (FWPBFC0 + (i) * 0x10)
+#define FWPBFC_PBDV GENMASK(RSWITCH_NUM_AGENTS - 1, 0)
#define FWPBFCSDC(j, i) (FWPBFCSDC00 + (i) * 0x10 + (j) * 0x04)
@@ -972,14 +967,6 @@ struct rswitch_gwca_queue {
};
};
-struct rswitch_gwca_ts_info {
- struct sk_buff *skb;
- struct list_head list;
-
- int port;
- u8 tag;
-};
-
#define RSWITCH_NUM_IRQ_REGS (RSWITCH_MAX_NUM_QUEUES / BITS_PER_TYPE(u32))
struct rswitch_gwca {
unsigned int index;
@@ -989,14 +976,13 @@ struct rswitch_gwca {
struct rswitch_gwca_queue *queues;
int num_queues;
struct rswitch_gwca_queue ts_queue;
- struct list_head ts_info_list;
DECLARE_BITMAP(used, RSWITCH_MAX_NUM_QUEUES);
u32 tx_irq_bits[RSWITCH_NUM_IRQ_REGS];
u32 rx_irq_bits[RSWITCH_NUM_IRQ_REGS];
- int speed;
};
#define NUM_QUEUES_PER_NDEV 2
+#define TS_TAGS_PER_PORT 256
struct rswitch_device {
struct rswitch_private *priv;
struct net_device *ndev;
@@ -1004,7 +990,8 @@ struct rswitch_device {
void __iomem *addr;
struct rswitch_gwca_queue *tx_queue;
struct rswitch_gwca_queue *rx_queue;
- u8 ts_tag;
+ struct sk_buff *ts_skb[TS_TAGS_PER_PORT];
+ DECLARE_BITMAP(ts_skb_used, TS_TAGS_PER_PORT);
bool disabled;
int port;
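
The register macros above now describe fields with GENMASK() and are manipulated with FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>, and a Clause 45 MDIO access becomes two MPSM operations: an ADDRESS cycle carrying the register address in PRD, then a READ or WRITE cycle. A self-contained userspace approximation (the helper macros below are simplified stand-ins, and the PHY/device/register numbers are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel's bit helpers. */
#define BIT(n)			(1u << (n))
#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

#define MPSM_PSME		BIT(0)
#define MPSM_MFF		BIT(2)
#define MPSM_MMF_C45		1
#define MPSM_PDA		GENMASK(7, 3)
#define MPSM_PRA		GENMASK(12, 8)
#define MPSM_POP		GENMASK(14, 13)
#define MPSM_POP_ADDRESS	0
#define MPSM_POP_READ_C45	3
#define MPSM_PRD		GENMASK(31, 16)

static uint32_t mpsm_op(unsigned int pda, unsigned int pra,
			unsigned int pop, unsigned int prd)
{
	return MPSM_PSME | FIELD_PREP(MPSM_MFF, MPSM_MMF_C45) |
	       FIELD_PREP(MPSM_PDA, pda) | FIELD_PREP(MPSM_PRA, pra) |
	       FIELD_PREP(MPSM_POP, pop) | FIELD_PREP(MPSM_PRD, prd);
}

int main(void)
{
	/* Step 1: ADDRESS cycle, PRD carries the register address. */
	printf("MPSM addr op: 0x%08x\n", mpsm_op(1, 7, MPSM_POP_ADDRESS, 0x1234));
	/* Step 2: READ cycle; hardware then places the data in PRD. */
	printf("MPSM read op: 0x%08x\n", mpsm_op(1, 7, MPSM_POP_READ_C45, 0));
	/* Extracting the result once PSME has cleared: */
	printf("data: 0x%04x\n", FIELD_GET(MPSM_PRD, 0xabcd6000u));
	return 0;
}
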
diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c
new file mode 100644
index 000000000000..6b3f7fca8d15
--- /dev/null
+++ b/drivers/net/ethernet/renesas/rtsn.c
@@ -0,0 +1,1388 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Renesas Ethernet-TSN device driver
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ * Copyright (C) 2023 Niklas Söderlund <niklas.soderlund@ragnatech.se>
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#include "rtsn.h"
+#include "rcar_gen4_ptp.h"
+
+struct rtsn_private {
+ struct net_device *ndev;
+ struct platform_device *pdev;
+ void __iomem *base;
+ struct rcar_gen4_ptp_private *ptp_priv;
+ struct clk *clk;
+ struct reset_control *reset;
+
+ u32 num_tx_ring;
+ u32 num_rx_ring;
+ u32 tx_desc_bat_size;
+ dma_addr_t tx_desc_bat_dma;
+ struct rtsn_desc *tx_desc_bat;
+ u32 rx_desc_bat_size;
+ dma_addr_t rx_desc_bat_dma;
+ struct rtsn_desc *rx_desc_bat;
+ dma_addr_t tx_desc_dma;
+ dma_addr_t rx_desc_dma;
+ struct rtsn_ext_desc *tx_ring;
+ struct rtsn_ext_ts_desc *rx_ring;
+ struct sk_buff **tx_skb;
+ struct sk_buff **rx_skb;
+ spinlock_t lock; /* Register access lock */
+ u32 cur_tx;
+ u32 dirty_tx;
+ u32 cur_rx;
+ u32 dirty_rx;
+ u8 ts_tag;
+ struct napi_struct napi;
+ struct rtnl_link_stats64 stats;
+
+ struct mii_bus *mii;
+ phy_interface_t iface;
+ int link;
+ int speed;
+
+ int tx_data_irq;
+ int rx_data_irq;
+};
+
+static u32 rtsn_read(struct rtsn_private *priv, enum rtsn_reg reg)
+{
+ return ioread32(priv->base + reg);
+}
+
+static void rtsn_write(struct rtsn_private *priv, enum rtsn_reg reg, u32 data)
+{
+ iowrite32(data, priv->base + reg);
+}
+
+static void rtsn_modify(struct rtsn_private *priv, enum rtsn_reg reg,
+ u32 clear, u32 set)
+{
+ rtsn_write(priv, reg, (rtsn_read(priv, reg) & ~clear) | set);
+}
+
+static int rtsn_reg_wait(struct rtsn_private *priv, enum rtsn_reg reg,
+ u32 mask, u32 expected)
+{
+ u32 val;
+
+ return readl_poll_timeout(priv->base + reg, val,
+ (val & mask) == expected,
+ RTSN_INTERVAL_US, RTSN_TIMEOUT_US);
+}
+
+static void rtsn_ctrl_data_irq(struct rtsn_private *priv, bool enable)
+{
+ if (enable) {
+ rtsn_write(priv, TDIE0, TDIE_TDID_TDX(TX_CHAIN_IDX));
+ rtsn_write(priv, RDIE0, RDIE_RDID_RDX(RX_CHAIN_IDX));
+ } else {
+ rtsn_write(priv, TDID0, TDIE_TDID_TDX(TX_CHAIN_IDX));
+ rtsn_write(priv, RDID0, RDIE_RDID_RDX(RX_CHAIN_IDX));
+ }
+}
+
+static void rtsn_get_timestamp(struct rtsn_private *priv, struct timespec64 *ts)
+{
+ struct rcar_gen4_ptp_private *ptp_priv = priv->ptp_priv;
+
+ ptp_priv->info.gettime64(&ptp_priv->info, ts);
+}
+
+static int rtsn_tx_free(struct net_device *ndev, bool free_txed_only)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ struct rtsn_ext_desc *desc;
+ struct sk_buff *skb;
+ int free_num = 0;
+ int entry, size;
+
+ for (; priv->cur_tx - priv->dirty_tx > 0; priv->dirty_tx++) {
+ entry = priv->dirty_tx % priv->num_tx_ring;
+ desc = &priv->tx_ring[entry];
+ if (free_txed_only && (desc->die_dt & DT_MASK) != DT_FEMPTY)
+ break;
+
+ dma_rmb();
+ size = le16_to_cpu(desc->info_ds) & TX_DS;
+ skb = priv->tx_skb[entry];
+ if (skb) {
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct timespec64 ts;
+
+ rtsn_get_timestamp(priv, &ts);
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+ dma_unmap_single(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ size, DMA_TO_DEVICE);
+ dev_kfree_skb_any(priv->tx_skb[entry]);
+ free_num++;
+
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += size;
+ }
+
+ desc->die_dt = DT_EEMPTY;
+ }
+
+ desc = &priv->tx_ring[priv->num_tx_ring];
+ desc->die_dt = DT_LINK;
+
+ return free_num;
+}
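
/* An aside on the accounting above: cur_tx/dirty_tx (and the RX
 * counterparts) are free-running u32 counters.  Occupancy is simply
 * cur - dirty (well defined across unsigned wraparound), and the slot
 * index is the counter modulo the ring size.  Standalone sketch with
 * made-up numbers:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t num_ring = 64;			/* ring entries */
	uint32_t dirty = UINT32_MAX - 2;	/* oldest in-flight counter */
	uint32_t cur = UINT32_MAX;		/* next counter to use */

	printf("in flight: %u\n", cur - dirty);		/* 2 */
	printf("clean slot: %u\n", dirty % num_ring);	/* 61 */
	cur += 3;					/* counter wraps to 2 */
	printf("in flight after wrap: %u\n", cur - dirty); /* 5 */
	return 0;
}
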
+
+static int rtsn_rx(struct net_device *ndev, int budget)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ unsigned int ndescriptors;
+ unsigned int rx_packets;
+ unsigned int i;
+ bool get_ts;
+
+ get_ts = priv->ptp_priv->tstamp_rx_ctrl &
+ RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
+
+ ndescriptors = priv->dirty_rx + priv->num_rx_ring - priv->cur_rx;
+ rx_packets = 0;
+ for (i = 0; i < ndescriptors; i++) {
+ const unsigned int entry = priv->cur_rx % priv->num_rx_ring;
+ struct rtsn_ext_ts_desc *desc = &priv->rx_ring[entry];
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ u16 pkt_len;
+
+ /* Stop processing descriptors if budget is consumed. */
+ if (rx_packets >= budget)
+ break;
+
+ /* Stop processing descriptors on first empty. */
+ if ((desc->die_dt & DT_MASK) == DT_FEMPTY)
+ break;
+
+ dma_rmb();
+ pkt_len = le16_to_cpu(desc->info_ds) & RX_DS;
+
+ skb = priv->rx_skb[entry];
+ priv->rx_skb[entry] = NULL;
+ dma_addr = le32_to_cpu(desc->dptr);
+ dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+
+ /* Get timestamp if enabled. */
+ if (get_ts) {
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct timespec64 ts;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+
+ ts.tv_sec = (u64)le32_to_cpu(desc->ts_sec);
+ ts.tv_nsec = le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
+
+ shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
+ }
+
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(&priv->napi, skb);
+
+ /* Update statistics. */
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += pkt_len;
+
+ /* Update counters. */
+ priv->cur_rx++;
+ rx_packets++;
+ }
+
+ /* Refill the RX ring buffers */
+ for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
+ const unsigned int entry = priv->dirty_rx % priv->num_rx_ring;
+ struct rtsn_ext_ts_desc *desc = &priv->rx_ring[entry];
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+
+ desc->info_ds = cpu_to_le16(PKT_BUF_SZ);
+
+ if (!priv->rx_skb[entry]) {
+ skb = napi_alloc_skb(&priv->napi,
+ PKT_BUF_SZ + RTSN_ALIGN - 1);
+ if (!skb)
+ break;
+ skb_reserve(skb, NET_IP_ALIGN);
+ dma_addr = dma_map_single(ndev->dev.parent, skb->data,
+ le16_to_cpu(desc->info_ds),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ desc->info_ds = cpu_to_le16(0);
+ desc->dptr = cpu_to_le32(dma_addr);
+ skb_checksum_none_assert(skb);
+ priv->rx_skb[entry] = skb;
+ }
+
+ dma_wmb();
+ desc->die_dt = DT_FEMPTY | D_DIE;
+ }
+
+ priv->rx_ring[priv->num_rx_ring].die_dt = DT_LINK;
+
+ return rx_packets;
+}
+
+static int rtsn_poll(struct napi_struct *napi, int budget)
+{
+ struct rtsn_private *priv;
+ struct net_device *ndev;
+ unsigned long flags;
+ int work_done;
+
+ ndev = napi->dev;
+ priv = netdev_priv(ndev);
+
+ /* Processing RX Descriptor Ring */
+ work_done = rtsn_rx(ndev, budget);
+
+ /* Processing TX Descriptor Ring */
+ spin_lock_irqsave(&priv->lock, flags);
+ rtsn_tx_free(ndev, true);
+ netif_wake_subqueue(ndev, 0);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Re-enable TX/RX interrupts */
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ spin_lock_irqsave(&priv->lock, flags);
+ rtsn_ctrl_data_irq(priv, true);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ return work_done;
+}
+
+static int rtsn_desc_alloc(struct rtsn_private *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+ unsigned int i;
+
+ priv->tx_desc_bat_size = sizeof(struct rtsn_desc) * TX_NUM_CHAINS;
+ priv->tx_desc_bat = dma_alloc_coherent(dev, priv->tx_desc_bat_size,
+ &priv->tx_desc_bat_dma,
+ GFP_KERNEL);
+
+ if (!priv->tx_desc_bat)
+ return -ENOMEM;
+
+ for (i = 0; i < TX_NUM_CHAINS; i++)
+ priv->tx_desc_bat[i].die_dt = DT_EOS;
+
+ priv->rx_desc_bat_size = sizeof(struct rtsn_desc) * RX_NUM_CHAINS;
+ priv->rx_desc_bat = dma_alloc_coherent(dev, priv->rx_desc_bat_size,
+ &priv->rx_desc_bat_dma,
+ GFP_KERNEL);
+
+ if (!priv->rx_desc_bat)
+ return -ENOMEM;
+
+ for (i = 0; i < RX_NUM_CHAINS; i++)
+ priv->rx_desc_bat[i].die_dt = DT_EOS;
+
+ return 0;
+}
+
+static void rtsn_desc_free(struct rtsn_private *priv)
+{
+ if (priv->tx_desc_bat)
+ dma_free_coherent(&priv->pdev->dev, priv->tx_desc_bat_size,
+ priv->tx_desc_bat, priv->tx_desc_bat_dma);
+ priv->tx_desc_bat = NULL;
+
+ if (priv->rx_desc_bat)
+ dma_free_coherent(&priv->pdev->dev, priv->rx_desc_bat_size,
+ priv->rx_desc_bat, priv->rx_desc_bat_dma);
+ priv->rx_desc_bat = NULL;
+}
+
+static void rtsn_chain_free(struct rtsn_private *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+
+ dma_free_coherent(dev,
+ sizeof(struct rtsn_ext_desc) * (priv->num_tx_ring + 1),
+ priv->tx_ring, priv->tx_desc_dma);
+ priv->tx_ring = NULL;
+
+ dma_free_coherent(dev,
+ sizeof(struct rtsn_ext_ts_desc) * (priv->num_rx_ring + 1),
+ priv->rx_ring, priv->rx_desc_dma);
+ priv->rx_ring = NULL;
+
+ kfree(priv->tx_skb);
+ priv->tx_skb = NULL;
+
+ kfree(priv->rx_skb);
+ priv->rx_skb = NULL;
+}
+
+static int rtsn_chain_init(struct rtsn_private *priv, int tx_size, int rx_size)
+{
+ struct net_device *ndev = priv->ndev;
+ struct sk_buff *skb;
+ int i;
+
+ priv->num_tx_ring = tx_size;
+ priv->num_rx_ring = rx_size;
+
+ priv->tx_skb = kcalloc(tx_size, sizeof(*priv->tx_skb), GFP_KERNEL);
+ priv->rx_skb = kcalloc(rx_size, sizeof(*priv->rx_skb), GFP_KERNEL);
+
+ if (!priv->rx_skb || !priv->tx_skb)
+ goto error;
+
+ for (i = 0; i < rx_size; i++) {
+ skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RTSN_ALIGN - 1);
+ if (!skb)
+ goto error;
+ skb_reserve(skb, NET_IP_ALIGN);
+ priv->rx_skb[i] = skb;
+ }
+
+ /* Allocate TX, RX descriptors */
+ priv->tx_ring = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(struct rtsn_ext_desc) * (tx_size + 1),
+ &priv->tx_desc_dma, GFP_KERNEL);
+ priv->rx_ring = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(struct rtsn_ext_ts_desc) * (rx_size + 1),
+ &priv->rx_desc_dma, GFP_KERNEL);
+
+ if (!priv->tx_ring || !priv->rx_ring)
+ goto error;
+
+ return 0;
+error:
+ rtsn_chain_free(priv);
+
+ return -ENOMEM;
+}
+
+static void rtsn_chain_format(struct rtsn_private *priv)
+{
+ struct net_device *ndev = priv->ndev;
+ struct rtsn_ext_ts_desc *rx_desc;
+ struct rtsn_ext_desc *tx_desc;
+ struct rtsn_desc *bat_desc;
+ dma_addr_t dma_addr;
+ unsigned int i;
+
+ priv->cur_tx = 0;
+ priv->cur_rx = 0;
+ priv->dirty_rx = 0;
+ priv->dirty_tx = 0;
+
+ /* TX */
+ memset(priv->tx_ring, 0, sizeof(*tx_desc) * priv->num_tx_ring);
+ for (i = 0, tx_desc = priv->tx_ring; i < priv->num_tx_ring; i++, tx_desc++)
+ tx_desc->die_dt = DT_EEMPTY | D_DIE;
+
+ tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma);
+ tx_desc->die_dt = DT_LINK;
+
+ bat_desc = &priv->tx_desc_bat[TX_CHAIN_IDX];
+ bat_desc->die_dt = DT_LINK;
+ bat_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma);
+
+ /* RX */
+ memset(priv->rx_ring, 0, sizeof(*rx_desc) * priv->num_rx_ring);
+ for (i = 0, rx_desc = priv->rx_ring; i < priv->num_rx_ring; i++, rx_desc++) {
+ dma_addr = dma_map_single(ndev->dev.parent,
+ priv->rx_skb[i]->data, PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ if (!dma_mapping_error(ndev->dev.parent, dma_addr))
+ rx_desc->info_ds = cpu_to_le16(PKT_BUF_SZ);
+ rx_desc->dptr = cpu_to_le32((u32)dma_addr);
+ rx_desc->die_dt = DT_FEMPTY | D_DIE;
+ }
+ rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma);
+ rx_desc->die_dt = DT_LINK;
+
+ bat_desc = &priv->rx_desc_bat[RX_CHAIN_IDX];
+ bat_desc->die_dt = DT_LINK;
+ bat_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma);
+}
+
+static int rtsn_dmac_init(struct rtsn_private *priv)
+{
+ int ret;
+
+ ret = rtsn_chain_init(priv, TX_CHAIN_SIZE, RX_CHAIN_SIZE);
+ if (ret)
+ return ret;
+
+ rtsn_chain_format(priv);
+
+ return 0;
+}
+
+static enum rtsn_mode rtsn_read_mode(struct rtsn_private *priv)
+{
+ return (rtsn_read(priv, OSR) & OSR_OPS) >> 1;
+}
+
+static int rtsn_wait_mode(struct rtsn_private *priv, enum rtsn_mode mode)
+{
+ unsigned int i;
+
+	/* Need to busy-loop as mode changes can happen in atomic context. */
+ for (i = 0; i < RTSN_TIMEOUT_US / RTSN_INTERVAL_US; i++) {
+ if (rtsn_read_mode(priv) == mode)
+ return 0;
+
+ udelay(RTSN_INTERVAL_US);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int rtsn_change_mode(struct rtsn_private *priv, enum rtsn_mode mode)
+{
+ int ret;
+
+ rtsn_write(priv, OCR, mode);
+ ret = rtsn_wait_mode(priv, mode);
+ if (ret)
+ netdev_err(priv->ndev, "Failed to switch operation mode\n");
+ return ret;
+}
+
+static int rtsn_get_data_irq_status(struct rtsn_private *priv)
+{
+ u32 val;
+
+	/* Mask out everything but the per-chain TX/RX status bits. */
+	val = rtsn_read(priv, TDIS0) & TDIS_TDS(TX_CHAIN_IDX);
+	val |= rtsn_read(priv, RDIS0) & RDIS_RDS(RX_CHAIN_IDX);
+
+ return val;
+}
+
+static irqreturn_t rtsn_irq(int irq, void *dev_id)
+{
+ struct rtsn_private *priv = dev_id;
+ int ret = IRQ_NONE;
+
+ spin_lock(&priv->lock);
+
+ if (rtsn_get_data_irq_status(priv)) {
+ /* Clear TX/RX irq status */
+ rtsn_write(priv, TDIS0, TDIS_TDS(TX_CHAIN_IDX));
+ rtsn_write(priv, RDIS0, RDIS_RDS(RX_CHAIN_IDX));
+
+ if (napi_schedule_prep(&priv->napi)) {
+ /* Disable TX/RX interrupts */
+ rtsn_ctrl_data_irq(priv, false);
+
+ __napi_schedule(&priv->napi);
+ }
+
+ ret = IRQ_HANDLED;
+ }
+
+ spin_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int rtsn_request_irq(unsigned int irq, irq_handler_t handler,
+ unsigned long flags, struct rtsn_private *priv,
+ const char *ch)
+{
+ char *name;
+ int ret;
+
+ name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL, "%s:%s",
+ priv->ndev->name, ch);
+ if (!name)
+ return -ENOMEM;
+
+ ret = request_irq(irq, handler, flags, name, priv);
+ if (ret)
+ netdev_err(priv->ndev, "Cannot request IRQ %s\n", name);
+
+ return ret;
+}
+
+static void rtsn_free_irqs(struct rtsn_private *priv)
+{
+ free_irq(priv->tx_data_irq, priv);
+ free_irq(priv->rx_data_irq, priv);
+}
+
+static int rtsn_request_irqs(struct rtsn_private *priv)
+{
+ int ret;
+
+ priv->rx_data_irq = platform_get_irq_byname(priv->pdev, "rx");
+ if (priv->rx_data_irq < 0)
+ return priv->rx_data_irq;
+
+ priv->tx_data_irq = platform_get_irq_byname(priv->pdev, "tx");
+ if (priv->tx_data_irq < 0)
+ return priv->tx_data_irq;
+
+ ret = rtsn_request_irq(priv->tx_data_irq, rtsn_irq, 0, priv, "tx");
+ if (ret)
+ return ret;
+
+ ret = rtsn_request_irq(priv->rx_data_irq, rtsn_irq, 0, priv, "rx");
+ if (ret) {
+ free_irq(priv->tx_data_irq, priv);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtsn_reset(struct rtsn_private *priv)
+{
+ reset_control_reset(priv->reset);
+ mdelay(1);
+
+ return rtsn_wait_mode(priv, OCR_OPC_DISABLE);
+}
+
+static int rtsn_axibmi_init(struct rtsn_private *priv)
+{
+ int ret;
+
+ ret = rtsn_reg_wait(priv, RR, RR_RST, RR_RST_COMPLETE);
+ if (ret)
+ return ret;
+
+ /* Set AXIWC */
+ rtsn_write(priv, AXIWC, AXIWC_DEFAULT);
+
+ /* Set AXIRC */
+ rtsn_write(priv, AXIRC, AXIRC_DEFAULT);
+
+ /* TX Descriptor chain setting */
+ rtsn_write(priv, TATLS0, TATLS0_TEDE | TATLS0_TATEN(TX_CHAIN_IDX));
+ rtsn_write(priv, TATLS1, priv->tx_desc_bat_dma + TX_CHAIN_ADDR_OFFSET);
+ rtsn_write(priv, TATLR, TATLR_TATL);
+
+ ret = rtsn_reg_wait(priv, TATLR, TATLR_TATL, 0);
+ if (ret)
+ return ret;
+
+ /* RX Descriptor chain setting */
+ rtsn_write(priv, RATLS0,
+ RATLS0_RETS | RATLS0_REDE | RATLS0_RATEN(RX_CHAIN_IDX));
+ rtsn_write(priv, RATLS1, priv->rx_desc_bat_dma + RX_CHAIN_ADDR_OFFSET);
+ rtsn_write(priv, RATLR, RATLR_RATL);
+
+ ret = rtsn_reg_wait(priv, RATLR, RATLR_RATL, 0);
+ if (ret)
+ return ret;
+
+ /* Enable TX/RX interrupts */
+ rtsn_ctrl_data_irq(priv, true);
+
+ return 0;
+}
+
+static void rtsn_mhd_init(struct rtsn_private *priv)
+{
+ /* TX General setting */
+ rtsn_write(priv, TGC1, TGC1_STTV_DEFAULT | TGC1_TQTM_SFM);
+ rtsn_write(priv, TMS0, TMS_MFS_MAX);
+
+ /* RX Filter IP */
+ rtsn_write(priv, CFCR0, CFCR_SDID(RX_CHAIN_IDX));
+ rtsn_write(priv, FMSCR, FMSCR_FMSIE(RX_CHAIN_IDX));
+}
+
+static int rtsn_get_phy_params(struct rtsn_private *priv)
+{
+ int ret;
+
+ ret = of_get_phy_mode(priv->pdev->dev.of_node, &priv->iface);
+ if (ret)
+ return ret;
+
+ switch (priv->iface) {
+ case PHY_INTERFACE_MODE_MII:
+ priv->speed = 100;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ priv->speed = 1000;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void rtsn_set_phy_interface(struct rtsn_private *priv)
+{
+ u32 val;
+
+ switch (priv->iface) {
+ case PHY_INTERFACE_MODE_MII:
+ val = MPIC_PIS_MII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val = MPIC_PIS_GMII;
+ break;
+ default:
+ return;
+ }
+
+ rtsn_modify(priv, MPIC, MPIC_PIS_MASK, val);
+}
+
+static void rtsn_set_rate(struct rtsn_private *priv)
+{
+ u32 val;
+
+ switch (priv->speed) {
+ case 10:
+ val = MPIC_LSC_10M;
+ break;
+ case 100:
+ val = MPIC_LSC_100M;
+ break;
+ case 1000:
+ val = MPIC_LSC_1G;
+ break;
+ default:
+ return;
+ }
+
+ rtsn_modify(priv, MPIC, MPIC_LSC_MASK, val);
+}
+
+static int rtsn_rmac_init(struct rtsn_private *priv)
+{
+ const u8 *mac_addr = priv->ndev->dev_addr;
+
+ /* Set MAC address */
+ rtsn_write(priv, MRMAC0, (mac_addr[0] << 8) | mac_addr[1]);
+ rtsn_write(priv, MRMAC1, (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5]);
+
+ /* Set xMII type */
+ rtsn_set_phy_interface(priv);
+ rtsn_set_rate(priv);
+
+	/* Set MDIO bus timing (station management clock and hold time) */
+ rtsn_modify(priv, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
+ MPIC_PSMCS_DEFAULT | MPIC_PSMHT_DEFAULT);
+
+	/* Link verification */
+	rtsn_modify(priv, MLVC, MLVC_PLV, MLVC_PLV);
+
+	return rtsn_reg_wait(priv, MLVC, MLVC_PLV, 0);
+}
+
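+/* Full hardware bring-up. The individual blocks must be programmed while
+ * the controller sits in CONFIG mode; once done, switch to OPERATION mode
+ * (via DISABLE) to start accepting traffic.
+ */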
+static int rtsn_hw_init(struct rtsn_private *priv)
+{
+ int ret;
+
+ ret = rtsn_reset(priv);
+ if (ret)
+ return ret;
+
+ /* Change to CONFIG mode */
+ ret = rtsn_change_mode(priv, OCR_OPC_CONFIG);
+ if (ret)
+ return ret;
+
+ ret = rtsn_axibmi_init(priv);
+ if (ret)
+ return ret;
+
+ rtsn_mhd_init(priv);
+
+ ret = rtsn_rmac_init(priv);
+ if (ret)
+ return ret;
+
+ ret = rtsn_change_mode(priv, OCR_OPC_DISABLE);
+ if (ret)
+ return ret;
+
+	/* Change to OPERATION mode */
+	return rtsn_change_mode(priv, OCR_OPC_OPERATION);
+}
+
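+/* One MDIO bus transaction through the MPSM register: set up the PHY and
+ * register addresses, trigger the access with the PSME bit and busy-wait
+ * for it to clear. For reads the result is then extracted from the data
+ * field of MPSM.
+ */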
+static int rtsn_mii_access(struct mii_bus *bus, bool read, int phyad,
+ int regad, u16 data)
+{
+ struct rtsn_private *priv = bus->priv;
+ u32 val;
+ int ret;
+
+ val = MPSM_PDA(phyad) | MPSM_PRA(regad) | MPSM_PSME;
+
+ if (!read)
+ val |= MPSM_PSMAD | MPSM_PRD_SET(data);
+
+ rtsn_write(priv, MPSM, val);
+
+ ret = rtsn_reg_wait(priv, MPSM, MPSM_PSME, 0);
+ if (ret)
+ return ret;
+
+ if (read)
+ ret = MPSM_PRD_GET(rtsn_read(priv, MPSM));
+
+ return ret;
+}
+
+static int rtsn_mii_read(struct mii_bus *bus, int addr, int regnum)
+{
+ return rtsn_mii_access(bus, true, addr, regnum, 0);
+}
+
+static int rtsn_mii_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+{
+ return rtsn_mii_access(bus, false, addr, regnum, val);
+}
+
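+/* Allocate and register the MDIO bus. Registration probes the PHYs, which
+ * requires working MDIO access, so the controller is reset and placed in
+ * CONFIG mode with the MDIO bus timing programmed beforehand.
+ */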
+static int rtsn_mdio_alloc(struct rtsn_private *priv)
+{
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
+ struct device_node *mdio_node;
+ struct mii_bus *mii;
+ int ret;
+
+ mii = mdiobus_alloc();
+ if (!mii)
+ return -ENOMEM;
+
+ mdio_node = of_get_child_by_name(dev->of_node, "mdio");
+ if (!mdio_node) {
+ ret = -ENODEV;
+ goto out_free_bus;
+ }
+
+ /* Enter config mode before registering the MDIO bus */
+ ret = rtsn_reset(priv);
+ if (ret)
+ goto out_free_bus;
+
+ ret = rtsn_change_mode(priv, OCR_OPC_CONFIG);
+ if (ret)
+ goto out_free_bus;
+
+ rtsn_modify(priv, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
+ MPIC_PSMCS_DEFAULT | MPIC_PSMHT_DEFAULT);
+
+ /* Register the MDIO bus */
+ mii->name = "rtsn_mii";
+ snprintf(mii->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
+ mii->priv = priv;
+ mii->read = rtsn_mii_read;
+ mii->write = rtsn_mii_write;
+ mii->parent = dev;
+
+ ret = of_mdiobus_register(mii, mdio_node);
+ of_node_put(mdio_node);
+ if (ret)
+ goto out_free_bus;
+
+ priv->mii = mii;
+
+ return 0;
+
+out_free_bus:
+ mdiobus_free(mii);
+ return ret;
+}
+
+static void rtsn_mdio_free(struct rtsn_private *priv)
+{
+ mdiobus_unregister(priv->mii);
+ mdiobus_free(priv->mii);
+ priv->mii = NULL;
+}
+
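+/* phylib link-change callback. A speed change requires reprogramming the
+ * MPIC register, which is only possible in CONFIG mode, so briefly drop out
+ * of, and back into, the current operation mode.
+ */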
+static void rtsn_adjust_link(struct net_device *ndev)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+ bool new_state = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (phydev->link) {
+ if (phydev->speed != priv->speed) {
+ new_state = true;
+ priv->speed = phydev->speed;
+ }
+
+ if (!priv->link) {
+ new_state = true;
+ priv->link = phydev->link;
+ }
+ } else if (priv->link) {
+ new_state = true;
+ priv->link = 0;
+ priv->speed = 0;
+ }
+
+ if (new_state) {
+		/* The link speed can only be changed in CONFIG mode, so
+		 * transition there and then back to the original mode. Any
+		 * change to or from CONFIG or OPERATION must pass through
+		 * DISABLE, which stops Rx/Tx.
+		 */
+ enum rtsn_mode orgmode = rtsn_read_mode(priv);
+
+		/* Transition to CONFIG mode */
+ if (orgmode != OCR_OPC_CONFIG) {
+ if (orgmode != OCR_OPC_DISABLE &&
+ rtsn_change_mode(priv, OCR_OPC_DISABLE))
+ goto out;
+ if (rtsn_change_mode(priv, OCR_OPC_CONFIG))
+ goto out;
+ }
+
+ rtsn_set_rate(priv);
+
+ /* Transition to original mode */
+ if (orgmode != OCR_OPC_CONFIG) {
+ if (rtsn_change_mode(priv, OCR_OPC_DISABLE))
+ goto out;
+ if (orgmode != OCR_OPC_DISABLE &&
+ rtsn_change_mode(priv, orgmode))
+ goto out;
+ }
+ }
+out:
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (new_state)
+ phy_print_status(phydev);
+}
+
+static int rtsn_phy_init(struct rtsn_private *priv)
+{
+ struct device_node *np = priv->ndev->dev.parent->of_node;
+ struct phy_device *phydev;
+ struct device_node *phy;
+
+ priv->link = 0;
+
+ phy = of_parse_phandle(np, "phy-handle", 0);
+ if (!phy)
+ return -ENOENT;
+
+ phydev = of_phy_connect(priv->ndev, phy, rtsn_adjust_link, 0,
+ priv->iface);
+ of_node_put(phy);
+ if (!phydev)
+ return -ENOENT;
+
+ /* Only support full-duplex mode */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+
+ phy_attached_info(phydev);
+
+ return 0;
+}
+
+static void rtsn_phy_deinit(struct rtsn_private *priv)
+{
+ phy_disconnect(priv->ndev->phydev);
+ priv->ndev->phydev = NULL;
+}
+
+static int rtsn_init(struct rtsn_private *priv)
+{
+ int ret;
+
+ ret = rtsn_desc_alloc(priv);
+ if (ret)
+ return ret;
+
+ ret = rtsn_dmac_init(priv);
+ if (ret)
+ goto error_free_desc;
+
+ ret = rtsn_hw_init(priv);
+ if (ret)
+ goto error_free_chain;
+
+ ret = rtsn_phy_init(priv);
+ if (ret)
+ goto error_free_chain;
+
+ ret = rtsn_request_irqs(priv);
+ if (ret)
+ goto error_free_phy;
+
+ return 0;
+error_free_phy:
+ rtsn_phy_deinit(priv);
+error_free_chain:
+ rtsn_chain_free(priv);
+error_free_desc:
+ rtsn_desc_free(priv);
+ return ret;
+}
+
+static void rtsn_deinit(struct rtsn_private *priv)
+{
+ rtsn_free_irqs(priv);
+ rtsn_phy_deinit(priv);
+ rtsn_chain_free(priv);
+ rtsn_desc_free(priv);
+}
+
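+/* Pick a MAC address, in order of preference: from the device tree, from
+ * the MRMAC registers (e.g. left programmed by the boot loader), or failing
+ * both, a randomly generated one.
+ */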
+static void rtsn_parse_mac_address(struct device_node *np,
+ struct net_device *ndev)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ u8 addr[ETH_ALEN];
+ u32 mrmac0;
+ u32 mrmac1;
+
+ /* Try to read address from Device Tree. */
+ if (!of_get_mac_address(np, addr)) {
+ eth_hw_addr_set(ndev, addr);
+ return;
+ }
+
+ /* Try to read address from device. */
+ mrmac0 = rtsn_read(priv, MRMAC0);
+ mrmac1 = rtsn_read(priv, MRMAC1);
+
+ addr[0] = (mrmac0 >> 8) & 0xff;
+ addr[1] = (mrmac0 >> 0) & 0xff;
+ addr[2] = (mrmac1 >> 24) & 0xff;
+ addr[3] = (mrmac1 >> 16) & 0xff;
+ addr[4] = (mrmac1 >> 8) & 0xff;
+ addr[5] = (mrmac1 >> 0) & 0xff;
+
+ if (is_valid_ether_addr(addr)) {
+ eth_hw_addr_set(ndev, addr);
+ return;
+ }
+
+ /* Fallback to a random address */
+ eth_hw_addr_random(ndev);
+}
+
+static int rtsn_open(struct net_device *ndev)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ int ret;
+
+ napi_enable(&priv->napi);
+
+ ret = rtsn_init(priv);
+ if (ret) {
+ napi_disable(&priv->napi);
+ return ret;
+ }
+
+ phy_start(ndev->phydev);
+
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int rtsn_stop(struct net_device *ndev)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+
+ phy_stop(priv->ndev->phydev);
+ napi_disable(&priv->napi);
+ rtsn_change_mode(priv, OCR_OPC_DISABLE);
+ rtsn_deinit(priv);
+
+ return 0;
+}
+
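+/* Transmit path. Each skb is mapped into a single extended descriptor; if
+ * hardware timestamping is requested, the descriptor is tagged so the
+ * timestamp can later be matched to the skb. The final write to TRCR0
+ * kicks the DMA engine for the TX chain.
+ */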
+static netdev_tx_t rtsn_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ struct rtsn_ext_desc *desc;
+ int ret = NETDEV_TX_OK;
+ unsigned long flags;
+ dma_addr_t dma_addr;
+ int entry;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Drop packet if it won't fit in a single descriptor. */
+ if (skb->len >= TX_DS) {
+ priv->stats.tx_dropped++;
+ priv->stats.tx_errors++;
+ dev_kfree_skb_any(skb);
+ goto out;
+ }
+
+ if (priv->cur_tx - priv->dirty_tx > priv->num_tx_ring) {
+ netif_stop_subqueue(ndev, 0);
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ if (skb_put_padto(skb, ETH_ZLEN))
+ goto out;
+
+ dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
+ dev_kfree_skb_any(skb);
+ goto out;
+ }
+
+ entry = priv->cur_tx % priv->num_tx_ring;
+ priv->tx_skb[entry] = skb;
+ desc = &priv->tx_ring[entry];
+ desc->dptr = cpu_to_le32(dma_addr);
+ desc->info_ds = cpu_to_le16(skb->len);
+ desc->info1 = cpu_to_le64(skb->len);
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->ts_tag++;
+ desc->info_ds |= cpu_to_le16(TXC);
+ desc->info = priv->ts_tag;
+ }
+
+ skb_tx_timestamp(skb);
+ dma_wmb();
+
+ desc->die_dt = DT_FSINGLE | D_DIE;
+ priv->cur_tx++;
+
+ /* Start xmit */
+ rtsn_write(priv, TRCR0, BIT(TX_CHAIN_IDX));
+out:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return ret;
+}
+
+static void rtsn_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *storage)
+{
+	struct rtsn_private *priv = netdev_priv(ndev);
+
+	*storage = priv->stats;
+}
+
+static int rtsn_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ if (!netif_running(ndev))
+ return -ENODEV;
+
+ return phy_do_ioctl_running(ndev, ifr, cmd);
+}
+
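+/* Hardware timestamping is provided by the shared R-Car Gen4 gPTP block;
+ * the get/set handlers translate between the kernel hwtstamp config and the
+ * tstamp_{tx,rx}_ctrl state kept by the gPTP driver.
+ */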
+static int rtsn_hwtstamp_get(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct rcar_gen4_ptp_private *ptp_priv;
+ struct rtsn_private *priv;
+
+ if (!netif_running(ndev))
+ return -ENODEV;
+
+ priv = netdev_priv(ndev);
+ ptp_priv = priv->ptp_priv;
+
+ config->flags = 0;
+
+ config->tx_type =
+ ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+
+ switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
+ case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ break;
+ case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+ }
+
+ return 0;
+}
+
+static int rtsn_hwtstamp_set(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct rcar_gen4_ptp_private *ptp_priv;
+ struct rtsn_private *priv;
+ u32 tstamp_rx_ctrl;
+ u32 tstamp_tx_ctrl;
+
+ if (!netif_running(ndev))
+ return -ENODEV;
+
+ priv = netdev_priv(ndev);
+ ptp_priv = priv->ptp_priv;
+
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tstamp_tx_ctrl = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tstamp_rx_ctrl = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED |
+ RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
+ break;
+ default:
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED |
+ RCAR_GEN4_RXTSTAMP_TYPE_ALL;
+ break;
+ }
+
+ ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
+ ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
+
+ return 0;
+}
+
+static const struct net_device_ops rtsn_netdev_ops = {
+ .ndo_open = rtsn_open,
+ .ndo_stop = rtsn_stop,
+ .ndo_start_xmit = rtsn_start_xmit,
+ .ndo_get_stats64 = rtsn_get_stats64,
+ .ndo_eth_ioctl = rtsn_do_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_hwtstamp_set = rtsn_hwtstamp_set,
+ .ndo_hwtstamp_get = rtsn_hwtstamp_get,
+};
+
+static int rtsn_get_ts_info(struct net_device *ndev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+
+ info->phc_index = ptp_clock_index(priv->ptp_priv->clock);
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static const struct ethtool_ops rtsn_ethtool_ops = {
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = rtsn_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static const struct of_device_id rtsn_match_table[] = {
+ { .compatible = "renesas,r8a779g0-ethertsn", },
+ { /* Sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, rtsn_match_table);
+
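+/* The device exposes two named register regions: "tsnes" for the
+ * Ethernet-TSN endpoint itself and "gptp" for the associated gPTP timer.
+ */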
+static int rtsn_probe(struct platform_device *pdev)
+{
+ struct rtsn_private *priv;
+ struct net_device *ndev;
+ struct resource *res;
+ int ret;
+
+ ndev = alloc_etherdev_mqs(sizeof(struct rtsn_private), TX_NUM_CHAINS,
+ RX_NUM_CHAINS);
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ priv->pdev = pdev;
+ priv->ndev = ndev;
+ priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
+
+ spin_lock_init(&priv->lock);
+ platform_set_drvdata(pdev, priv);
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto error_free;
+ }
+
+ priv->reset = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->reset)) {
+ ret = PTR_ERR(priv->reset);
+ goto error_free;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tsnes");
+ if (!res) {
+ dev_err(&pdev->dev, "Can't find tsnes resource\n");
+ ret = -EINVAL;
+ goto error_free;
+ }
+
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto error_free;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ ndev->features = NETIF_F_RXCSUM;
+ ndev->hw_features = NETIF_F_RXCSUM;
+ ndev->base_addr = res->start;
+ ndev->netdev_ops = &rtsn_netdev_ops;
+ ndev->ethtool_ops = &rtsn_ethtool_ops;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gptp");
+ if (!res) {
+ dev_err(&pdev->dev, "Can't find gptp resource\n");
+ ret = -EINVAL;
+ goto error_free;
+ }
+
+ priv->ptp_priv->addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->ptp_priv->addr)) {
+ ret = PTR_ERR(priv->ptp_priv->addr);
+ goto error_free;
+ }
+
+ ret = rtsn_get_phy_params(priv);
+ if (ret)
+ goto error_free;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ netif_napi_add(ndev, &priv->napi, rtsn_poll);
+
+ rtsn_parse_mac_address(pdev->dev.of_node, ndev);
+
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+
+ device_set_wakeup_capable(&pdev->dev, 1);
+
+ ret = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT,
+ clk_get_rate(priv->clk));
+ if (ret)
+ goto error_pm;
+
+ ret = rtsn_mdio_alloc(priv);
+ if (ret)
+ goto error_ptp;
+
+ ret = register_netdev(ndev);
+ if (ret)
+ goto error_mdio;
+
+ netdev_info(ndev, "MAC address %pM\n", ndev->dev_addr);
+
+ return 0;
+
+error_mdio:
+ rtsn_mdio_free(priv);
+error_ptp:
+ rcar_gen4_ptp_unregister(priv->ptp_priv);
+error_pm:
+ netif_napi_del(&priv->napi);
+ rtsn_change_mode(priv, OCR_OPC_DISABLE);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+error_free:
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static void rtsn_remove(struct platform_device *pdev)
+{
+ struct rtsn_private *priv = platform_get_drvdata(pdev);
+
+ unregister_netdev(priv->ndev);
+ rtsn_mdio_free(priv);
+ rcar_gen4_ptp_unregister(priv->ptp_priv);
+ rtsn_change_mode(priv, OCR_OPC_DISABLE);
+ netif_napi_del(&priv->napi);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ free_netdev(priv->ndev);
+}
+
+static struct platform_driver rtsn_driver = {
+ .probe = rtsn_probe,
+ .remove = rtsn_remove,
+ .driver = {
+ .name = "rtsn",
+ .of_match_table = rtsn_match_table,
+ }
+};
+module_platform_driver(rtsn_driver);
+
+MODULE_AUTHOR("Phong Hoang, Niklas Söderlund");
+MODULE_DESCRIPTION("Renesas Ethernet-TSN device driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/renesas/rtsn.h b/drivers/net/ethernet/renesas/rtsn.h
new file mode 100644
index 000000000000..3183e80d7e6b
--- /dev/null
+++ b/drivers/net/ethernet/renesas/rtsn.h
@@ -0,0 +1,464 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Renesas Ethernet-TSN device driver
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ * Copyright (C) 2023 Niklas Söderlund <niklas.soderlund@ragnatech.se>
+ */
+
+#ifndef __RTSN_H__
+#define __RTSN_H__
+
+#include <linux/types.h>
+
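+/* Base offsets of the register blocks within the "tsnes" register region. */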
+#define AXIBMI 0x0000
+#define TSNMHD 0x1000
+#define RMSO 0x2000
+#define RMRO 0x3800
+
+enum rtsn_reg {
+ AXIWC = AXIBMI + 0x0000,
+ AXIRC = AXIBMI + 0x0004,
+ TDPC0 = AXIBMI + 0x0010,
+ TFT = AXIBMI + 0x0090,
+ TATLS0 = AXIBMI + 0x00a0,
+ TATLS1 = AXIBMI + 0x00a4,
+ TATLR = AXIBMI + 0x00a8,
+ RATLS0 = AXIBMI + 0x00b0,
+ RATLS1 = AXIBMI + 0x00b4,
+ RATLR = AXIBMI + 0x00b8,
+ TSA0 = AXIBMI + 0x00c0,
+ TSS0 = AXIBMI + 0x00c4,
+ TRCR0 = AXIBMI + 0x0140,
+ RIDAUAS0 = AXIBMI + 0x0180,
+ RR = AXIBMI + 0x0200,
+ TATS = AXIBMI + 0x0210,
+ TATSR0 = AXIBMI + 0x0214,
+ TATSR1 = AXIBMI + 0x0218,
+ TATSR2 = AXIBMI + 0x021c,
+ RATS = AXIBMI + 0x0220,
+ RATSR0 = AXIBMI + 0x0224,
+ RATSR1 = AXIBMI + 0x0228,
+ RATSR2 = AXIBMI + 0x022c,
+ RIDASM0 = AXIBMI + 0x0240,
+ RIDASAM0 = AXIBMI + 0x0244,
+ RIDACAM0 = AXIBMI + 0x0248,
+ EIS0 = AXIBMI + 0x0300,
+ EIE0 = AXIBMI + 0x0304,
+ EID0 = AXIBMI + 0x0308,
+ EIS1 = AXIBMI + 0x0310,
+ EIE1 = AXIBMI + 0x0314,
+ EID1 = AXIBMI + 0x0318,
+ TCEIS0 = AXIBMI + 0x0340,
+ TCEIE0 = AXIBMI + 0x0344,
+ TCEID0 = AXIBMI + 0x0348,
+ RFSEIS0 = AXIBMI + 0x04c0,
+ RFSEIE0 = AXIBMI + 0x04c4,
+ RFSEID0 = AXIBMI + 0x04c8,
+ RFEIS0 = AXIBMI + 0x0540,
+ RFEIE0 = AXIBMI + 0x0544,
+ RFEID0 = AXIBMI + 0x0548,
+ RCEIS0 = AXIBMI + 0x05c0,
+ RCEIE0 = AXIBMI + 0x05c4,
+ RCEID0 = AXIBMI + 0x05c8,
+ RIDAOIS = AXIBMI + 0x0640,
+ RIDAOIE = AXIBMI + 0x0644,
+ RIDAOID = AXIBMI + 0x0648,
+ TSFEIS = AXIBMI + 0x06c0,
+ TSFEIE = AXIBMI + 0x06c4,
+ TSFEID = AXIBMI + 0x06c8,
+ TSCEIS = AXIBMI + 0x06d0,
+ TSCEIE = AXIBMI + 0x06d4,
+ TSCEID = AXIBMI + 0x06d8,
+ DIS = AXIBMI + 0x0b00,
+ DIE = AXIBMI + 0x0b04,
+ DID = AXIBMI + 0x0b08,
+ TDIS0 = AXIBMI + 0x0b10,
+ TDIE0 = AXIBMI + 0x0b14,
+ TDID0 = AXIBMI + 0x0b18,
+ RDIS0 = AXIBMI + 0x0b90,
+ RDIE0 = AXIBMI + 0x0b94,
+ RDID0 = AXIBMI + 0x0b98,
+ TSDIS = AXIBMI + 0x0c10,
+ TSDIE = AXIBMI + 0x0c14,
+ TSDID = AXIBMI + 0x0c18,
+ GPOUT = AXIBMI + 0x6000,
+
+ OCR = TSNMHD + 0x0000,
+ OSR = TSNMHD + 0x0004,
+ SWR = TSNMHD + 0x0008,
+ SIS = TSNMHD + 0x000c,
+ GIS = TSNMHD + 0x0010,
+ GIE = TSNMHD + 0x0014,
+ GID = TSNMHD + 0x0018,
+ TIS1 = TSNMHD + 0x0020,
+ TIE1 = TSNMHD + 0x0024,
+ TID1 = TSNMHD + 0x0028,
+ TIS2 = TSNMHD + 0x0030,
+ TIE2 = TSNMHD + 0x0034,
+ TID2 = TSNMHD + 0x0038,
+ RIS = TSNMHD + 0x0040,
+ RIE = TSNMHD + 0x0044,
+ RID = TSNMHD + 0x0048,
+ TGC1 = TSNMHD + 0x0050,
+ TGC2 = TSNMHD + 0x0054,
+ TFS0 = TSNMHD + 0x0060,
+ TCF0 = TSNMHD + 0x0070,
+ TCR1 = TSNMHD + 0x0080,
+ TCR2 = TSNMHD + 0x0084,
+ TCR3 = TSNMHD + 0x0088,
+ TCR4 = TSNMHD + 0x008c,
+ TMS0 = TSNMHD + 0x0090,
+ TSR1 = TSNMHD + 0x00b0,
+ TSR2 = TSNMHD + 0x00b4,
+ TSR3 = TSNMHD + 0x00b8,
+ TSR4 = TSNMHD + 0x00bc,
+ TSR5 = TSNMHD + 0x00c0,
+ RGC = TSNMHD + 0x00d0,
+ RDFCR = TSNMHD + 0x00d4,
+ RCFCR = TSNMHD + 0x00d8,
+ REFCNCR = TSNMHD + 0x00dc,
+ RSR1 = TSNMHD + 0x00e0,
+ RSR2 = TSNMHD + 0x00e4,
+ RSR3 = TSNMHD + 0x00e8,
+ TCIS = TSNMHD + 0x01e0,
+ TCIE = TSNMHD + 0x01e4,
+ TCID = TSNMHD + 0x01e8,
+ TPTPC = TSNMHD + 0x01f0,
+ TTML = TSNMHD + 0x01f4,
+ TTJ = TSNMHD + 0x01f8,
+ TCC = TSNMHD + 0x0200,
+ TCS = TSNMHD + 0x0204,
+ TGS = TSNMHD + 0x020c,
+ TACST0 = TSNMHD + 0x0210,
+ TACST1 = TSNMHD + 0x0214,
+ TACST2 = TSNMHD + 0x0218,
+ TALIT0 = TSNMHD + 0x0220,
+ TALIT1 = TSNMHD + 0x0224,
+ TALIT2 = TSNMHD + 0x0228,
+ TAEN0 = TSNMHD + 0x0230,
+ TAEN1 = TSNMHD + 0x0234,
+ TASFE = TSNMHD + 0x0240,
+ TACLL0 = TSNMHD + 0x0250,
+ TACLL1 = TSNMHD + 0x0254,
+ TACLL2 = TSNMHD + 0x0258,
+ CACC = TSNMHD + 0x0260,
+ CCS = TSNMHD + 0x0264,
+ CAIV0 = TSNMHD + 0x0270,
+ CAUL0 = TSNMHD + 0x0290,
+ TOCST0 = TSNMHD + 0x0300,
+ TOCST1 = TSNMHD + 0x0304,
+ TOCST2 = TSNMHD + 0x0308,
+ TOLIT0 = TSNMHD + 0x0310,
+ TOLIT1 = TSNMHD + 0x0314,
+ TOLIT2 = TSNMHD + 0x0318,
+ TOEN0 = TSNMHD + 0x0320,
+ TOEN1 = TSNMHD + 0x0324,
+ TOSFE = TSNMHD + 0x0330,
+ TCLR0 = TSNMHD + 0x0340,
+ TCLR1 = TSNMHD + 0x0344,
+ TCLR2 = TSNMHD + 0x0348,
+ TSMS = TSNMHD + 0x0350,
+ COCC = TSNMHD + 0x0360,
+ COIV0 = TSNMHD + 0x03b0,
+ COUL0 = TSNMHD + 0x03d0,
+ QSTMACU0 = TSNMHD + 0x0400,
+ QSTMACD0 = TSNMHD + 0x0404,
+ QSTMAMU0 = TSNMHD + 0x0408,
+ QSTMAMD0 = TSNMHD + 0x040c,
+ QSFTVL0 = TSNMHD + 0x0410,
+ QSFTVLM0 = TSNMHD + 0x0414,
+ QSFTMSD0 = TSNMHD + 0x0418,
+ QSFTGMI0 = TSNMHD + 0x041c,
+ QSFTLS = TSNMHD + 0x0600,
+ QSFTLIS = TSNMHD + 0x0604,
+ QSFTLIE = TSNMHD + 0x0608,
+ QSFTLID = TSNMHD + 0x060c,
+ QSMSMC = TSNMHD + 0x0610,
+ QSGTMC = TSNMHD + 0x0614,
+ QSEIS = TSNMHD + 0x0618,
+ QSEIE = TSNMHD + 0x061c,
+ QSEID = TSNMHD + 0x0620,
+ QGACST0 = TSNMHD + 0x0630,
+ QGACST1 = TSNMHD + 0x0634,
+ QGACST2 = TSNMHD + 0x0638,
+ QGALIT1 = TSNMHD + 0x0640,
+ QGALIT2 = TSNMHD + 0x0644,
+ QGAEN0 = TSNMHD + 0x0648,
+	QGAEN1 = TSNMHD + 0x064c,
+ QGIGS = TSNMHD + 0x0650,
+ QGGC = TSNMHD + 0x0654,
+ QGATL0 = TSNMHD + 0x0664,
+ QGATL1 = TSNMHD + 0x0668,
+ QGATL2 = TSNMHD + 0x066c,
+ QGOCST0 = TSNMHD + 0x0670,
+ QGOCST1 = TSNMHD + 0x0674,
+ QGOCST2 = TSNMHD + 0x0678,
+ QGOLIT0 = TSNMHD + 0x067c,
+ QGOLIT1 = TSNMHD + 0x0680,
+ QGOLIT2 = TSNMHD + 0x0684,
+ QGOEN0 = TSNMHD + 0x0688,
+ QGOEN1 = TSNMHD + 0x068c,
+	QGTR0 = TSNMHD + 0x0690,
+ QGTR1 = TSNMHD + 0x0694,
+ QGTR2 = TSNMHD + 0x0698,
+ QGFSMS = TSNMHD + 0x069c,
+ QTMIS = TSNMHD + 0x06e0,
+ QTMIE = TSNMHD + 0x06e4,
+ QTMID = TSNMHD + 0x06e8,
+ QMEC = TSNMHD + 0x0700,
+ QMMC = TSNMHD + 0x0704,
+ QRFDC = TSNMHD + 0x0708,
+ QYFDC = TSNMHD + 0x070c,
+ QVTCMC0 = TSNMHD + 0x0710,
+ QMCBSC0 = TSNMHD + 0x0750,
+ QMCIRC0 = TSNMHD + 0x0790,
+ QMEBSC0 = TSNMHD + 0x07d0,
+	QMEIRC0 = TSNMHD + 0x0810,
+ QMCFC = TSNMHD + 0x0850,
+ QMEIS = TSNMHD + 0x0860,
+ QMEIE = TSNMHD + 0x0864,
+ QMEID = TSNMHD + 0x086c,
+ QSMFC0 = TSNMHD + 0x0870,
+ QMSPPC0 = TSNMHD + 0x08b0,
+ QMSRPC0 = TSNMHD + 0x08f0,
+ QGPPC0 = TSNMHD + 0x0930,
+ QGRPC0 = TSNMHD + 0x0950,
+ QMDPC0 = TSNMHD + 0x0970,
+ QMGPC0 = TSNMHD + 0x09b0,
+ QMYPC0 = TSNMHD + 0x09f0,
+ QMRPC0 = TSNMHD + 0x0a30,
+ MQSTMACU = TSNMHD + 0x0a70,
+ MQSTMACD = TSNMHD + 0x0a74,
+ MQSTMAMU = TSNMHD + 0x0a78,
+ MQSTMAMD = TSNMHD + 0x0a7c,
+ MQSFTVL = TSNMHD + 0x0a80,
+ MQSFTVLM = TSNMHD + 0x0a84,
+ MQSFTMSD = TSNMHD + 0x0a88,
+ MQSFTGMI = TSNMHD + 0x0a8c,
+
+ CFCR0 = RMSO + 0x0800,
+ FMSCR = RMSO + 0x0c10,
+
+ MMC = RMRO + 0x0000,
+ MPSM = RMRO + 0x0010,
+ MPIC = RMRO + 0x0014,
+ MTFFC = RMRO + 0x0020,
+ MTPFC = RMRO + 0x0024,
+ MTATC0 = RMRO + 0x0040,
+ MRGC = RMRO + 0x0080,
+ MRMAC0 = RMRO + 0x0084,
+ MRMAC1 = RMRO + 0x0088,
+ MRAFC = RMRO + 0x008c,
+ MRSCE = RMRO + 0x0090,
+ MRSCP = RMRO + 0x0094,
+ MRSCC = RMRO + 0x0098,
+ MRFSCE = RMRO + 0x009c,
+ MRFSCP = RMRO + 0x00a0,
+ MTRC = RMRO + 0x00a4,
+ MPFC = RMRO + 0x0100,
+ MLVC = RMRO + 0x0340,
+ MEEEC = RMRO + 0x0350,
+ MLBC = RMRO + 0x0360,
+ MGMR = RMRO + 0x0400,
+ MMPFTCT = RMRO + 0x0410,
+ MAPFTCT = RMRO + 0x0414,
+ MPFRCT = RMRO + 0x0418,
+ MFCICT = RMRO + 0x041c,
+ MEEECT = RMRO + 0x0420,
+ MEIS = RMRO + 0x0500,
+ MEIE = RMRO + 0x0504,
+ MEID = RMRO + 0x0508,
+ MMIS0 = RMRO + 0x0510,
+ MMIE0 = RMRO + 0x0514,
+ MMID0 = RMRO + 0x0518,
+ MMIS1 = RMRO + 0x0520,
+ MMIE1 = RMRO + 0x0524,
+ MMID1 = RMRO + 0x0528,
+ MMIS2 = RMRO + 0x0530,
+ MMIE2 = RMRO + 0x0534,
+ MMID2 = RMRO + 0x0538,
+ MXMS = RMRO + 0x0600,
+};
+
+/* AXIBMI */
+#define RR_RATRR BIT(0)
+#define RR_TATRR BIT(1)
+#define RR_RST (RR_RATRR | RR_TATRR)
+#define RR_RST_COMPLETE 0x03
+
+#define AXIWC_DEFAULT 0xffff
+#define AXIRC_DEFAULT 0xffff
+
+#define TATLS0_TEDE BIT(1)
+#define TATLS0_TATEN_SHIFT 24
+#define TATLS0_TATEN(n) ((n) << TATLS0_TATEN_SHIFT)
+#define TATLR_TATL BIT(31)
+
+#define RATLS0_RETS BIT(2)
+#define RATLS0_REDE BIT(3)
+#define RATLS0_RATEN_SHIFT 24
+#define RATLS0_RATEN(n) ((n) << RATLS0_RATEN_SHIFT)
+#define RATLR_RATL BIT(31)
+
+#define DIE_DID_TDICX(n) BIT((n))
+#define DIE_DID_RDICX(n) BIT((n) + 8)
+#define TDIE_TDID_TDX(n) BIT(n)
+#define RDIE_RDID_RDX(n) BIT(n)
+#define TDIS_TDS(n) BIT(n)
+#define RDIS_RDS(n) BIT(n)
+
+/* MHD */
+#define OSR_OPS 0x07
+#define SWR_SWR BIT(0)
+
+#define TGC1_TQTM_SFM 0xff00
+#define TGC1_STTV_DEFAULT 0x03
+
+#define TMS_MFS_MAX 0x2800
+
+/* RMAC System */
+#define CFCR_SDID(n) ((n) << 16)
+#define FMSCR_FMSIE(n) ((n) << 0)
+
+/* RMAC */
+#define MPIC_PIS_MASK GENMASK(1, 0)
+#define MPIC_PIS_MII 0
+#define MPIC_PIS_RMII 0x01
+#define MPIC_PIS_GMII 0x02
+#define MPIC_PIS_RGMII 0x03
+#define MPIC_LSC_SHIFT 2
+#define MPIC_LSC_MASK GENMASK(3, MPIC_LSC_SHIFT)
+#define MPIC_LSC_10M (0 << MPIC_LSC_SHIFT)
+#define MPIC_LSC_100M (0x01 << MPIC_LSC_SHIFT)
+#define MPIC_LSC_1G (0x02 << MPIC_LSC_SHIFT)
+#define MPIC_PSMCS_SHIFT 16
+#define MPIC_PSMCS_MASK GENMASK(21, MPIC_PSMCS_SHIFT)
+#define MPIC_PSMCS_DEFAULT (0x0a << MPIC_PSMCS_SHIFT)
+#define MPIC_PSMHT_SHIFT 24
+#define MPIC_PSMHT_MASK GENMASK(26, MPIC_PSMHT_SHIFT)
+#define MPIC_PSMHT_DEFAULT (0x07 << MPIC_PSMHT_SHIFT)
+
+#define MLVC_PASE BIT(8)
+#define MLVC_PSE BIT(16)
+#define MLVC_PLV BIT(17)
+
+#define MPSM_PSME BIT(0)
+#define MPSM_PSMAD BIT(1)
+#define MPSM_PDA_SHIFT 3
+#define MPSM_PDA_MASK GENMASK(7, 3)
+#define MPSM_PDA(n) (((n) << MPSM_PDA_SHIFT) & MPSM_PDA_MASK)
+#define MPSM_PRA_SHIFT 8
+#define MPSM_PRA_MASK GENMASK(12, 8)
+#define MPSM_PRA(n) (((n) << MPSM_PRA_SHIFT) & MPSM_PRA_MASK)
+#define MPSM_PRD_SHIFT 16
+#define MPSM_PRD_SET(n) ((n) << MPSM_PRD_SHIFT)
+#define MPSM_PRD_GET(n) ((n) >> MPSM_PRD_SHIFT)
+
+#define GPOUT_RDM BIT(13)
+#define GPOUT_TDM BIT(14)
+
+/* RTSN */
+#define RTSN_INTERVAL_US 1000
+#define RTSN_TIMEOUT_US 1000000
+
+#define TX_NUM_CHAINS 1
+#define RX_NUM_CHAINS 1
+
+#define TX_CHAIN_SIZE 1024
+#define RX_CHAIN_SIZE 1024
+
+#define TX_CHAIN_IDX 0
+#define RX_CHAIN_IDX 0
+
+#define TX_CHAIN_ADDR_OFFSET (sizeof(struct rtsn_desc) * TX_CHAIN_IDX)
+#define RX_CHAIN_ADDR_OFFSET (sizeof(struct rtsn_desc) * RX_CHAIN_IDX)
+
+#define PKT_BUF_SZ 1584
+#define RTSN_ALIGN 128
+
+enum rtsn_mode {
+ OCR_OPC_DISABLE,
+ OCR_OPC_CONFIG,
+ OCR_OPC_OPERATION,
+};
+
+/* Descriptors */
+enum RX_DS_CC_BIT {
+ RX_DS = 0x0fff, /* Data size */
+ RX_TR = 0x1000, /* Truncation indication */
+ RX_EI = 0x2000, /* Error indication */
+ RX_PS = 0xc000, /* Padding selection */
+};
+
+enum TX_FS_TAGL_BIT {
+ TX_DS = 0x0fff, /* Data size */
+ TX_TAGL = 0xf000, /* Frame tag LSBs */
+};
+
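+/* Descriptor types, set in the die_dt field of each descriptor. The upper
+ * nibble selects the type; D_DIE additionally requests a descriptor
+ * interrupt on completion.
+ */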
+enum DIE_DT {
+ /* HW/SW arbitration */
+ DT_FEMPTY_IS = 0x10,
+ DT_FEMPTY_IC = 0x20,
+ DT_FEMPTY_ND = 0x30,
+ DT_FEMPTY = 0x40,
+ DT_FEMPTY_START = 0x50,
+ DT_FEMPTY_MID = 0x60,
+ DT_FEMPTY_END = 0x70,
+
+ /* Frame data */
+ DT_FSINGLE = 0x80,
+ DT_FSTART = 0x90,
+ DT_FMID = 0xa0,
+ DT_FEND = 0xb0,
+
+ /* Chain control */
+ DT_LEMPTY = 0xc0,
+ DT_EEMPTY = 0xd0,
+ DT_LINK = 0xe0,
+ DT_EOS = 0xf0,
+
+ DT_MASK = 0xf0,
+ D_DIE = 0x08,
+};
+
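+/* Hardware descriptor layouts. All variants share the same leading fields
+ * (data size, tag, descriptor type and data pointer); the _ext variants add
+ * a 64-bit info1 word and the _ts variants append a frame timestamp.
+ */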
+struct rtsn_desc {
+ __le16 info_ds;
+ __u8 info;
+ u8 die_dt;
+ __le32 dptr;
+} __packed;
+
+struct rtsn_ts_desc {
+ __le16 info_ds;
+ __u8 info;
+ u8 die_dt;
+ __le32 dptr;
+ __le32 ts_nsec;
+ __le32 ts_sec;
+} __packed;
+
+struct rtsn_ext_desc {
+ __le16 info_ds;
+ __u8 info;
+ u8 die_dt;
+ __le32 dptr;
+ __le64 info1;
+} __packed;
+
+struct rtsn_ext_ts_desc {
+ __le16 info_ds;
+ __u8 info;
+ u8 die_dt;
+ __le32 dptr;
+ __le64 info1;
+ __le32 ts_nsec;
+ __le32 ts_sec;
+} __packed;
+
+enum EXT_INFO_DS_BIT {
+ TXC = 0x4000,
+};
+
+#endif
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 475e1e8c1d35..5fc8027c92c7 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -50,7 +50,7 @@
* the macros available to do this only define GCC 8.
*/
__diag_push();
-__diag_ignore(GCC, 8, "-Woverride-init",
+__diag_ignore_all("-Woverride-init",
"logic to initialize all and then override some is OK");
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
SH_ETH_OFFSET_DEFAULTS,
@@ -2624,7 +2624,7 @@ static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu)
if (netif_running(ndev))
return -EBUSY;
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
netdev_update_features(ndev);
return 0;
@@ -3494,10 +3494,12 @@ static int sh_eth_suspend(struct device *dev)
netif_device_detach(ndev);
+ rtnl_lock();
if (mdp->wol_enabled)
ret = sh_eth_wol_setup(ndev);
else
ret = sh_eth_close(ndev);
+ rtnl_unlock();
return ret;
}
@@ -3511,10 +3513,12 @@ static int sh_eth_resume(struct device *dev)
if (!netif_running(ndev))
return 0;
+ rtnl_lock();
if (mdp->wol_enabled)
ret = sh_eth_wol_restore(ndev);
else
ret = sh_eth_open(ndev);
+ rtnl_unlock();
if (ret < 0)
return ret;
@@ -3560,7 +3564,7 @@ MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
static struct platform_driver sh_eth_driver = {
.probe = sh_eth_drv_probe,
- .remove_new = sh_eth_drv_remove,
+ .remove = sh_eth_drv_remove,
.id_table = sh_eth_id_table,
.driver = {
.name = CARDNAME,