author      Salil <salil.mehta@huawei.com>           2015-12-03 12:17:53 +0000
committer   David S. Miller <davem@davemloft.net>    2015-12-04 14:36:15 -0500
commit      13ac695e7ea16cb27b804fadf2ff569dbcab6af1 (patch)
tree        cbe37e07ae5e30dbf4d835c56be76317b0e4d11d /drivers/net/ethernet/hisilicon/hns/hns_enet.c
parent      ce3ea1c705761fc73ce4a08f301c93fcba39c58a (diff)
net:hns: Add support of Hip06 SoC to the Hisilicon Network Subsystem
This patchset adds support for the Hisilicon Hip06 SoC to the existing HNS ethernet driver. The changes in the driver are mainly due to changes in the DMA descriptor provided by the Hip06 ethernet hardware. These changes need to co-exist with the already present Hip05 DMA descriptor and its operating functions. The correct type of DMA descriptor is chosen dynamically depending upon the version of the hardware (i.e. V1/hip05 or V2/hip06; see the existing hisilicon-hns-nic.txt binding file for a detailed description). Other changes include updates to the SBM, DSAF and PPE modules as well. Driver changes related to the newly added ethernet hardware features in Hip06 will be added as separate patches on top of this one and subsequent patches.

Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
Signed-off-by: yankejian <yankejian@huawei.com>
Signed-off-by: huangdaode <huangdaode@hisilicon.com>
Signed-off-by: lipeng <lipeng321@huawei.com>
Signed-off-by: lisheng <lisheng011@huawei.com>
Signed-off-by: Fengguang Wu <fengguang.wu@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
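[Editorial note] The dual-descriptor support is built around a per-device operations table that is selected once at probe time rather than branching on the hardware version in the packet fast path; in the diff below this is done by hns_nic_set_priv_ops(), which points priv->ops.fill_desc, priv->ops.get_rxd_bnum and priv->ops.maybe_stop_tx at either the V1 or V2 helpers. The standalone C sketch below illustrates only that dispatch pattern; all names, masks and printouts are simplified, hypothetical stand-ins, not the driver's real structures.

/*
 * Minimal, self-contained sketch of the version-based dispatch pattern
 * (compare hns_nic_set_priv_ops() in the diff).  Hypothetical names only.
 */
#include <stdio.h>

enum hw_version { AE_V1_HIP05, AE_V2_HIP06 };

struct ring_ops {
	/* fill one TX buffer descriptor in the hardware-specific layout */
	void (*fill_desc)(int size, int frag_end);
	/* extract the buffer-descriptor count from an RX flag word */
	int  (*get_rxd_bnum)(unsigned int bnum_flag);
};

static void fill_desc_v1(int size, int frag_end)
{
	printf("V1 (hip05) descriptor: size=%d frag_end=%d\n", size, frag_end);
}

static void fill_desc_v2(int size, int frag_end)
{
	/* V2 descriptors carry extra checksum/TSO fields; elided here */
	printf("V2 (hip06) descriptor: size=%d frag_end=%d\n", size, frag_end);
}

static int get_rxd_bnum_v1(unsigned int bnum_flag)
{
	return bnum_flag & 0xf;		/* illustrative mask; V1 stores the count directly */
}

static int get_rxd_bnum_v2(unsigned int bnum_flag)
{
	return (bnum_flag & 0xf) + 1;	/* V2 stores the count minus one */
}

/* chosen once, e.g. at probe time, based on the hardware revision */
static struct ring_ops select_ops(enum hw_version ver)
{
	struct ring_ops ops;

	if (ver == AE_V1_HIP05) {
		ops.fill_desc    = fill_desc_v1;
		ops.get_rxd_bnum = get_rxd_bnum_v1;
	} else {
		ops.fill_desc    = fill_desc_v2;
		ops.get_rxd_bnum = get_rxd_bnum_v2;
	}
	return ops;
}

int main(void)
{
	struct ring_ops ops = select_ops(AE_V2_HIP06);

	/* hot paths call through the table; no per-packet version checks */
	ops.fill_desc(1500, 1);
	printf("bnum=%d\n", ops.get_rxd_bnum(0x3));
	return 0;
}

Because the transmit path always calls through this table, the existing V1 fill_desc() gains an extra mtu parameter in the patch; the V2 variant uses it to derive the MSS for TSO (mss = mtu - skb_tmp_len - ETH_FCS_LEN).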
Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns/hns_enet.c')
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c  360
1 file changed, 288 insertions, 72 deletions
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 08cef0dfb5db..0ca7fa90a193 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -34,9 +34,103 @@
#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1
+#define BD_MAX_SEND_SIZE 8191
+#define SKB_TMP_LEN(SKB) \
+ (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
+
+static void fill_v2_desc(struct hnae_ring *ring, void *priv,
+ int size, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu)
+{
+ struct hnae_desc *desc = &ring->desc[ring->next_to_use];
+ struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+ struct iphdr *iphdr;
+ struct ipv6hdr *ipv6hdr;
+ struct sk_buff *skb;
+ int skb_tmp_len;
+ __be16 protocol;
+ u8 bn_pid = 0;
+ u8 rrcfv = 0;
+ u8 ip_offset = 0;
+ u8 tvsvsn = 0;
+ u16 mss = 0;
+ u8 l4_len = 0;
+ u16 paylen = 0;
+
+ desc_cb->priv = priv;
+ desc_cb->length = size;
+ desc_cb->dma = dma;
+ desc_cb->type = type;
+
+ desc->addr = cpu_to_le64(dma);
+ desc->tx.send_size = cpu_to_le16((u16)size);
+
+ /*config bd buffer end */
+ hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
+ hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);
+
+ if (type == DESC_TYPE_SKB) {
+ skb = (struct sk_buff *)priv;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ skb_reset_mac_len(skb);
+ protocol = skb->protocol;
+ ip_offset = ETH_HLEN;
+
+ if (protocol == htons(ETH_P_8021Q)) {
+ ip_offset += VLAN_HLEN;
+ protocol = vlan_get_protocol(skb);
+ skb->protocol = protocol;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ iphdr = ip_hdr(skb);
+ hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1);
+ hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);
+
+ /* check for tcp/udp header */
+ if (iphdr->protocol == IPPROTO_TCP) {
+ hnae_set_bit(tvsvsn,
+ HNSV2_TXD_TSE_B, 1);
+ skb_tmp_len = SKB_TMP_LEN(skb);
+ l4_len = tcp_hdrlen(skb);
+ mss = mtu - skb_tmp_len - ETH_FCS_LEN;
+ paylen = skb->len - skb_tmp_len;
+ }
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
+ ipv6hdr = ipv6_hdr(skb);
+ hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);
+
+ /* check for tcp/udp header */
+ if (ipv6hdr->nexthdr == IPPROTO_TCP) {
+ hnae_set_bit(tvsvsn,
+ HNSV2_TXD_TSE_B, 1);
+ skb_tmp_len = SKB_TMP_LEN(skb);
+ l4_len = tcp_hdrlen(skb);
+ mss = mtu - skb_tmp_len - ETH_FCS_LEN;
+ paylen = skb->len - skb_tmp_len;
+ }
+ }
+ desc->tx.ip_offset = ip_offset;
+ desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn;
+ desc->tx.mss = cpu_to_le16(mss);
+ desc->tx.l4_len = l4_len;
+ desc->tx.paylen = cpu_to_le16(paylen);
+ }
+ }
+
+ hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end);
+
+ desc->tx.bn_pid = bn_pid;
+ desc->tx.ra_ri_cs_fe_vld = rrcfv;
+
+ ring_ptr_move_fw(ring, next_to_use);
+}
+
static void fill_desc(struct hnae_ring *ring, void *priv,
int size, dma_addr_t dma, int frag_end,
- int buf_num, enum hns_desc_type type)
+ int buf_num, enum hns_desc_type type, int mtu)
{
struct hnae_desc *desc = &ring->desc[ring->next_to_use];
struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
@@ -100,47 +194,64 @@ static void unfill_desc(struct hnae_ring *ring)
ring_ptr_move_bw(ring, next_to_use);
}
-int hns_nic_net_xmit_hw(struct net_device *ndev,
- struct sk_buff *skb,
- struct hns_nic_ring_data *ring_data)
+static int hns_nic_maybe_stop_tx(
+ struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
- struct hns_nic_priv *priv = netdev_priv(ndev);
- struct device *dev = priv->dev;
- struct hnae_ring *ring = ring_data->ring;
- struct netdev_queue *dev_queue;
- struct skb_frag_struct *frag;
+ struct sk_buff *skb = *out_skb;
+ struct sk_buff *new_skb = NULL;
int buf_num;
- dma_addr_t dma;
- int size, next_to_use;
- int i, j;
- struct sk_buff *new_skb;
-
- assert(ring->max_desc_num_per_pkt <= ring->desc_num);
/* no. of segments (plus a header) */
buf_num = skb_shinfo(skb)->nr_frags + 1;
if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
- if (ring_space(ring) < 1) {
- ring->stats.tx_busy++;
- goto out_net_tx_busy;
- }
+ if (ring_space(ring) < 1)
+ return -EBUSY;
new_skb = skb_copy(skb, GFP_ATOMIC);
- if (!new_skb) {
- ring->stats.sw_err_cnt++;
- netdev_err(ndev, "no memory to xmit!\n");
- goto out_err_tx_ok;
- }
+ if (!new_skb)
+ return -ENOMEM;
dev_kfree_skb_any(skb);
- skb = new_skb;
+ *out_skb = new_skb;
buf_num = 1;
- assert(skb_shinfo(skb)->nr_frags == 1);
} else if (buf_num > ring_space(ring)) {
+ return -EBUSY;
+ }
+
+ *bnum = buf_num;
+ return 0;
+}
+
+int hns_nic_net_xmit_hw(struct net_device *ndev,
+ struct sk_buff *skb,
+ struct hns_nic_ring_data *ring_data)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct device *dev = priv->dev;
+ struct hnae_ring *ring = ring_data->ring;
+ struct netdev_queue *dev_queue;
+ struct skb_frag_struct *frag;
+ int buf_num;
+ int seg_num;
+ dma_addr_t dma;
+ int size, next_to_use;
+ int i;
+
+ switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
+ case -EBUSY:
ring->stats.tx_busy++;
goto out_net_tx_busy;
+ case -ENOMEM:
+ ring->stats.sw_err_cnt++;
+ netdev_err(ndev, "no memory to xmit!\n");
+ goto out_err_tx_ok;
+ default:
+ break;
}
+
+ /* no. of segments (plus a header) */
+ seg_num = skb_shinfo(skb)->nr_frags + 1;
next_to_use = ring->next_to_use;
/* fill the first part */
@@ -151,11 +262,11 @@ int hns_nic_net_xmit_hw(struct net_device *ndev,
ring->stats.sw_err_cnt++;
goto out_err_tx_ok;
}
- fill_desc(ring, skb, size, dma, buf_num == 1 ? 1 : 0, buf_num,
- DESC_TYPE_SKB);
+ priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
+ buf_num, DESC_TYPE_SKB, ndev->mtu);
/* fill the fragments */
- for (i = 1; i < buf_num; i++) {
+ for (i = 1; i < seg_num; i++) {
frag = &skb_shinfo(skb)->frags[i - 1];
size = skb_frag_size(frag);
dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
@@ -164,8 +275,9 @@ int hns_nic_net_xmit_hw(struct net_device *ndev,
ring->stats.sw_err_cnt++;
goto out_map_frag_fail;
}
- fill_desc(ring, skb_frag_page(frag), size, dma,
- buf_num - 1 == i ? 1 : 0, buf_num, DESC_TYPE_PAGE);
+ priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
+ seg_num - 1 == i ? 1 : 0, buf_num,
+ DESC_TYPE_PAGE, ndev->mtu);
}
/*complete translate all packets*/
@@ -182,19 +294,20 @@ int hns_nic_net_xmit_hw(struct net_device *ndev,
out_map_frag_fail:
- for (j = i - 1; j > 0; j--) {
+ while (ring->next_to_use != next_to_use) {
unfill_desc(ring);
- next_to_use = ring->next_to_use;
- dma_unmap_page(dev, ring->desc_cb[next_to_use].dma,
- ring->desc_cb[next_to_use].length,
- DMA_TO_DEVICE);
+ if (ring->next_to_use != next_to_use)
+ dma_unmap_page(dev,
+ ring->desc_cb[ring->next_to_use].dma,
+ ring->desc_cb[ring->next_to_use].length,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(dev,
+ ring->desc_cb[next_to_use].dma,
+ ring->desc_cb[next_to_use].length,
+ DMA_TO_DEVICE);
}
- unfill_desc(ring);
- next_to_use = ring->next_to_use;
- dma_unmap_single(dev, ring->desc_cb[next_to_use].dma,
- ring->desc_cb[next_to_use].length, DMA_TO_DEVICE);
-
out_err_tx_ok:
dev_kfree_skb_any(skb);
@@ -329,11 +442,24 @@ hns_nic_reuse_page(struct hnae_desc_cb *desc_cb, int tsize, int last_offset)
}
}
+static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum)
+{
+ *out_bnum = hnae_get_field(bnum_flag,
+ HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1;
+}
+
+static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
+{
+ *out_bnum = hnae_get_field(bnum_flag,
+ HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
+}
+
static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
struct sk_buff **out_skb, int *out_bnum)
{
struct hnae_ring *ring = ring_data->ring;
struct net_device *ndev = ring_data->napi.dev;
+ struct hns_nic_priv *priv = netdev_priv(ndev);
struct sk_buff *skb;
struct hnae_desc *desc;
struct hnae_desc_cb *desc_cb;
@@ -345,19 +471,36 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
desc = &ring->desc[ring->next_to_clean];
desc_cb = &ring->desc_cb[ring->next_to_clean];
- length = le16_to_cpu(desc->rx.pkt_len);
- bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
- bnum = hnae_get_field(bnum_flag, HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
- *out_bnum = bnum;
+
+ prefetch(desc);
+
va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
- skb = *out_skb = napi_alloc_skb(&ring_data->napi, HNS_RX_HEAD_SIZE);
+ /* prefetch first cache line of first page */
+ prefetch(va);
+#if L1_CACHE_BYTES < 128
+ prefetch(va + L1_CACHE_BYTES);
+#endif
+
+ skb = *out_skb = napi_alloc_skb(&ring_data->napi,
+ HNS_RX_HEAD_SIZE);
if (unlikely(!skb)) {
netdev_err(ndev, "alloc rx skb fail\n");
ring->stats.sw_err_cnt++;
return -ENOMEM;
}
+ length = le16_to_cpu(desc->rx.pkt_len);
+ bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
+ priv->ops.get_rxd_bnum(bnum_flag, &bnum);
+ *out_bnum = bnum;
+
+ /* we will be copying header into skb->data in
+ * pskb_may_pull so it is in our interest to prefetch
+ * it now to avoid a possible cache miss
+ */
+ prefetchw(skb->data);
+
if (length <= HNS_RX_HEAD_SIZE) {
memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
@@ -540,20 +683,19 @@ recv:
}
/* make all data has been write before submit */
- if (clean_count > 0) {
- hns_nic_alloc_rx_buffers(ring_data, clean_count);
- clean_count = 0;
- }
-
if (recv_pkts < budget) {
ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
rmb(); /*complete read rx ring bd number*/
- if (ex_num > 0) {
- num += ex_num;
+ if (ex_num > clean_count) {
+ num += ex_num - clean_count;
goto recv;
}
}
+ /* make all data has been write before submit */
+ if (clean_count > 0)
+ hns_nic_alloc_rx_buffers(ring_data, clean_count);
+
return recv_pkts;
}
@@ -650,6 +792,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
netdev_tx_completed_queue(dev_queue, pkts, bytes);
+ if (unlikely(priv->link && !netif_carrier_ok(ndev)))
+ netif_carrier_on(ndev);
+
if (unlikely(pkts && netif_carrier_ok(ndev) &&
(ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
/* Make sure that anybody stopping the queue after this
@@ -848,15 +993,58 @@ static void hns_nic_ring_close(struct net_device *netdev, int idx)
napi_disable(&priv->ring_data[idx].napi);
}
-static int hns_nic_init_irq(struct hns_nic_priv *priv)
+static void hns_set_irq_affinity(struct hns_nic_priv *priv)
{
struct hnae_handle *h = priv->ae_handle;
struct hns_nic_ring_data *rd;
int i;
- int ret;
int cpu;
cpumask_t mask;
+ /*diffrent irq banlance for 16core and 32core*/
+ if (h->q_num == num_possible_cpus()) {
+ for (i = 0; i < h->q_num * 2; i++) {
+ rd = &priv->ring_data[i];
+ if (cpu_online(rd->queue_index)) {
+ cpumask_clear(&mask);
+ cpu = rd->queue_index;
+ cpumask_set_cpu(cpu, &mask);
+ (void)irq_set_affinity_hint(rd->ring->irq,
+ &mask);
+ }
+ }
+ } else {
+ for (i = 0; i < h->q_num; i++) {
+ rd = &priv->ring_data[i];
+ if (cpu_online(rd->queue_index * 2)) {
+ cpumask_clear(&mask);
+ cpu = rd->queue_index * 2;
+ cpumask_set_cpu(cpu, &mask);
+ (void)irq_set_affinity_hint(rd->ring->irq,
+ &mask);
+ }
+ }
+
+ for (i = h->q_num; i < h->q_num * 2; i++) {
+ rd = &priv->ring_data[i];
+ if (cpu_online(rd->queue_index * 2 + 1)) {
+ cpumask_clear(&mask);
+ cpu = rd->queue_index * 2 + 1;
+ cpumask_set_cpu(cpu, &mask);
+ (void)irq_set_affinity_hint(rd->ring->irq,
+ &mask);
+ }
+ }
+ }
+}
+
+static int hns_nic_init_irq(struct hns_nic_priv *priv)
+{
+ struct hnae_handle *h = priv->ae_handle;
+ struct hns_nic_ring_data *rd;
+ int i;
+ int ret;
+
for (i = 0; i < h->q_num * 2; i++) {
rd = &priv->ring_data[i];
@@ -878,16 +1066,11 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
}
disable_irq(rd->ring->irq);
rd->ring->irq_init_flag = RCB_IRQ_INITED;
-
- /*set cpu affinity*/
- if (cpu_online(rd->queue_index)) {
- cpumask_clear(&mask);
- cpu = rd->queue_index;
- cpumask_set_cpu(cpu, &mask);
- irq_set_affinity_hint(rd->ring->irq, &mask);
- }
}
+ /*set cpu affinity*/
+ hns_set_irq_affinity(priv);
+
return 0;
}
@@ -1315,22 +1498,26 @@ static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
return;
hns_nic_dump(priv);
- netdev_info(priv->netdev, "Reset %s port\n",
- (type == HNAE_PORT_DEBUG ? "debug" : "business"));
+ netdev_info(priv->netdev, "try to reset %s port!\n",
+ (type == HNAE_PORT_DEBUG ? "debug" : "service"));
rtnl_lock();
/* put off any impending NetWatchDogTimeout */
priv->netdev->trans_start = jiffies;
- if (type == HNAE_PORT_DEBUG)
+ if (type == HNAE_PORT_DEBUG) {
hns_nic_net_reinit(priv->netdev);
+ } else {
+ netif_carrier_off(priv->netdev);
+ netif_tx_disable(priv->netdev);
+ }
rtnl_unlock();
}
/* for doing service complete*/
static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
{
- assert(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));
+ WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));
smp_mb__before_atomic();
clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
@@ -1435,8 +1622,9 @@ static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
for (i = 0; i < h->q_num * 2; i++) {
netif_napi_del(&priv->ring_data[i].napi);
if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
- irq_set_affinity_hint(priv->ring_data[i].ring->irq,
- NULL);
+ (void)irq_set_affinity_hint(
+ priv->ring_data[i].ring->irq,
+ NULL);
free_irq(priv->ring_data[i].ring->irq,
&priv->ring_data[i]);
}
@@ -1446,6 +1634,21 @@ static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
kfree(priv->ring_data);
}
+static void hns_nic_set_priv_ops(struct net_device *netdev)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+
+ if (AE_IS_VER1(priv->enet_ver)) {
+ priv->ops.fill_desc = fill_desc;
+ priv->ops.get_rxd_bnum = get_rx_desc_bnum;
+ priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
+ } else {
+ priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
+ priv->ops.fill_desc = fill_v2_desc;
+ priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
+ }
+}
+
static int hns_nic_try_get_ae(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
@@ -1473,6 +1676,8 @@ static int hns_nic_try_get_ae(struct net_device *ndev)
goto out_init_ring_data;
}
+ hns_nic_set_priv_ops(ndev);
+
ret = register_netdev(ndev);
if (ret) {
dev_err(priv->dev, "probe register netdev fail!\n");
@@ -1524,10 +1729,10 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
priv->dev = dev;
priv->netdev = ndev;
- if (of_device_is_compatible(node, "hisilicon,hns-nic-v2"))
- priv->enet_ver = AE_VERSION_2;
- else
+ if (of_device_is_compatible(node, "hisilicon,hns-nic-v1"))
priv->enet_ver = AE_VERSION_1;
+ else
+ priv->enet_ver = AE_VERSION_2;
ret = of_property_read_string(node, "ae-name", &priv->ae_name);
if (ret)
@@ -1543,6 +1748,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
ndev->priv_flags |= IFF_UNICAST_FLT;
ndev->netdev_ops = &hns_nic_netdev_ops;
hns_ethtool_set_ops(ndev);
+
ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO;
@@ -1550,6 +1756,16 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
+ switch (priv->enet_ver) {
+ case AE_VERSION_2:
+ ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
+ NETIF_F_GRO;
+ break;
+ default:
+ break;
+ }
+
SET_NETDEV_DEV(ndev, dev);
if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))