Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns3/hns3_enet.c')
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c  181
1 file changed, 107 insertions(+), 74 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 19668a8d22f7..bfa5568baa92 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -11,6 +11,7 @@
#include <linux/irq.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
@@ -473,20 +474,14 @@ static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
writel(mask_en, tqp_vector->mask_addr);
}
-static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
+static void hns3_irq_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
napi_enable(&tqp_vector->napi);
enable_irq(tqp_vector->vector_irq);
-
- /* enable vector */
- hns3_mask_vector_irq(tqp_vector, 1);
}
-static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
+static void hns3_irq_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
- /* disable vector */
- hns3_mask_vector_irq(tqp_vector, 0);
-
disable_irq(tqp_vector->vector_irq);
napi_disable(&tqp_vector->napi);
cancel_work_sync(&tqp_vector->rx_group.dim.work);
@@ -553,9 +548,9 @@ void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
struct hns3_nic_priv *priv)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal;
struct hns3_enet_coalesce *prx_coal = &priv->rx_coal;
@@ -707,11 +702,42 @@ static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
return 0;
}
+static void hns3_enable_irqs_and_tqps(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 i;
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_irq_enable(&priv->tqp_vector[i]);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_mask_vector_irq(&priv->tqp_vector[i], 1);
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_enable(h->kinfo.tqp[i]);
+}
+
+static void hns3_disable_irqs_and_tqps(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 i;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_disable(h->kinfo.tqp[i]);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_mask_vector_irq(&priv->tqp_vector[i], 0);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_irq_disable(&priv->tqp_vector[i]);
+}
+
static int hns3_nic_net_up(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
- int i, j;
int ret;
ret = hns3_nic_reset_all_ring(h);
@@ -720,23 +746,13 @@ static int hns3_nic_net_up(struct net_device *netdev)
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
- /* enable the vectors */
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_enable(&priv->tqp_vector[i]);
-
- /* enable rcb */
- for (j = 0; j < h->kinfo.num_tqps; j++)
- hns3_tqp_enable(h->kinfo.tqp[j]);
+ hns3_enable_irqs_and_tqps(netdev);
/* start the ae_dev */
ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
if (ret) {
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
- while (j--)
- hns3_tqp_disable(h->kinfo.tqp[j]);
-
- for (j = i - 1; j >= 0; j--)
- hns3_vector_disable(&priv->tqp_vector[j]);
+ hns3_disable_irqs_and_tqps(netdev);
}
return ret;
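
Note: the new hns3_enable_irqs_and_tqps()/hns3_disable_irqs_and_tqps() helpers make the error path above symmetric. Enable walks IRQs, then vector masks, then TQPs; disable retraces the same steps in reverse. The old code instead had to track how far its index-based loops had got (the while (j--) and for (j = i - 1; ...) unwinds removed above). A minimal, self-contained userspace sketch of the pattern follows; all names in it are hypothetical, not driver code.

/* Sketch: replacing partial-unwind error handling with symmetric
 * enable-all/disable-all helpers, as the diff does for IRQs and TQPs.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_RES 4

static void res_enable(int i)  { printf("enable %d\n", i); }
static void res_disable(int i) { printf("disable %d\n", i); }

static void enable_all(void)
{
        for (int i = 0; i < NUM_RES; i++)
                res_enable(i);
}

/* Disable everything in exact reverse order; because enable_all()
 * always completes before the start step runs (as in the diff), the
 * error path needs no index bookkeeping.
 */
static void disable_all(void)
{
        for (int i = NUM_RES - 1; i >= 0; i--)
                res_disable(i);
}

static int hw_start(bool fail) { return fail ? -1 : 0; }

static int start_device(bool simulate_failure)
{
        int ret;

        enable_all();
        ret = hw_start(simulate_failure);
        if (ret)
                disable_all();  /* one call replaces the old while (j--) unwind */
        return ret;
}

int main(void)
{
        return start_device(true);
}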
@@ -823,17 +839,9 @@ static void hns3_reset_tx_queue(struct hnae3_handle *h)
static void hns3_nic_net_down(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops;
- int i;
-
- /* disable vectors */
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_disable(&priv->tqp_vector[i]);
- /* disable rcb */
- for (i = 0; i < h->kinfo.num_tqps; i++)
- hns3_tqp_disable(h->kinfo.tqp[i]);
+ hns3_disable_irqs_and_tqps(netdev);
/* stop ae_dev */
ops = priv->ae_handle->ae_algo->ops;
@@ -953,7 +961,7 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev)
void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
{
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
if (ops->request_update_promisc_mode)
ops->request_update_promisc_mode(handle);
@@ -1032,6 +1040,8 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
{
u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
+ struct net_device *netdev = ring_to_netdev(ring);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hns3_tx_spare *tx_spare;
struct page *page;
dma_addr_t dma;
@@ -1073,6 +1083,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
tx_spare->buf = page_address(page);
tx_spare->len = PAGE_SIZE << order;
ring->tx_spare = tx_spare;
+ ring->tx_copybreak = priv->tx_copybreak;
return;
dma_mapping_error:
@@ -1297,7 +1308,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
struct hns3_nic_priv *priv = netdev_priv(skb->dev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
union l4_hdr_info l4;
/* device version above V3(include V3), the hardware can
@@ -1497,7 +1508,7 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
* VLAN enabled, only one VLAN header is allowed in skb, otherwise it
* will cause RAS error.
*/
- ae_dev = pci_get_drvdata(handle->pdev);
+ ae_dev = hns3_get_ae_dev(handle);
if (unlikely(skb_vlan_tagged_multi(skb) &&
ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
handle->port_base_vlan_state ==
@@ -1683,8 +1694,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, dma_addr_t dma,
#define HNS3_LIKELY_BD_NUM 1
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
- unsigned int frag_buf_num;
- int k, sizeoflast;
+ unsigned int frag_buf_num, k;
+ int sizeoflast;
if (likely(size <= HNS3_MAX_BD_SIZE)) {
desc->addr = cpu_to_le64(dma);
@@ -1856,7 +1867,7 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
unsigned int bd_num, u8 max_non_tso_bd_num)
{
unsigned int tot_len = 0;
- int i;
+ unsigned int i;
for (i = 0; i < max_non_tso_bd_num - 1U; i++)
tot_len += bd_size[i];
@@ -1884,7 +1895,7 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
{
- int i;
+ u32 i;
for (i = 0; i < MAX_SKB_FRAGS; i++)
size[i] = skb_frag_size(&shinfo->frags[i]);
@@ -2068,8 +2079,6 @@ static void hns3_tx_push_bd(struct hns3_enet_ring *ring, int num)
__iowrite64_copy(ring->tqp->mem_base, desc,
(sizeof(struct hns3_desc) * HNS3_MAX_PUSH_BD_NUM) /
HNS3_BYTES_PER_64BIT);
-
- io_stop_wc();
}
static void hns3_tx_mem_doorbell(struct hns3_enet_ring *ring)
@@ -2088,8 +2097,6 @@ static void hns3_tx_mem_doorbell(struct hns3_enet_ring *ring)
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_mem_doorbell += ring->pending_buf;
u64_stats_update_end(&ring->syncp);
-
- io_stop_wc();
}
static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
@@ -2103,7 +2110,7 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
*/
if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num &&
!ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) {
- /* This smp_store_release() pairs with smp_load_aquire() in
+ /* This smp_store_release() pairs with smp_load_acquire() in
* hns3_nic_reclaim_desc(). Ensure that the BD valid bit
* is updated.
*/
@@ -2119,7 +2126,7 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
return;
}
- /* This smp_store_release() pairs with smp_load_aquire() in
+ /* This smp_store_release() pairs with smp_load_acquire() in
* hns3_nic_reclaim_desc(). Ensure that the BD valid bit is updated.
*/
smp_store_release(&ring->last_to_use, ring->next_to_use);
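
The comment fix above (smp_load_aquire to smp_load_acquire) is only a typo correction, but the pairing it documents is the classic one: the TX path publishes last_to_use with smp_store_release() only after the descriptors are fully written, and hns3_nic_reclaim_desc() reads it with smp_load_acquire() before touching them. A C11 userspace analogue of that publication pattern, as a sketch with hypothetical names:

/* C11 atomics stand in for the kernel's smp_store_release() and
 * smp_load_acquire().
 */
#include <stdatomic.h>

struct ring {
        int desc[64];                   /* descriptor payload */
        _Atomic int last_to_use;        /* publication index */
};

/* Producer: fill the descriptor first, then publish the index.
 * The release store orders the desc[] write before the index update.
 */
static void produce(struct ring *r, int idx, int val)
{
        r->desc[idx] = val;
        atomic_store_explicit(&r->last_to_use, idx, memory_order_release);
}

/* Consumer: the acquire load guarantees that once the new index is
 * observed, the matching desc[] write is visible too.  (A real ring
 * would only read slots up to the published index.)
 */
static int consume(struct ring *r)
{
        int idx = atomic_load_explicit(&r->last_to_use, memory_order_acquire);
        return r->desc[idx];
}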
@@ -2204,9 +2211,9 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
u32 nfrag = skb_shinfo(skb)->nr_frags + 1;
struct sg_table *sgt;
- int i, bd_num = 0;
+ int bd_num = 0;
dma_addr_t dma;
- u32 cb_len;
+ u32 cb_len, i;
int nents;
if (skb_has_frag_list(skb))
@@ -2444,7 +2451,7 @@ static int hns3_nic_set_features(struct net_device *netdev,
if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
h->ae_algo->ops->cls_flower_active(h)) {
netdev_err(netdev,
- "there are offloaded TC filters active, cannot disable HW TC offload");
+ "there are offloaded TC filters active, cannot disable HW TC offload\n");
return -EINVAL;
}
@@ -2456,7 +2463,6 @@ static int hns3_nic_set_features(struct net_device *netdev,
return ret;
}
- netdev->features = features;
return 0;
}
@@ -2542,7 +2548,7 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
struct hnae3_handle *handle = priv->ae_handle;
struct rtnl_link_stats64 ring_total_stats;
struct hns3_enet_ring *ring;
- unsigned int idx;
+ int idx;
if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
return;
@@ -2761,14 +2767,14 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
netdev_err(netdev, "failed to change MTU in hardware %d\n",
ret);
else
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
return ret;
}
static int hns3_get_timeout_queue(struct net_device *ndev)
{
- int i;
+ unsigned int i;
/* Find the stopped queue the same way the stack does */
for (i = 0; i < ndev->num_tx_queues; i++) {
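
The WRITE_ONCE() switch in the MTU hunk above exists because netdev->mtu can be read locklessly while an update is in flight; WRITE_ONCE() paired with READ_ONCE() on the reader side keeps the compiler from tearing, fusing, or re-materializing the accesses. A minimal illustration with userspace stand-ins for the kernel macros (hypothetical names):

/* Userspace stand-ins for the kernel's WRITE_ONCE()/READ_ONCE(). */
#define WRITE_ONCE(x, val)      (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)            (*(volatile __typeof__(x) *)&(x))

struct dev {
        unsigned int mtu;
};

/* Writer side, normally under a configuration lock. */
static void set_mtu(struct dev *d, unsigned int new_mtu)
{
        WRITE_ONCE(d->mtu, new_mtu);    /* single, untorn store */
}

/* Lockless reader, e.g. a hot path sizing a buffer. */
static unsigned int get_mtu(const struct dev *d)
{
        return READ_ONCE(d->mtu);       /* single, untorn load */
}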
@@ -2849,7 +2855,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = hns3_get_handle(ndev);
struct hns3_enet_ring *tx_ring;
- int timeout_queue;
+ u32 timeout_queue;
timeout_queue = hns3_get_timeout_queue(ndev);
if (timeout_queue >= ndev->num_tx_queues) {
@@ -3539,6 +3545,9 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
ret = hns3_alloc_and_attach_buffer(ring, i);
if (ret)
goto out_buffer_fail;
+
+ if (!(i % HNS3_RESCHED_BD_NUM))
+ cond_resched();
}
return 0;
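
The cond_resched() added above yields the CPU periodically while a large ring's buffers are allocated and attached, avoiding CPU monopolization (and potential soft-lockup splats) on big rings. HNS3_RESCHED_BD_NUM is defined elsewhere in the driver and its value is not visible in this diff. The shape of the pattern as a runnable userspace sketch, with sched_yield() standing in for cond_resched() and an assumed batch size:

#include <sched.h>
#include <stddef.h>

#define RESCHED_BATCH 1024      /* assumed batch size; the real constant
                                 * (HNS3_RESCHED_BD_NUM) is not in this diff */

static int alloc_one_buffer(size_t i) { (void)i; return 0; }

static int alloc_ring_buffers(size_t desc_num)
{
        for (size_t i = 0; i < desc_num; i++) {
                int ret = alloc_one_buffer(i);

                if (ret)
                        return ret;

                /* Every RESCHED_BATCH buffers, give other tasks a
                 * chance to run, mirroring the !(i % ...) check above.
                 */
                if (!(i % RESCHED_BATCH))
                        sched_yield();
        }
        return 0;
}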
@@ -3816,7 +3825,7 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
{
__be16 type = skb->protocol;
struct tcphdr *th;
- int depth = 0;
+ u32 depth = 0;
while (eth_type_vlan(type)) {
struct vlan_hdr *vh;
@@ -4449,7 +4458,7 @@ static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
dim_update_sample(tqp_vector->event_cnt, rx_group->total_packets,
rx_group->total_bytes, &sample);
- net_dim(&rx_group->dim, sample);
+ net_dim(&rx_group->dim, &sample);
}
static void hns3_update_tx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
@@ -4462,7 +4471,7 @@ static void hns3_update_tx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
dim_update_sample(tqp_vector->event_cnt, tx_group->total_packets,
tx_group->total_bytes, &sample);
- net_dim(&tx_group->dim, sample);
+ net_dim(&tx_group->dim, &sample);
}
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
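
Both DIM call sites above now pass the sample by pointer, matching the upstream change of net_dim()'s second parameter from a by-value struct dim_sample to a pointer. A compilable sketch of the call shape, with minimal stand-in types (the real definitions live in <linux/dim.h>):

#include <stdint.h>

struct dim_sample { uint16_t event_ctr; uint64_t pkt_ctr; uint64_t byte_ctr; };
struct dim { struct dim_sample start_sample; };

/* Stand-ins for dim_update_sample()/net_dim(); the real kernel
 * versions take the same pointer arguments.
 */
static void dim_update_sample(uint16_t events, uint64_t packets,
                              uint64_t bytes, struct dim_sample *s)
{
        s->event_ctr = events;
        s->pkt_ctr = packets;
        s->byte_ctr = bytes;
}

static void net_dim(struct dim *dim, const struct dim_sample *end)
{
        dim->start_sample = *end;       /* placeholder for the real algorithm */
}

/* NAPI-side usage shape: fill a stack sample, hand it over by pointer. */
static void update_rx_moderation(struct dim *dim, uint16_t events,
                                 uint64_t packets, uint64_t bytes)
{
        struct dim_sample sample = {0};

        dim_update_sample(events, packets, bytes, &sample);
        net_dim(dim, &sample);          /* by pointer, matching the diff */
}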
@@ -4742,7 +4751,7 @@ map_ring_fail:
static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
@@ -4869,6 +4878,30 @@ static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
devm_kfree(&pdev->dev, priv->tqp_vector);
}
+static void hns3_update_tx_spare_buf_config(struct hns3_nic_priv *priv)
+{
+#define HNS3_MIN_SPARE_BUF_SIZE (2 * 1024 * 1024)
+#define HNS3_MAX_PACKET_SIZE (64 * 1024)
+
+ struct iommu_domain *domain = iommu_get_domain_for_dev(priv->dev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
+ struct hnae3_handle *handle = priv->ae_handle;
+
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
+ return;
+
+ if (!(domain && iommu_is_dma_domain(domain)))
+ return;
+
+ priv->min_tx_copybreak = HNS3_MAX_PACKET_SIZE;
+ priv->min_tx_spare_buf_size = HNS3_MIN_SPARE_BUF_SIZE;
+
+ if (priv->tx_copybreak < priv->min_tx_copybreak)
+ priv->tx_copybreak = priv->min_tx_copybreak;
+ if (handle->kinfo.tx_spare_buf_size < priv->min_tx_spare_buf_size)
+ handle->kinfo.tx_spare_buf_size = priv->min_tx_spare_buf_size;
+}
+
static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
unsigned int ring_type)
{
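
hns3_update_tx_spare_buf_config() above raises the TX copybreak and spare-buffer floor on V3+ hardware when the device sits behind a translating (DMA API) IOMMU domain, where per-packet map/unmap is comparatively expensive; bouncing packets through the pre-mapped spare buffer trades a memcpy for that mapping cost. A small sketch of the copybreak decision this enables (hypothetical names, not the hns3 TX path):

#include <stdbool.h>
#include <stddef.h>

struct tx_ring {
        unsigned int tx_copybreak;      /* raised to 64 KiB behind a DMA IOMMU */
};

/* Decide per packet: copy into the pre-mapped spare buffer (a cheap
 * memcpy, no new IOMMU mapping) or DMA-map the packet's own pages
 * (cheaper only for packets above the threshold).
 */
static bool use_spare_buffer(const struct tx_ring *ring, size_t pkt_len)
{
        return pkt_len <= ring->tx_copybreak;
}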
@@ -5102,6 +5135,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
int i, j;
int ret;
+ hns3_update_tx_spare_buf_config(priv);
for (i = 0; i < ring_num; i++) {
ret = hns3_alloc_ring_memory(&priv->ring[i]);
if (ret) {
@@ -5111,6 +5145,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
}
u64_stats_init(&priv->ring[i].syncp);
+ cond_resched();
}
return 0;
@@ -5220,7 +5255,7 @@ static void hns3_info_show(struct hns3_nic_priv *priv)
static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv,
enum dim_cq_period_mode mode, bool is_tx)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
struct hnae3_handle *handle = priv->ae_handle;
int i;
@@ -5258,7 +5293,7 @@ void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
static void hns3_state_init(struct hnae3_handle *handle)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -5293,6 +5328,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
struct net_device *netdev;
int ret;
+ ae_dev->handle = handle;
+
handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
&max_rss_size);
netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
@@ -5305,6 +5342,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->ae_handle = handle;
priv->tx_timeout_count = 0;
priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
+ priv->min_tx_copybreak = 0;
+ priv->min_tx_spare_buf_size = 0;
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
@@ -5724,6 +5763,9 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ hns3_nic_net_stop(netdev);
+
if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
netdev_warn(netdev, "already uninitialized\n");
return 0;
@@ -5862,8 +5904,6 @@ int hns3_set_channels(struct net_device *netdev,
void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
- struct hnae3_handle *h = priv->ae_handle;
- int i;
if (!if_running)
return;
@@ -5874,11 +5914,7 @@ void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
netif_carrier_off(ndev);
netif_tx_disable(ndev);
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_disable(&priv->tqp_vector[i]);
-
- for (i = 0; i < h->kinfo.num_tqps; i++)
- hns3_tqp_disable(h->kinfo.tqp[i]);
+ hns3_disable_irqs_and_tqps(ndev);
/* delay ring buffer clearing to hns3_reset_notify_uninit_enet
* during reset process, because driver may not be able
@@ -5894,7 +5930,6 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
- int i;
if (!if_running)
return;
@@ -5910,11 +5945,7 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_enable(&priv->tqp_vector[i]);
-
- for (i = 0; i < h->kinfo.num_tqps; i++)
- hns3_tqp_enable(h->kinfo.tqp[i]);
+ hns3_enable_irqs_and_tqps(ndev);
netif_tx_wake_all_queues(ndev);
@@ -5936,7 +5967,7 @@ static const struct hns3_hw_error_info hns3_hw_err[] = {
static void hns3_process_hw_error(struct hnae3_handle *handle,
enum hnae3_hw_error_type type)
{
- int i;
+ u32 i;
for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) {
if (hns3_hw_err[i].type == type) {
@@ -5963,8 +5994,8 @@ static int __init hns3_init_module(void)
{
int ret;
- pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
- pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
+ pr_debug("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
+ pr_debug("%s: %s\n", hns3_driver_name, hns3_copyright);
client.type = HNAE3_CLIENT_KNIC;
snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s",
@@ -6000,9 +6031,11 @@ module_init(hns3_init_module);
*/
static void __exit hns3_exit_module(void)
{
+ hnae3_acquire_unload_lock();
pci_unregister_driver(&hns3_driver);
hnae3_unregister_client(&client);
hns3_dbg_unregister_debugfs();
+ hnae3_release_unload_lock();
}
module_exit(hns3_exit_module);
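
The hnae3_acquire_unload_lock()/hnae3_release_unload_lock() pair added around module teardown is defined outside this file; judging from the names, it serializes the whole unload sequence against concurrent paths in the hnae3 framework. A generic, compilable sketch of that shape (pthread mutex standing in, hypothetical names):

#include <pthread.h>

static pthread_mutex_t unload_lock = PTHREAD_MUTEX_INITIALIZER;

static void driver_unregister(void) { /* ... */ }
static void client_unregister(void) { /* ... */ }

static void module_exit_path(void)
{
        /* Hold the lock across the whole teardown so a concurrent
         * framework path cannot interleave with it.
         */
        pthread_mutex_lock(&unload_lock);
        driver_unregister();
        client_unregister();
        pthread_mutex_unlock(&unload_lock);
}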