Diffstat (limited to 'drivers/net/ethernet/wangxun/libwx/wx_lib.c')
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_lib.c | 165
1 file changed, 133 insertions(+), 32 deletions(-)
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 8706223a6e5a..2b3d6586f44a 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -148,10 +148,11 @@ static struct wx_dec_ptype wx_ptype_lookup[256] = {
[0xFD] = WX_PTT(IP, IPV6, IGMV, IPV6, SCTP, PAY4),
};
-static struct wx_dec_ptype wx_decode_ptype(const u8 ptype)
+struct wx_dec_ptype wx_decode_ptype(const u8 ptype)
{
return wx_ptype_lookup[ptype];
}
+EXPORT_SYMBOL(wx_decode_ptype);
/* wx_test_staterr - tests bits in Rx descriptor status and error fields */
static __le32 wx_test_staterr(union wx_rx_desc *rx_desc,
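wx_decode_ptype() is exported here so that code built outside this object file (for example, the vendor flow-director logic reached through the new wx->atr() callback later in this patch) can decode the hardware packet type the same way the Rx path does. A minimal, hypothetical caller sketch, with illustrative variable names only:

	/* ptype: the 8-bit packet-type field reported in a descriptor */
	struct wx_dec_ptype dptype = wx_decode_ptype(ptype);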
@@ -250,10 +251,7 @@ static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring,
rx_buffer->page_offset;
/* prefetch first cache line of first page */
- prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES);
-#endif
+ net_prefetch(page_addr);
/* allocate a skb to store the frags */
skb = napi_alloc_skb(&rx_ring->q_vector->napi, WX_RXBUFFER_256);
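For reference, net_prefetch() (from include/linux/netdevice.h) wraps essentially the same pattern that the removed open-coded lines implemented, roughly:

	static inline void net_prefetch(void *p)
	{
		prefetch(p);
	#if L1_CACHE_BYTES < 128
		prefetch((u8 *)p + L1_CACHE_BYTES);
	#endif
	}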
@@ -1257,7 +1255,7 @@ static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
/* compute header lengths */
l4len = enc ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
- *hdr_len = enc ? (skb_inner_transport_header(skb) - skb->data) :
+ *hdr_len = enc ? skb_inner_transport_offset(skb) :
skb_transport_offset(skb);
*hdr_len += l4len;
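skb_inner_transport_offset() (from include/linux/skbuff.h) is a drop-in replacement for the open-coded pointer subtraction, roughly:

	static inline int skb_inner_transport_offset(const struct sk_buff *skb)
	{
		return skb_inner_transport_header(skb) - skb->data;
	}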
@@ -1453,6 +1451,7 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
struct wx_ring *tx_ring)
{
+ struct wx *wx = netdev_priv(tx_ring->netdev);
u16 count = TXD_USE_COUNT(skb_headlen(skb));
struct wx_tx_buffer *first;
u8 hdr_len = 0, ptype;
@@ -1498,6 +1497,10 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
goto out_drop;
else if (!tso)
wx_tx_csum(tx_ring, first, ptype);
+
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags) && tx_ring->atr_sample_rate)
+ wx->atr(tx_ring, first, ptype);
+
wx_tx_map(tx_ring, first, hdr_len);
return NETDEV_TX_OK;
@@ -1574,8 +1577,27 @@ static void wx_set_rss_queues(struct wx *wx)
f = &wx->ring_feature[RING_F_RSS];
f->indices = f->limit;
- wx->num_rx_queues = f->limit;
- wx->num_tx_queues = f->limit;
+ if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)))
+ goto out;
+
+ clear_bit(WX_FLAG_FDIR_HASH, wx->flags);
+
+ /* Use Flow Director in addition to RSS to ensure the best
+ * distribution of flows across cores, even when an FDIR flow
+ * isn't matched.
+ */
+ if (f->indices > 1) {
+ f = &wx->ring_feature[RING_F_FDIR];
+
+ f->indices = f->limit;
+
+ if (!(test_bit(WX_FLAG_FDIR_PERFECT, wx->flags)))
+ set_bit(WX_FLAG_FDIR_HASH, wx->flags);
+ }
+
+out:
+ wx->num_rx_queues = f->indices;
+ wx->num_tx_queues = f->indices;
}
static void wx_set_num_queues(struct wx *wx)
@@ -1598,7 +1620,7 @@ static void wx_set_num_queues(struct wx *wx)
*/
static int wx_acquire_msix_vectors(struct wx *wx)
{
- struct irq_affinity affd = {0, };
+ struct irq_affinity affd = { .pre_vectors = 1 };
int nvecs, i;
/* We start by asking for one vector per queue pair */
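Setting .pre_vectors = 1 tells the IRQ core to keep the first vector (the extra non-queue/misc interrupt counted below) out of the automatic affinity spreading, so only the queue vectors are distributed across CPUs. A minimal sketch of the resulting call pattern, with placeholder min/max vector counts:

	struct irq_affinity affd = { .pre_vectors = 1 };

	nvecs = pci_alloc_irq_vectors_affinity(pdev, min_vecs, max_vecs,
					       PCI_IRQ_MSIX, &affd);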
@@ -1614,14 +1636,12 @@ static int wx_acquire_msix_vectors(struct wx *wx)
/* One for non-queue interrupts */
nvecs += 1;
- if (!wx->msix_in_use) {
- wx->msix_entry = kcalloc(1, sizeof(struct msix_entry),
- GFP_KERNEL);
- if (!wx->msix_entry) {
- kfree(wx->msix_q_entries);
- wx->msix_q_entries = NULL;
- return -ENOMEM;
- }
+ wx->msix_entry = kcalloc(1, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!wx->msix_entry) {
+ kfree(wx->msix_q_entries);
+ wx->msix_q_entries = NULL;
+ return -ENOMEM;
}
nvecs = pci_alloc_irq_vectors_affinity(wx->pdev, nvecs,
@@ -1676,18 +1696,19 @@ static int wx_set_interrupt_capability(struct wx *wx)
/* minmum one for queue, one for misc*/
nvecs = 1;
nvecs = pci_alloc_irq_vectors(pdev, nvecs,
- nvecs, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ nvecs, PCI_IRQ_MSI | PCI_IRQ_INTX);
if (nvecs == 1) {
if (pdev->msi_enabled)
wx_err(wx, "Fallback to MSI.\n");
else
- wx_err(wx, "Fallback to LEGACY.\n");
+ wx_err(wx, "Fallback to INTx.\n");
} else {
- wx_err(wx, "Failed to allocate MSI/LEGACY interrupts. Error: %d\n", nvecs);
+ wx_err(wx, "Failed to allocate MSI/INTx interrupts. Error: %d\n", nvecs);
return nvecs;
}
pdev->irq = pci_irq_vector(pdev, 0);
+ wx->num_q_vectors = 1;
return 0;
}
@@ -1931,10 +1952,8 @@ void wx_reset_interrupt_capability(struct wx *wx)
if (pdev->msix_enabled) {
kfree(wx->msix_q_entries);
wx->msix_q_entries = NULL;
- if (!wx->msix_in_use) {
- kfree(wx->msix_entry);
- wx->msix_entry = NULL;
- }
+ kfree(wx->msix_entry);
+ wx->msix_entry = NULL;
}
pci_free_irq_vectors(wx->pdev);
}
@@ -2000,7 +2019,8 @@ void wx_free_irq(struct wx *wx)
int vector;
if (!(pdev->msix_enabled)) {
- free_irq(pdev->irq, wx);
+ if (!wx->misc_irq_domain)
+ free_irq(pdev->irq, wx);
return;
}
@@ -2015,7 +2035,7 @@ void wx_free_irq(struct wx *wx)
free_irq(entry->vector, q_vector);
}
- if (wx->mac.type == wx_mac_em)
+ if (!wx->misc_irq_domain)
free_irq(wx->msix_entry->vector, wx);
}
EXPORT_SYMBOL(wx_free_irq);
@@ -2030,6 +2050,9 @@ int wx_setup_isb_resources(struct wx *wx)
{
struct pci_dev *pdev = wx->pdev;
+ if (wx->isb_mem)
+ return 0;
+
wx->isb_mem = dma_alloc_coherent(&pdev->dev,
sizeof(u32) * 4,
&wx->isb_dma,
@@ -2131,7 +2154,7 @@ void wx_write_eitr(struct wx_q_vector *q_vector)
* wx_configure_vectors - Configure vectors for hardware
* @wx: board private structure
*
- * wx_configure_vectors sets up the hardware to properly generate MSI-X/MSI/LEGACY
+ * wx_configure_vectors sets up the hardware to properly generate MSI-X/MSI/INTx
* interrupts.
**/
void wx_configure_vectors(struct wx *wx)
@@ -2389,7 +2412,6 @@ static void wx_free_all_tx_resources(struct wx *wx)
void wx_free_resources(struct wx *wx)
{
- wx_free_isb_resources(wx);
wx_free_all_rx_resources(wx);
wx_free_all_tx_resources(wx);
}
@@ -2684,6 +2706,7 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
{
netdev_features_t changed = netdev->features ^ features;
struct wx *wx = netdev_priv(netdev);
+ bool need_reset = false;
if (features & NETIF_F_RXHASH) {
wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN,
@@ -2694,15 +2717,93 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
wx->rss_enabled = false;
}
- if (changed &
- (NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_STAG_RX))
+ netdev->features = features;
+
+ if (wx->mac.type == wx_mac_sp && changed & NETIF_F_HW_VLAN_CTAG_RX)
+ wx->do_reset(netdev);
+ else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER))
wx_set_rx_mode(netdev);
- return 1;
+ if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)))
+ return 0;
+
+ /* Check if Flow Director n-tuple support was enabled or disabled. If
+ * the state changed, we need to reset.
+ */
+ switch (features & NETIF_F_NTUPLE) {
+ case NETIF_F_NTUPLE:
+ /* turn off ATR, enable perfect filters and reset */
+ if (!(test_and_set_bit(WX_FLAG_FDIR_PERFECT, wx->flags)))
+ need_reset = true;
+
+ clear_bit(WX_FLAG_FDIR_HASH, wx->flags);
+ break;
+ default:
+ /* turn off perfect filters, enable ATR and reset */
+ if (test_and_clear_bit(WX_FLAG_FDIR_PERFECT, wx->flags))
+ need_reset = true;
+
+ /* We cannot enable ATR if RSS is disabled */
+ if (wx->ring_feature[RING_F_RSS].limit <= 1)
+ break;
+
+ set_bit(WX_FLAG_FDIR_HASH, wx->flags);
+ break;
+ }
+
+ if (need_reset)
+ wx->do_reset(netdev);
+
+ return 0;
}
EXPORT_SYMBOL(wx_set_features);
+#define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_STAG_RX)
+
+#define NETIF_VLAN_INSERTION_FEATURES (NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_HW_VLAN_STAG_TX)
+
+#define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
+ NETIF_F_HW_VLAN_STAG_FILTER)
+
+netdev_features_t wx_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = netdev->features ^ features;
+ struct wx *wx = netdev_priv(netdev);
+
+ if (changed & NETIF_VLAN_STRIPPING_FEATURES) {
+ if ((features & NETIF_VLAN_STRIPPING_FEATURES) != NETIF_VLAN_STRIPPING_FEATURES &&
+ (features & NETIF_VLAN_STRIPPING_FEATURES) != 0) {
+ features &= ~NETIF_VLAN_STRIPPING_FEATURES;
+ features |= netdev->features & NETIF_VLAN_STRIPPING_FEATURES;
+ wx_err(wx, "802.1Q and 802.1ad VLAN stripping must be either both on or both off.");
+ }
+ }
+
+ if (changed & NETIF_VLAN_INSERTION_FEATURES) {
+ if ((features & NETIF_VLAN_INSERTION_FEATURES) != NETIF_VLAN_INSERTION_FEATURES &&
+ (features & NETIF_VLAN_INSERTION_FEATURES) != 0) {
+ features &= ~NETIF_VLAN_INSERTION_FEATURES;
+ features |= netdev->features & NETIF_VLAN_INSERTION_FEATURES;
+ wx_err(wx, "802.1Q and 802.1ad VLAN insertion must be either both on or both off.");
+ }
+ }
+
+ if (changed & NETIF_VLAN_FILTERING_FEATURES) {
+ if ((features & NETIF_VLAN_FILTERING_FEATURES) != NETIF_VLAN_FILTERING_FEATURES &&
+ (features & NETIF_VLAN_FILTERING_FEATURES) != 0) {
+ features &= ~NETIF_VLAN_FILTERING_FEATURES;
+ features |= netdev->features & NETIF_VLAN_FILTERING_FEATURES;
+ wx_err(wx, "802.1Q and 802.1ad VLAN filtering must be either both on or both off.");
+ }
+ }
+
+ return features;
+}
+EXPORT_SYMBOL(wx_fix_features);
+
void wx_set_ring(struct wx *wx, u32 new_tx_count,
u32 new_rx_count, struct wx_ring *temp_ring)
{