author    Linus Torvalds <torvalds@linux-foundation.org>    2017-05-02 16:40:27 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2017-05-02 16:40:27 -0700
commit    8d65b08debc7e62b2c6032d7fe7389d895b92cbc (patch)
tree      0c3141b60c3a03cc32742b5750c5e763b9dae489 /drivers/net/ethernet/qlogic/qed/qed_main.c
parent    5a0387a8a8efb90ae7fea1e2e5c62de3efa74691 (diff)
parent    5d15af6778b8e4ed1fd41b040283af278e7a9a72 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Here are some highlights from the 2065 networking commits that
  happened this development cycle:

   1) XDP support for IXGBE (John Fastabend) and thunderx (Sunil Kowuri)

   2) Add a generic XDP driver, so that anyone can test XDP even if they
      lack a networking device whose driver has explicit XDP support (me).

   3) Sparc64 now has an eBPF JIT too (me)

   4) Add a BPF program testing framework via BPF_PROG_TEST_RUN (Alexei
      Starovoitov)

   5) Make netfilter network namespace teardown less expensive (Florian
      Westphal)

   6) Add symmetric hashing support to nft_hash (Laura Garcia Liebana)

   7) Implement NAPI and GRO in netvsc driver (Stephen Hemminger)

   8) Support TC flower offload statistics in mlxsw (Arkadi Sharshevsky)

   9) Multiqueue support in stmmac driver (Joao Pinto)

  10) Remove TCP timewait recycling, it never really could possibly work
      well in the real world and timestamp randomization really zaps any
      hint of usability this feature had (Soheil Hassas Yeganeh)

  11) Support level3 vs level4 ECMP route hashing in ipv4 (Nikolay
      Aleksandrov)

  12) Add socket busy poll support to epoll (Sridhar Samudrala)

  13) Netlink extended ACK support (Johannes Berg, Pablo Neira Ayuso, and
      several others)

  14) IPSEC hw offload infrastructure (Steffen Klassert)"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (2065 commits)
  tipc: refactor function tipc_sk_recv_stream()
  tipc: refactor function tipc_sk_recvmsg()
  net: thunderx: Optimize page recycling for XDP
  net: thunderx: Support for XDP header adjustment
  net: thunderx: Add support for XDP_TX
  net: thunderx: Add support for XDP_DROP
  net: thunderx: Add basic XDP support
  net: thunderx: Cleanup receive buffer allocation
  net: thunderx: Optimize CQE_TX handling
  net: thunderx: Optimize RBDR descriptor handling
  net: thunderx: Support for page recycling
  ipx: call ipxitf_put() in ioctl error path
  net: sched: add helpers to handle extended actions
  qed*: Fix issues in the ptp filter config implementation.
  qede: Fix concurrency issue in PTP Tx path processing.
  stmmac: Add support for SIMATIC IOT2000 platform
  net: hns: fix ethtool_get_strings overflow in hns driver
  tcp: fix wraparound issue in tcp_lp
  bpf, arm64: fix jit branch offset related to ldimm64
  bpf, arm64: implement jiting of BPF_XADD
  ...
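
Highlight 2 above (generic XDP) is the quickest to try: it lets a BPF program run at ingress on any netdevice, whether or not the underlying driver has native XDP support. As a hedged illustration only — the names and the attach command are ours, not part of this commit — a minimal XDP program that drops all traffic might look like this, built with clang -target bpf and attached in generic ("skb") mode:

#include <linux/bpf.h>

/* Minimal XDP program: drop every packet it sees. With generic XDP this
 * can be attached to any interface for testing, e.g. with a recent
 * iproute2: ip link set dev eth0 xdpgeneric obj xdp_drop.o sec xdp
 */
__attribute__((section("xdp"), used))
int xdp_drop_all(struct xdp_md *ctx)
{
	return XDP_DROP;
}

/* Loaders commonly require a license section for GPL-only helpers. */
__attribute__((section("license"), used))
char _license[] = "GPL";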
Diffstat (limited to 'drivers/net/ethernet/qlogic/qed/qed_main.c')
 drivers/net/ethernet/qlogic/qed/qed_main.c | 149 ++++++++++++++++++---------
 1 file changed, 106 insertions(+), 43 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index eef30a598b40..59992cf20d42 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -45,6 +45,7 @@
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
@@ -54,6 +55,8 @@
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
+#include "qed_iscsi.h"
+
#include "qed_mcp.h"
#include "qed_hw.h"
#include "qed_selftest.h"
@@ -227,10 +230,25 @@ err0:
int qed_fill_dev_info(struct qed_dev *cdev,
struct qed_dev_info *dev_info)
{
+ struct qed_tunnel_info *tun = &cdev->tunnel;
struct qed_ptt *ptt;
memset(dev_info, 0, sizeof(struct qed_dev_info));
+ if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
+ tun->vxlan.b_mode_enabled)
+ dev_info->vxlan_enable = true;
+
+ if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
+ tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
+ tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
+ dev_info->gre_enable = true;
+
+ if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
+ tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
+ tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
+ dev_info->geneve_enable = true;
+
dev_info->num_hwfns = cdev->num_hwfns;
dev_info->pci_mem_start = cdev->pci_params.mem_start;
dev_info->pci_mem_end = cdev->pci_params.mem_end;
@@ -238,6 +256,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
QED_PCI_ETH_ROCE);
dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
+ dev_info->dev_type = cdev->type;
ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
if (IS_PF(cdev)) {
@@ -588,6 +607,19 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
return rc;
}
+void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u8 id = p_hwfn->my_id;
+ u32 int_mode;
+
+ int_mode = cdev->int_params.out.int_mode;
+ if (int_mode == QED_INT_MODE_MSIX)
+ synchronize_irq(cdev->int_params.msix_table[id].vector);
+ else
+ synchronize_irq(cdev->pdev->irq);
+}
+
static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
int i;
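
The qed_slowpath_irq_sync() helper added above quiesces a hardware function's slowpath interrupt before teardown: synchronize_irq() blocks until any handler already running on that line completes, and the helper picks the per-hwfn MSI-X vector or the shared legacy line according to the negotiated interrupt mode. A self-contained sketch of the same pattern, with all names hypothetical:

#include <linux/interrupt.h>
#include <linux/types.h>

/* Hypothetical device context mirroring the qed pattern above. */
struct foo_dev {
	bool		uses_msix;
	unsigned int	msix_vector;	/* e.g. from pci_irq_vector() */
	unsigned int	legacy_irq;	/* pdev->irq for INTx/MSI */
};

/* Wait for any in-flight handler on the device's interrupt line to
 * finish, so resources it touches can be freed safely afterwards. */
static void foo_irq_sync(struct foo_dev *fdev)
{
	if (fdev->uses_msix)
		synchronize_irq(fdev->msix_vector);
	else
		synchronize_irq(fdev->legacy_irq);
}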
@@ -630,19 +662,6 @@ static int qed_nic_stop(struct qed_dev *cdev)
return rc;
}
-static int qed_nic_reset(struct qed_dev *cdev)
-{
- int rc;
-
- rc = qed_hw_reset(cdev);
- if (rc)
- return rc;
-
- qed_resc_free(cdev);
-
- return 0;
-}
-
static int qed_nic_setup(struct qed_dev *cdev)
{
int rc, i;
@@ -743,7 +762,8 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
cdev->num_hwfns;
- if (!IS_ENABLED(CONFIG_QED_RDMA))
+ if (!IS_ENABLED(CONFIG_QED_RDMA) ||
+ QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH_ROCE)
return 0;
for_each_hwfn(cdev, i)
@@ -875,10 +895,12 @@ static void qed_update_pf_params(struct qed_dev *cdev,
params->rdma_pf_params.num_qps = QED_ROCE_QPS;
params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
/* divide by 3 the MRs to avoid MF ILT overflow */
- params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
}
+ if (cdev->num_hwfns > 1 || IS_VF(cdev))
+ params->eth_pf_params.num_arfs_filters = 0;
+
/* In case we might support RDMA, don't allow qede to be greedy
* with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
*/
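
Zeroing num_arfs_filters above opts multi-function (two-hwfn) devices and VFs out of accelerated RFS. For context, when CONFIG_RFS_ACCEL is set, aRFS reaches a driver through the ndo_rx_flow_steer() netdev op, which asks it to steer a flow to the RX queue nearest the consuming CPU. A hedged sketch of that entry point — the body and return value are illustrative, not qed's implementation:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RFS_ACCEL
/* Called by the RFS core: program a hardware filter steering skb's flow
 * to rxq_index, and return a filter ID for later expiry checks (or a
 * negative errno on failure). */
static int foo_rx_flow_steer(struct net_device *dev,
			     const struct sk_buff *skb,
			     u16 rxq_index, u32 flow_id)
{
	/* program an n-tuple filter for skb's flow -> rxq_index here */
	return 0;	/* illustrative: return the allocated filter ID */
}
#endif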
@@ -900,11 +922,15 @@ static void qed_update_pf_params(struct qed_dev *cdev,
static int qed_slowpath_start(struct qed_dev *cdev,
struct qed_slowpath_params *params)
{
- struct qed_tunn_start_params tunn_info;
+ struct qed_drv_load_params drv_load_params;
+ struct qed_hw_init_params hw_init_params;
struct qed_mcp_drv_version drv_version;
+ struct qed_tunnel_info tunn_info;
const u8 *data = NULL;
struct qed_hwfn *hwfn;
+#ifdef CONFIG_RFS_ACCEL
struct qed_ptt *p_ptt;
+#endif
int rc = -EINVAL;
if (qed_iov_wq_start(cdev))
@@ -920,13 +946,18 @@ static int qed_slowpath_start(struct qed_dev *cdev,
goto err;
}
- p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
- if (p_ptt) {
- QED_LEADING_HWFN(cdev)->p_ptp_ptt = p_ptt;
- } else {
- DP_NOTICE(cdev, "Failed to acquire PTT for PTP\n");
- goto err;
+#ifdef CONFIG_RFS_ACCEL
+ if (cdev->num_hwfns == 1) {
+ p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (p_ptt) {
+ QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
+ } else {
+ DP_NOTICE(cdev,
+ "Failed to acquire PTT for aRFS\n");
+ goto err;
+ }
}
+#endif
}
cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
@@ -953,27 +984,47 @@ static int qed_slowpath_start(struct qed_dev *cdev,
qed_dbg_pf_init(cdev);
}
- memset(&tunn_info, 0, sizeof(tunn_info));
- tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
- 1 << QED_MODE_L2GRE_TUNN |
- 1 << QED_MODE_IPGRE_TUNN |
- 1 << QED_MODE_L2GENEVE_TUNN |
- 1 << QED_MODE_IPGENEVE_TUNN;
-
- tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
- tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
- tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
-
/* Start the slowpath */
- rc = qed_hw_init(cdev, &tunn_info, true,
- cdev->int_params.out.int_mode,
- true, data);
+ memset(&hw_init_params, 0, sizeof(hw_init_params));
+ memset(&tunn_info, 0, sizeof(tunn_info));
+ tunn_info.vxlan.b_mode_enabled = true;
+ tunn_info.l2_gre.b_mode_enabled = true;
+ tunn_info.ip_gre.b_mode_enabled = true;
+ tunn_info.l2_geneve.b_mode_enabled = true;
+ tunn_info.ip_geneve.b_mode_enabled = true;
+ tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ hw_init_params.p_tunn = &tunn_info;
+ hw_init_params.b_hw_start = true;
+ hw_init_params.int_mode = cdev->int_params.out.int_mode;
+ hw_init_params.allow_npar_tx_switch = true;
+ hw_init_params.bin_fw_data = data;
+
+ memset(&drv_load_params, 0, sizeof(drv_load_params));
+ drv_load_params.is_crash_kernel = is_kdump_kernel();
+ drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
+ drv_load_params.avoid_eng_reset = false;
+ drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
+ hw_init_params.p_drv_load_params = &drv_load_params;
+
+ rc = qed_hw_init(cdev, &hw_init_params);
if (rc)
goto err2;
DP_INFO(cdev,
"HW initialization and function start completed successfully\n");
+ if (IS_PF(cdev)) {
+ cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
+ BIT(QED_MODE_L2GENEVE_TUNN) |
+ BIT(QED_MODE_IPGENEVE_TUNN) |
+ BIT(QED_MODE_L2GRE_TUNN) |
+ BIT(QED_MODE_IPGRE_TUNN));
+ }
+
/* Allocate LL2 interface if needed */
if (QED_LEADING_HWFN(cdev)->using_ll2) {
rc = qed_ll2_alloc_if(cdev);
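
The qed_drv_load_params block above also tags crash-kernel loads via is_kdump_kernel(), which is what the new <linux/crash_dump.h> include at the top of the file is for. As a hedged illustration of the helper's typical use — the function and numbers are ours, not qed's — a driver probed inside a memory-constrained kdump capture kernel usually scales its resources down:

#include <linux/crash_dump.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>

/* is_kdump_kernel() is true when an ELF core header was handed to this
 * kernel, i.e. we are the crash (capture) kernel; ask for the bare
 * minimum there instead of one queue per CPU. */
static unsigned int foo_default_num_queues(void)
{
	if (is_kdump_kernel())
		return 1;
	return min_t(unsigned int, 8, num_online_cpus());
}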
@@ -1014,9 +1065,12 @@ err:
if (IS_PF(cdev))
release_firmware(cdev->firmware);
- if (IS_PF(cdev) && QED_LEADING_HWFN(cdev)->p_ptp_ptt)
+#ifdef CONFIG_RFS_ACCEL
+ if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
+ QED_LEADING_HWFN(cdev)->p_arfs_ptt)
qed_ptt_release(QED_LEADING_HWFN(cdev),
- QED_LEADING_HWFN(cdev)->p_ptp_ptt);
+ QED_LEADING_HWFN(cdev)->p_arfs_ptt);
+#endif
qed_iov_wq_stop(cdev, false);
@@ -1031,8 +1085,11 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
qed_ll2_dealloc_if(cdev);
if (IS_PF(cdev)) {
- qed_ptt_release(QED_LEADING_HWFN(cdev),
- QED_LEADING_HWFN(cdev)->p_ptp_ptt);
+#ifdef CONFIG_RFS_ACCEL
+ if (cdev->num_hwfns == 1)
+ qed_ptt_release(QED_LEADING_HWFN(cdev),
+ QED_LEADING_HWFN(cdev)->p_arfs_ptt);
+#endif
qed_free_stream_mem(cdev);
if (IS_QED_ETH_IF(cdev))
qed_sriov_disable(cdev, true);
@@ -1042,7 +1099,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
}
qed_disable_msix(cdev);
- qed_nic_reset(cdev);
+
+ qed_resc_free(cdev);
qed_iov_wq_stop(cdev, true);
@@ -1653,13 +1711,18 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
switch (type) {
case QED_MCP_LAN_STATS:
qed_get_vport_stats(cdev, &eth_stats);
- stats->lan_stats.ucast_rx_pkts = eth_stats.rx_ucast_pkts;
- stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts;
+ stats->lan_stats.ucast_rx_pkts =
+ eth_stats.common.rx_ucast_pkts;
+ stats->lan_stats.ucast_tx_pkts =
+ eth_stats.common.tx_ucast_pkts;
stats->lan_stats.fcs_err = -1;
break;
case QED_MCP_FCOE_STATS:
qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
break;
+ case QED_MCP_ISCSI_STATS:
+ qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
+ break;
default:
DP_ERR(cdev, "Invalid protocol type = %d\n", type);
return;