Diffstat (limited to 'drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c')
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c  901
1 file changed, 431 insertions(+), 470 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c12b4d3e946e..6a1cc2032bf3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/aer.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -74,52 +73,60 @@
__stringify(BCM_5710_FW_MINOR_VERSION) "." \
__stringify(BCM_5710_FW_REVISION_VERSION) "." \
__stringify(BCM_5710_FW_ENGINEERING_VERSION)
+
+#define FW_FILE_VERSION_V15 \
+ __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
+ __stringify(BCM_5710_FW_MINOR_VERSION) "." \
+ __stringify(BCM_5710_FW_REVISION_VERSION_V15) "." \
+ __stringify(BCM_5710_FW_ENGINEERING_VERSION)
+
#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E1_V15 "bnx2x/bnx2x-e1-" FW_FILE_VERSION_V15 ".fw"
+#define FW_FILE_NAME_E1H_V15 "bnx2x/bnx2x-e1h-" FW_FILE_VERSION_V15 ".fw"
+#define FW_FILE_NAME_E2_V15 "bnx2x/bnx2x-e2-" FW_FILE_VERSION_V15 ".fw"
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (5*HZ)
-static char version[] =
- "QLogic 5771x/578xx 10/20-Gigabit Ethernet Driver "
- DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-
MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("QLogic "
"BCM57710/57711/57711E/"
"57712/57712_MF/57800/57800_MF/57810/57810_MF/"
"57840/57840_MF Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);
+MODULE_FIRMWARE(FW_FILE_NAME_E1_V15);
+MODULE_FIRMWARE(FW_FILE_NAME_E1H_V15);
+MODULE_FIRMWARE(FW_FILE_NAME_E2_V15);
int bnx2x_num_queues;
-module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO);
+module_param_named(num_queues, bnx2x_num_queues, int, 0444);
MODULE_PARM_DESC(num_queues,
" Set number of queues (default is as a number of CPUs)");
static int disable_tpa;
-module_param(disable_tpa, int, S_IRUGO);
+module_param(disable_tpa, int, 0444);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
static int int_mode;
-module_param(int_mode, int, S_IRUGO);
+module_param(int_mode, int, 0444);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
"(1 INT#x; 2 MSI)");
static int dropless_fc;
-module_param(dropless_fc, int, S_IRUGO);
+module_param(dropless_fc, int, 0444);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
static int mrrs = -1;
-module_param(mrrs, int, S_IRUGO);
+module_param(mrrs, int, 0444);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
static int debug;
-module_param(debug, int, S_IRUGO);
+module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, " Default debug msglevel");
static struct workqueue_struct *bnx2x_wq;
@@ -281,9 +288,16 @@ static const struct pci_device_id bnx2x_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
+const u32 dmae_reg_go_c[] = {
+ DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
+ DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
+ DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
+ DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
+};
+
/* Global resources for unloading a previously loaded device */
#define BNX2X_PREV_WAIT_NEEDED 1
-static DEFINE_SEMAPHORE(bnx2x_prev_sem);
+static DEFINE_SEMAPHORE(bnx2x_prev_sem, 1);
static LIST_HEAD(bnx2x_prev_list);
/* Forward declaration */
@@ -294,8 +308,11 @@ static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
/****************************************************************************
* General service functions
****************************************************************************/
-
-static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);
+static int bnx2x_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+static int bnx2x_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config);
static void __storm_memset_dma_mapping(struct bnx2x *bp,
u32 addr, dma_addr_t mapping)
@@ -745,9 +762,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
CHIP_IS_E1(bp) ? "everest1" :
CHIP_IS_E1H(bp) ? "everest1h" :
CHIP_IS_E2(bp) ? "everest2" : "everest3",
- BCM_5710_FW_MAJOR_VERSION,
- BCM_5710_FW_MINOR_VERSION,
- BCM_5710_FW_REVISION_VERSION);
+ bp->fw_major, bp->fw_minor, bp->fw_rev);
return rc;
}
@@ -869,9 +884,6 @@ static void bnx2x_hc_int_disable(struct bnx2x *bp)
"write %x to HC %d (addr 0x%x)\n",
val, port, addr);
- /* flush all outstanding writes */
- mmiowb();
-
REG_WR(bp, addr, val);
if (REG_RD(bp, addr) != val)
BNX2X_ERR("BUG! Proper val not read from IGU!\n");
@@ -887,9 +899,6 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp)
DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
- /* flush all outstanding writes */
- mmiowb();
-
REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
BNX2X_ERR("BUG! Proper val not read from IGU!\n");
@@ -1180,9 +1189,18 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
}
#endif
if (IS_PF(bp)) {
+ int tmp_msg_en = bp->msg_enable;
+
bnx2x_fw_dump(bp);
+ bp->msg_enable |= NETIF_MSG_HW;
+ BNX2X_ERR("Idle check (1st round) ----------\n");
+ bnx2x_idle_chk(bp);
+ BNX2X_ERR("Idle check (2nd round) ----------\n");
+ bnx2x_idle_chk(bp);
+ bp->msg_enable = tmp_msg_en;
bnx2x_mc_assert(bp);
}
+
BNX2X_ERR("end crash dump -----------------\n");
}
@@ -1390,7 +1408,6 @@ int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
u32 op_gen_command = 0;
u32 comp_addr = BAR_CSTRORM_INTMEM +
CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
- int ret = 0;
if (REG_RD(bp, comp_addr)) {
BNX2X_ERR("Cleanup complete was not 0 before sending\n");
@@ -1415,7 +1432,7 @@ int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
/* Zero completion for next FLR */
REG_WR(bp, comp_addr, 0);
- return ret;
+ return 0;
}
u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
@@ -1595,7 +1612,6 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
/*
* Ensure that HC_CONFIG is written before leading/trailing edge config
*/
- mmiowb();
barrier();
if (!CHIP_IS_E1(bp)) {
@@ -1611,9 +1627,6 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
}
-
- /* Make sure that interrupts are indeed enabled from here on */
- mmiowb();
}
static void bnx2x_igu_int_enable(struct bnx2x *bp)
@@ -1674,9 +1687,6 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
-
- /* Make sure that interrupts are indeed enabled from here on */
- mmiowb();
}
void bnx2x_int_enable(struct bnx2x *bp)
@@ -1761,7 +1771,7 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
* @bp: driver handle
*
* Returns the recovery leader resource id according to the engine this function
- * belongs to. Currently only only 2 engines is supported.
+ * belongs to. Currently only 2 engines are supported.
*/
static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
{
@@ -2925,6 +2935,10 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
func_params.f_obj = &bp->func_obj;
func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
int func = BP_ABS_FUNC(bp);
u32 val;
@@ -3084,9 +3098,9 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
/**
* bnx2x_get_common_flags - Return common flags
*
- * @bp device handle
- * @fp queue handle
- * @zero_stats TRUE if statistics zeroing is needed
+ * @bp: device handle
+ * @fp: queue handle
+ * @zero_stats: TRUE if statistics zeroing is needed
*
* Return the flags that are common for the Tx-only and not normal connections.
*/
@@ -3373,7 +3387,7 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
&bp->sp_objs->mac_obj;
int i;
- strlcpy(ether_stat->version, DRV_MODULE_VERSION,
+ strscpy(ether_stat->version, DRV_MODULE_VERSION,
ETH_STAT_INFO_VERSION_LEN);
/* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
@@ -3536,6 +3550,16 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
*/
static void bnx2x_config_mf_bw(struct bnx2x *bp)
{
+ /* Workaround for MFW bug.
+ * MFW is not supposed to generate BW attention in
+ * single function mode.
+ */
+ if (!IS_MF(bp)) {
+ DP(BNX2X_MSG_MCP,
+ "Ignoring MF BW config in single function mode\n");
+ return;
+ }
+
if (bp->link_vars.link_up) {
bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
bnx2x_link_sync_notify(bp);
@@ -3817,9 +3841,8 @@ static void bnx2x_sp_prod_update(struct bnx2x *bp)
*/
mb();
- REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
- bp->spq_prod_idx);
- mmiowb();
+ REG_WR16_RELAXED(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
+ bp->spq_prod_idx);
}
/**
@@ -4301,7 +4324,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
bnx2x_handle_eee_event(bp);
if (val & DRV_STATUS_OEM_UPDATE_SVID)
- bnx2x_handle_update_svid_cmd(bp);
+ bnx2x_schedule_sp_rtnl(bp,
+ BNX2X_SP_RTNL_UPDATE_SVID, 0);
if (bp->link_vars.periodic_flags &
PERIODIC_FLAGS_LINK_EVENT) {
@@ -5229,7 +5253,6 @@ static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
{
/* No memory barriers */
storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
- mmiowb(); /* keep prod updates ordered */
}
static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
@@ -5761,9 +5784,9 @@ void bnx2x_drv_pulse(struct bnx2x *bp)
bp->fw_drv_pulse_wr_seq);
}
-static void bnx2x_timer(unsigned long data)
+static void bnx2x_timer(struct timer_list *t)
{
- struct bnx2x *bp = (struct bnx2x *) data;
+ struct bnx2x *bp = timer_container_of(bp, t, timer);
if (!netif_running(bp->dev))
return;
@@ -6302,11 +6325,11 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
case FW_MSG_CODE_DRV_LOAD_COMMON:
case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
bnx2x_init_internal_common(bp);
- /* no break */
+ fallthrough;
case FW_MSG_CODE_DRV_LOAD_PORT:
/* nothing to do */
- /* no break */
+ fallthrough;
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
/* internal memory per function is
@@ -6498,7 +6521,6 @@ void bnx2x_nic_init_cnic(struct bnx2x *bp)
/* flush all */
mb();
- mmiowb();
}
void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
@@ -6538,7 +6560,6 @@ void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
/* flush all before enabling interrupts */
mb();
- mmiowb();
bnx2x_int_enable(bp);
@@ -7713,6 +7734,9 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
REG_WR(bp, reg_addr, val);
}
+ if (CHIP_IS_E3B0(bp))
+ bp->flags |= PTP_SUPPORTED;
+
return 0;
}
@@ -7757,12 +7781,10 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
data, igu_addr_data);
REG_WR(bp, igu_addr_data, data);
- mmiowb();
barrier();
DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
ctl, igu_addr_ctl);
REG_WR(bp, igu_addr_ctl, ctl);
- mmiowb();
barrier();
/* wait for clean up to finish */
@@ -8408,7 +8430,7 @@ alloc_mem_err:
* Init service functions
*/
-int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
+int bnx2x_set_mac_one(struct bnx2x *bp, const u8 *mac,
struct bnx2x_vlan_mac_obj *obj, bool set,
int mac_type, unsigned long *ramrod_flags)
{
@@ -8462,6 +8484,7 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
/* Fill a user request section if needed */
if (!test_bit(RAMROD_CONT, ramrod_flags)) {
ramrod_param.user_req.u.vlan.vlan = vlan;
+ __set_bit(BNX2X_VLAN, &ramrod_param.user_req.vlan_mac_flags);
/* Set the command: ADD or DEL */
if (set)
ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
@@ -8482,6 +8505,34 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
return rc;
}
+void bnx2x_clear_vlan_info(struct bnx2x *bp)
+{
+ struct bnx2x_vlan_entry *vlan;
+
+ /* Mark that hw forgot all entries */
+ list_for_each_entry(vlan, &bp->vlan_reg, link)
+ vlan->hw = false;
+
+ bp->vlan_cnt = 0;
+}
+
+static int bnx2x_del_all_vlans(struct bnx2x *bp)
+{
+ struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
+ unsigned long ramrod_flags = 0, vlan_flags = 0;
+ int rc;
+
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ __set_bit(BNX2X_VLAN, &vlan_flags);
+ rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags);
+ if (rc)
+ return rc;
+
+ bnx2x_clear_vlan_info(bp);
+
+ return 0;
+}
+
int bnx2x_del_all_macs(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *mac_obj,
int mac_type, bool wait_for_comp)
@@ -8561,11 +8612,11 @@ int bnx2x_set_int_mode(struct bnx2x *bp)
bp->num_queues,
1 + bp->num_cnic_queues);
- /* falling through... */
+ fallthrough;
case BNX2X_INT_MODE_MSI:
bnx2x_enable_msi(bp);
- /* falling through... */
+ fallthrough;
case BNX2X_INT_MODE_INTX:
bp->num_ethernet_queues = 1;
bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
@@ -9108,7 +9159,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
else if (bp->wol) {
u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
- u8 *mac_addr = bp->dev->dev_addr;
+ const u8 *mac_addr = bp->dev->dev_addr;
struct pci_dev *pdev = bp->pdev;
u32 val;
u16 pmc;
@@ -9320,6 +9371,17 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
rc);
+ /* The whole *vlan_obj structure may not be initialized if VLAN
+ * filtering offload is not supported by hardware. Currently this is
+ * true for all hardware covered by CHIP_IS_E1x().
+ */
+ if (!CHIP_IS_E1x(bp)) {
+ /* Remove all currently configured VLANs */
+ rc = bnx2x_del_all_vlans(bp);
+ if (rc < 0)
+ BNX2X_ERR("Failed to delete all VLANs\n");
+ }
+
/* Disable LLH */
if (!CHIP_IS_E1(bp))
REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
@@ -9332,7 +9394,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
/* Schedule the rx_mode command */
if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
- else
+ else if (bp->slowpath)
bnx2x_set_storm_rx_mode(bp);
/* Cleanup multicast configuration */
@@ -9407,18 +9469,26 @@ unload_error:
* function stop ramrod is sent, since as part of this ramrod FW access
* PTP registers.
*/
- if (bp->flags & PTP_SUPPORTED)
+ if (bp->flags & PTP_SUPPORTED) {
bnx2x_stop_ptp(bp);
+ if (bp->ptp_clock) {
+ ptp_clock_unregister(bp->ptp_clock);
+ bp->ptp_clock = NULL;
+ }
+ }
- /* Disable HW interrupts, NAPI */
- bnx2x_netif_stop(bp, 1);
- /* Delete all NAPI objects */
- bnx2x_del_all_napi(bp);
- if (CNIC_LOADED(bp))
- bnx2x_del_all_napi_cnic(bp);
+ if (!bp->nic_stopped) {
+ /* Disable HW interrupts, NAPI */
+ bnx2x_netif_stop(bp, 1);
+ /* Delete all NAPI objects */
+ bnx2x_del_all_napi(bp);
+ if (CNIC_LOADED(bp))
+ bnx2x_del_all_napi_cnic(bp);
- /* Release IRQs */
- bnx2x_free_irq(bp);
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+ bp->nic_stopped = true;
+ }
/* Reset the chip, unless PCI function is offline. If we reach this
* point following a PCI error handling, it means device is really
@@ -9494,7 +9564,6 @@ static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
close ? "closing" : "opening");
- mmiowb();
}
#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
@@ -9578,6 +9647,15 @@ static int bnx2x_init_shmem(struct bnx2x *bp)
do {
bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+
+ /* If we read all 0xFFs, it means we are in a PCI error state and
+ * should bail out to avoid crashes on the adapter's FW reads.
+ */
+ if (bp->common.shmem_base == 0xFFFFFFFF) {
+ bp->flags |= NO_MCP_FLAG;
+ return -ENODEV;
+ }
+
if (bp->common.shmem_base) {
val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
if (val & SHR_MEM_VALIDITY_MB)
@@ -9609,7 +9687,6 @@ static void bnx2x_pxp_prep(struct bnx2x *bp)
if (!CHIP_IS_E1(bp)) {
REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
- mmiowb();
}
}
@@ -9709,16 +9786,13 @@ static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
reset_mask1 & (~not_reset_mask1));
barrier();
- mmiowb();
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
reset_mask2 & (~stay_reset2));
barrier();
- mmiowb();
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
- mmiowb();
}
/**
@@ -9802,9 +9876,6 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
REG_WR(bp, MISC_REG_UNPREPARED, 0);
barrier();
- /* Make sure all is written to the chip before the reset */
- mmiowb();
-
/* Wait for 1ms to empty GLUE and PCI-E core queues,
* PSWHST, GRC and PSWRD Tetris buffer.
*/
@@ -9931,10 +10002,18 @@ static void bnx2x_recovery_failed(struct bnx2x *bp)
*/
static void bnx2x_parity_recover(struct bnx2x *bp)
{
- bool global = false;
u32 error_recovered, error_unrecovered;
- bool is_parity;
+ bool is_parity, global = false;
+#ifdef CONFIG_BNX2X_SRIOV
+ int vf_idx;
+
+ for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+ if (vf)
+ vf->state = VF_LOST;
+ }
+#endif
DP(NETIF_MSG_HW, "Handling parity\n");
while (1) {
switch (bp->recovery_state) {
@@ -10088,7 +10167,6 @@ static int bnx2x_udp_port_update(struct bnx2x *bp)
{
struct bnx2x_func_switch_update_params *switch_update_params;
struct bnx2x_func_state_params func_params = {NULL};
- struct bnx2x_udp_tunnel *udp_tunnel;
u16 vxlan_port = 0, geneve_port = 0;
int rc;
@@ -10105,15 +10183,13 @@ static int bnx2x_udp_port_update(struct bnx2x *bp)
__set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
&switch_update_params->changes);
- if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) {
- udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE];
- geneve_port = udp_tunnel->dst_port;
+ if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]) {
+ geneve_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE];
switch_update_params->geneve_dst_port = geneve_port;
}
- if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) {
- udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN];
- vxlan_port = udp_tunnel->dst_port;
+ if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]) {
+ vxlan_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN];
switch_update_params->vxlan_dst_port = vxlan_port;
}
@@ -10133,94 +10209,26 @@ static int bnx2x_udp_port_update(struct bnx2x *bp)
return rc;
}
-static void __bnx2x_add_udp_port(struct bnx2x *bp, u16 port,
- enum bnx2x_udp_port_type type)
-{
- struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
-
- if (!netif_running(bp->dev) || !IS_PF(bp) || CHIP_IS_E1x(bp))
- return;
-
- if (udp_port->count && udp_port->dst_port == port) {
- udp_port->count++;
- return;
- }
-
- if (udp_port->count) {
- DP(BNX2X_MSG_SP,
- "UDP tunnel [%d] - destination port limit reached\n",
- type);
- return;
- }
-
- udp_port->dst_port = port;
- udp_port->count = 1;
- bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0);
-}
-
-static void __bnx2x_del_udp_port(struct bnx2x *bp, u16 port,
- enum bnx2x_udp_port_type type)
-{
- struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
-
- if (!IS_PF(bp) || CHIP_IS_E1x(bp))
- return;
-
- if (!udp_port->count || udp_port->dst_port != port) {
- DP(BNX2X_MSG_SP, "Invalid UDP tunnel [%d] port\n",
- type);
- return;
- }
-
- /* Remove reference, and make certain it's no longer in use */
- udp_port->count--;
- if (udp_port->count)
- return;
- udp_port->dst_port = 0;
-
- if (netif_running(bp->dev))
- bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0);
- else
- DP(BNX2X_MSG_SP, "Deleted UDP tunnel [%d] port %d\n",
- type, port);
-}
-
-static void bnx2x_udp_tunnel_add(struct net_device *netdev,
- struct udp_tunnel_info *ti)
+static int bnx2x_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
{
struct bnx2x *bp = netdev_priv(netdev);
- u16 t_port = ntohs(ti->port);
+ struct udp_tunnel_info ti;
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
- break;
- default:
- break;
- }
-}
+ udp_tunnel_nic_get_port(netdev, table, 0, &ti);
+ bp->udp_tunnel_ports[table] = be16_to_cpu(ti.port);
-static void bnx2x_udp_tunnel_del(struct net_device *netdev,
- struct udp_tunnel_info *ti)
-{
- struct bnx2x *bp = netdev_priv(netdev);
- u16 t_port = ntohs(ti->port);
-
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
- break;
- default:
- break;
- }
+ return bnx2x_udp_port_update(bp);
}
+static const struct udp_tunnel_nic_info bnx2x_udp_tunnels = {
+ .sync_table = bnx2x_udp_tunnel_sync,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+ },
+};
+
static int bnx2x_close(struct net_device *dev);
/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
@@ -10270,9 +10278,22 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
bp->sp_rtnl_state = 0;
smp_mb();
- bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
- bnx2x_nic_load(bp, LOAD_NORMAL);
+ /* Immediately indicate link as down */
+ bp->link_vars.link_up = 0;
+ bp->force_link_down = true;
+ netif_carrier_off(bp->dev);
+ BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
+ /* If the reload fails due to an allocation failure, reboot
+ * the nic once more. If open still fails, log an error
+ * message to notify the user.
+ */
+ if (bnx2x_nic_load(bp, LOAD_NORMAL) == -ENOMEM) {
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
+ if (bnx2x_nic_load(bp, LOAD_NORMAL))
+ BNX2X_ERR("Open the NIC fails again!\n");
+ }
rtnl_unlock();
return;
}
@@ -10327,23 +10348,8 @@ sp_rtnl_not_reset:
&bp->sp_rtnl_state))
bnx2x_update_mng_version(bp);
- if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
- &bp->sp_rtnl_state)) {
- if (bnx2x_udp_port_update(bp)) {
- /* On error, forget configuration */
- memset(bp->udp_tunnel_ports, 0,
- sizeof(struct bnx2x_udp_tunnel) *
- BNX2X_UDP_PORT_MAX);
- } else {
- /* Since we don't store additional port information,
- * if no ports are configured for any feature ask for
- * information about currently configured ports.
- */
- if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count &&
- !bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count)
- udp_tunnel_get_rx_info(bp->dev);
- }
- }
+ if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state))
+ bnx2x_handle_update_svid_cmd(bp);
/* work which needs rtnl lock not-taken (as it takes the lock itself and
* can be called from other contexts as well)
@@ -11217,7 +11223,7 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
dev_info.port_hw_config[port].external_phy_config),
SHMEM_RD(bp,
dev_info.port_hw_config[port].external_phy_config2));
- return;
+ return;
}
if (CHIP_IS_E3(bp))
@@ -11718,8 +11724,10 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp)
* If maximum allowed number of connections is zero -
* disable the feature.
*/
- if (!bp->cnic_eth_dev.max_fcoe_conn)
+ if (!bp->cnic_eth_dev.max_fcoe_conn) {
bp->flags |= NO_FCOE_FLAG;
+ eth_zero_addr(bp->fip_mac);
+ }
}
static void bnx2x_get_cnic_info(struct bnx2x *bp)
@@ -11797,7 +11805,7 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
* as the SAN mac was copied from the primary MAC.
*/
if (IS_MF_FCOE_AFEX(bp))
- memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, fip_mac);
} else {
val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
iscsi_mac_upper);
@@ -11830,9 +11838,10 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
u32 val, val2;
int func = BP_ABS_FUNC(bp);
int port = BP_PORT(bp);
+ u8 addr[ETH_ALEN] = {};
/* Zero primary MAC configuration */
- eth_zero_addr(bp->dev->dev_addr);
+ eth_hw_addr_set(bp->dev, addr);
if (BP_NOMCP(bp)) {
BNX2X_ERROR("warning: random MAC workaround active\n");
@@ -11841,8 +11850,10 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
- (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
- bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+ (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
+ bnx2x_set_mac_buf(addr, val, val2);
+ eth_hw_addr_set(bp->dev, addr);
+ }
if (CNIC_SUPPORT(bp))
bnx2x_get_cnic_mac_hwinfo(bp);
@@ -11850,7 +11861,8 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
/* in SF read MACs from port configuration */
val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
- bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+ bnx2x_set_mac_buf(addr, val, val2);
+ eth_hw_addr_set(bp->dev, addr);
if (CNIC_SUPPORT(bp))
bnx2x_get_cnic_mac_hwinfo(bp);
@@ -11915,7 +11927,7 @@ static void validate_set_si_mode(struct bnx2x *bp)
static int bnx2x_get_hwinfo(struct bnx2x *bp)
{
int /*abs*/func = BP_ABS_FUNC(bp);
- int vn, mfw_vn;
+ int vn;
u32 val = 0, val2 = 0;
int rc = 0;
@@ -12000,12 +12012,10 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
/*
* Initialize MF configuration
*/
-
bp->mf_ov = 0;
bp->mf_mode = 0;
bp->mf_sub_mode = 0;
vn = BP_VN(bp);
- mfw_vn = BP_FW_MB_IDX(bp);
if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
@@ -12198,87 +12208,35 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
static void bnx2x_read_fwinfo(struct bnx2x *bp)
{
- int cnt, i, block_end, rodi;
- char vpd_start[BNX2X_VPD_LEN+1];
- char str_id_reg[VENDOR_ID_LEN+1];
- char str_id_cap[VENDOR_ID_LEN+1];
- char *vpd_data;
- char *vpd_extended_data = NULL;
- u8 len;
+ char str_id[VENDOR_ID_LEN + 1];
+ unsigned int vpd_len, kw_len;
+ u8 *vpd_data;
+ int rodi;
- cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
- if (cnt < BNX2X_VPD_LEN)
- goto out_not_found;
-
- /* VPD RO tag should be first tag after identifier string, hence
- * we should be able to find it in first BNX2X_VPD_LEN chars
- */
- i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
- PCI_VPD_LRDT_RO_DATA);
- if (i < 0)
- goto out_not_found;
-
- block_end = i + PCI_VPD_LRDT_TAG_SIZE +
- pci_vpd_lrdt_size(&vpd_start[i]);
-
- i += PCI_VPD_LRDT_TAG_SIZE;
-
- if (block_end > BNX2X_VPD_LEN) {
- vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
- if (vpd_extended_data == NULL)
- goto out_not_found;
-
- /* read rest of vpd image into vpd_extended_data */
- memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
- cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
- block_end - BNX2X_VPD_LEN,
- vpd_extended_data + BNX2X_VPD_LEN);
- if (cnt < (block_end - BNX2X_VPD_LEN))
- goto out_not_found;
- vpd_data = vpd_extended_data;
- } else
- vpd_data = vpd_start;
-
- /* now vpd_data holds full vpd content in both cases */
-
- rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
- PCI_VPD_RO_KEYWORD_MFR_ID);
- if (rodi < 0)
- goto out_not_found;
-
- len = pci_vpd_info_field_size(&vpd_data[rodi]);
+ vpd_data = pci_vpd_alloc(bp->pdev, &vpd_len);
+ if (IS_ERR(vpd_data))
+ return;
- if (len != VENDOR_ID_LEN)
+ rodi = pci_vpd_find_ro_info_keyword(vpd_data, vpd_len,
+ PCI_VPD_RO_KEYWORD_MFR_ID, &kw_len);
+ if (rodi < 0 || kw_len != VENDOR_ID_LEN)
goto out_not_found;
- rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
-
/* vendor specific info */
- snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
- snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
- if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
- !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
-
- rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
- PCI_VPD_RO_KEYWORD_VENDOR0);
- if (rodi >= 0) {
- len = pci_vpd_info_field_size(&vpd_data[rodi]);
-
- rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
-
- if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
- memcpy(bp->fw_ver, &vpd_data[rodi], len);
- bp->fw_ver[len] = ' ';
- }
+ snprintf(str_id, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
+ if (!strncasecmp(str_id, &vpd_data[rodi], VENDOR_ID_LEN)) {
+ rodi = pci_vpd_find_ro_info_keyword(vpd_data, vpd_len,
+ PCI_VPD_RO_KEYWORD_VENDOR0,
+ &kw_len);
+ if (rodi >= 0 && kw_len < sizeof(bp->fw_ver)) {
+ memcpy(bp->fw_ver, &vpd_data[rodi], kw_len);
+ bp->fw_ver[kw_len] = ' ';
}
- kfree(vpd_extended_data);
- return;
}
out_not_found:
- kfree(vpd_extended_data);
- return;
+ kfree(vpd_data);
}
static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
@@ -12352,7 +12310,9 @@ static int bnx2x_init_bp(struct bnx2x *bp)
if (rc)
return rc;
} else {
- eth_zero_addr(bp->dev->dev_addr);
+ static const u8 zero_addr[ETH_ALEN] = {};
+
+ eth_hw_addr_set(bp->dev, zero_addr);
}
bnx2x_set_modes_bitmap(bp);
@@ -12393,12 +12353,12 @@ static int bnx2x_init_bp(struct bnx2x *bp)
/* Set TPA flags */
if (bp->disable_tpa) {
- bp->dev->hw_features &= ~NETIF_F_LRO;
- bp->dev->features &= ~NETIF_F_LRO;
+ bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+ bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
}
if (CHIP_IS_E1(bp))
- bp->dropless_fc = 0;
+ bp->dropless_fc = false;
else
bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
@@ -12414,10 +12374,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
- init_timer(&bp->timer);
+ timer_setup(&bp->timer, bnx2x_timer, 0);
bp->timer.expires = jiffies + bp->current_interval;
- bp->timer.data = (unsigned long) bp;
- bp->timer.function = bnx2x_timer;
if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
@@ -12464,9 +12422,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->dump_preset_idx = 1;
- if (CHIP_IS_E3B0(bp))
- bp->flags |= PTP_SUPPORTED;
-
return rc;
}
@@ -12545,9 +12500,6 @@ static int bnx2x_open(struct net_device *dev)
if (rc)
return rc;
- if (IS_PF(bp))
- udp_tunnel_get_rx_info(dev);
-
return 0;
}
@@ -12864,29 +12816,11 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (!netif_running(dev))
return -EAGAIN;
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return bnx2x_hwtstamp_ioctl(bp, ifr);
- default:
- DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
- mdio->phy_id, mdio->reg_num, mdio->val_in);
- return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
- }
+ DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
+ mdio->phy_id, mdio->reg_num, mdio->val_in);
+ return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void poll_bnx2x(struct net_device *dev)
-{
- struct bnx2x *bp = netdev_priv(dev);
- int i;
-
- for_each_eth_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
- napi_schedule(&bnx2x_fp(bp, fp->index, napi));
- }
-}
-#endif
-
static int bnx2x_validate_addr(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -12920,6 +12854,24 @@ static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
+ /*
+ * A skb with gso_size + header length > 9700 will cause a
+ * firmware panic. Drop GSO support.
+ *
+ * Eventually the upper layer should not pass these packets down.
+ *
+ * For speed, if the gso_size is <= 9000, assume there will
+ * not be 700 bytes of headers and pass it through. Only do a
+ * full (slow) validation if the gso_size is > 9000.
+ *
+ * (Due to the way SKB_BY_FRAGS works this will also do a full
+ * validation in that case.)
+ */
+ if (unlikely(skb_is_gso(skb) &&
+ (skb_shinfo(skb)->gso_size > 9000) &&
+ !skb_gso_validate_mac_len(skb, 9700)))
+ features &= ~NETIF_F_GSO_MASK;
+
features = vlan_features_check(skb, features);
return vxlan_features_check(skb, features);
}
@@ -12989,13 +12941,6 @@ static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
{
- struct bnx2x_vlan_entry *vlan;
-
- /* The hw forgot all entries after reload */
- list_for_each_entry(vlan, &bp->vlan_reg, link)
- vlan->hw = false;
- bp->vlan_cnt = 0;
-
/* Don't set rx mode here. Our caller will do it. */
bnx2x_vlan_configure(bp, false);
@@ -13068,21 +13013,19 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_set_rx_mode = bnx2x_set_rx_mode,
.ndo_set_mac_address = bnx2x_change_mac_addr,
.ndo_validate_addr = bnx2x_validate_addr,
- .ndo_do_ioctl = bnx2x_ioctl,
+ .ndo_eth_ioctl = bnx2x_ioctl,
.ndo_change_mtu = bnx2x_change_mtu,
.ndo_fix_features = bnx2x_fix_features,
.ndo_set_features = bnx2x_set_features,
.ndo_tx_timeout = bnx2x_tx_timeout,
.ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = poll_bnx2x,
-#endif
.ndo_setup_tc = __bnx2x_setup_tc,
#ifdef CONFIG_BNX2X_SRIOV
.ndo_set_vf_mac = bnx2x_set_vf_mac,
.ndo_set_vf_vlan = bnx2x_set_vf_vlan,
.ndo_get_vf_config = bnx2x_get_vf_config,
+ .ndo_set_vf_spoofchk = bnx2x_set_vf_spoofchk,
#endif
#ifdef NETDEV_FCOE_WWNN
.ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
@@ -13091,31 +13034,10 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_get_phys_port_id = bnx2x_get_phys_port_id,
.ndo_set_vf_link_state = bnx2x_set_vf_link_state,
.ndo_features_check = bnx2x_features_check,
- .ndo_udp_tunnel_add = bnx2x_udp_tunnel_add,
- .ndo_udp_tunnel_del = bnx2x_udp_tunnel_del,
+ .ndo_hwtstamp_get = bnx2x_hwtstamp_get,
+ .ndo_hwtstamp_set = bnx2x_hwtstamp_set,
};
-static int bnx2x_set_coherency_mask(struct bnx2x *bp)
-{
- struct device *dev = &bp->pdev->dev;
-
- if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
- dev_err(dev, "System does not support DMA, aborting\n");
- return -EIO;
- }
-
- return 0;
-}
-
-static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
-{
- if (bp->flags & AER_ENABLED) {
- pci_disable_pcie_error_reporting(bp->pdev);
- bp->flags &= ~AER_ENABLED;
- }
-}
-
static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
struct net_device *dev, unsigned long board_type)
{
@@ -13185,9 +13107,11 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
goto err_out_release;
}
- rc = bnx2x_set_coherency_mask(bp);
- if (rc)
+ rc = dma_set_mask_and_coherent(&bp->pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ dev_err(&bp->pdev->dev, "System does not support DMA, aborting\n");
goto err_out_release;
+ }
dev->mem_start = pci_resource_start(pdev, 0);
dev->base_addr = dev->mem_start;
@@ -13226,13 +13150,6 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
/* Set PCIe reset type to fundamental for EEH recovery */
pdev->needs_freset = 1;
- /* AER (Advanced Error reporting) configuration */
- rc = pci_enable_pcie_error_reporting(pdev);
- if (!rc)
- bp->flags |= AER_ENABLED;
- else
- BNX2X_DEV_INFO("Failed To configure PCIe AER [%d]\n", rc);
-
/*
* Clean the following indirect addresses for all functions since it
* is not used by the driver.
@@ -13268,7 +13185,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
- NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
+ NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_GRO_HW |
NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
if (!chip_is_e1x) {
dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
@@ -13287,6 +13204,9 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
dev->gso_partial_features = NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ if (IS_PF(bp))
+ dev->udp_tunnel_nic_info = &bnx2x_udp_tunnels;
}
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -13304,6 +13224,8 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
dev->features |= NETIF_F_HIGHDMA;
+ if (dev->features & NETIF_F_LRO)
+ dev->features &= ~NETIF_F_GRO_HW;
/* Add Loopback capability to the device */
dev->hw_features |= NETIF_F_LOOPBACK;
@@ -13381,16 +13303,11 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
/* Check FW version */
offset = be32_to_cpu(fw_hdr->fw_version.offset);
fw_ver = firmware->data + offset;
- if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
- (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
- (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
- (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
+ if (fw_ver[0] != bp->fw_major || fw_ver[1] != bp->fw_minor ||
+ fw_ver[2] != bp->fw_rev || fw_ver[3] != bp->fw_eng) {
BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
- fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
- BCM_5710_FW_MAJOR_VERSION,
- BCM_5710_FW_MINOR_VERSION,
- BCM_5710_FW_REVISION_VERSION,
- BCM_5710_FW_ENGINEERING_VERSION);
+ fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
+ bp->fw_major, bp->fw_minor, bp->fw_rev, bp->fw_eng);
return -EINVAL;
}
@@ -13470,32 +13387,49 @@ do { \
static int bnx2x_init_firmware(struct bnx2x *bp)
{
- const char *fw_file_name;
+ const char *fw_file_name, *fw_file_name_v15;
struct bnx2x_fw_file_hdr *fw_hdr;
int rc;
if (bp->firmware)
return 0;
- if (CHIP_IS_E1(bp))
+ if (CHIP_IS_E1(bp)) {
fw_file_name = FW_FILE_NAME_E1;
- else if (CHIP_IS_E1H(bp))
+ fw_file_name_v15 = FW_FILE_NAME_E1_V15;
+ } else if (CHIP_IS_E1H(bp)) {
fw_file_name = FW_FILE_NAME_E1H;
- else if (!CHIP_IS_E1x(bp))
+ fw_file_name_v15 = FW_FILE_NAME_E1H_V15;
+ } else if (!CHIP_IS_E1x(bp)) {
fw_file_name = FW_FILE_NAME_E2;
- else {
+ fw_file_name_v15 = FW_FILE_NAME_E2_V15;
+ } else {
BNX2X_ERR("Unsupported chip revision\n");
return -EINVAL;
}
+
BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
if (rc) {
- BNX2X_ERR("Can't load firmware file %s\n",
- fw_file_name);
- goto request_firmware_exit;
+ BNX2X_DEV_INFO("Trying to load older fw %s\n", fw_file_name_v15);
+
+ /* try to load prev version */
+ rc = request_firmware(&bp->firmware, fw_file_name_v15, &bp->pdev->dev);
+
+ if (rc)
+ goto request_firmware_exit;
+
+ bp->fw_rev = BCM_5710_FW_REVISION_VERSION_V15;
+ } else {
+ bp->fw_cap |= FW_CAP_INVALIDATE_VF_FP_HSI;
+ bp->fw_rev = BCM_5710_FW_REVISION_VERSION;
}
+ bp->fw_major = BCM_5710_FW_MAJOR_VERSION;
+ bp->fw_minor = BCM_5710_FW_MINOR_VERSION;
+ bp->fw_eng = BCM_5710_FW_ENGINEERING_VERSION;
+
rc = bnx2x_check_firmware(bp);
if (rc) {
BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
@@ -13605,9 +13539,9 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
}
/**
- * bnx2x_get_num_none_def_sbs - return the number of none default SBs
- *
- * @dev: pci device
+ * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
+ * @pdev: pci device
+ * @cnic_cnt: count
*
*/
static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
@@ -13723,19 +13657,20 @@ static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
return bnx2x_func_state_change(bp, &func_params);
}
-static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int bnx2x_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
int rc;
int drift_dir = 1;
int val, period, period1, period2, dif, dif1, dif2;
int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0;
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
- DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb);
+ DP(BNX2X_MSG_PTP, "PTP adjfine called, ppb = %d\n", ppb);
if (!netif_running(bp->dev)) {
DP(BNX2X_MSG_PTP,
- "PTP adjfreq called while the interface is down\n");
+ "PTP adjfine called while the interface is down\n");
return -ENETDOWN;
}
@@ -13860,7 +13795,7 @@ static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
return -ENOTSUPP;
}
-static void bnx2x_register_phc(struct bnx2x *bp)
+void bnx2x_register_phc(struct bnx2x *bp)
{
/* Fill the ptp_clock_info struct and register PTP clock*/
bp->ptp_clock_info.owner = THIS_MODULE;
@@ -13870,7 +13805,7 @@ static void bnx2x_register_phc(struct bnx2x *bp)
bp->ptp_clock_info.n_ext_ts = 0;
bp->ptp_clock_info.n_per_out = 0;
bp->ptp_clock_info.pps = 0;
- bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq;
+ bp->ptp_clock_info.adjfine = bnx2x_ptp_adjfine;
bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime;
bp->ptp_clock_info.settime64 = bnx2x_ptp_settime;
@@ -13879,7 +13814,7 @@ static void bnx2x_register_phc(struct bnx2x *bp)
bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
if (IS_ERR(bp->ptp_clock)) {
bp->ptp_clock = NULL;
- BNX2X_ERR("PTP clock registeration failed\n");
+ BNX2X_ERR("PTP clock registration failed\n");
}
}
@@ -13888,8 +13823,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
{
struct net_device *dev = NULL;
struct bnx2x *bp;
- enum pcie_link_width pcie_width;
- enum pci_bus_speed pcie_speed;
int rc, max_non_def_sbs;
int rx_count, tx_count, rss_count, doorbell_size;
int max_cos_est;
@@ -13993,7 +13926,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
rc = -ENOMEM;
goto init_one_freemem;
}
- bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
+ bp->doorbells = ioremap(pci_resource_start(pdev, 2),
doorbell_size);
}
if (!bp->doorbells) {
@@ -14057,23 +13990,12 @@ static int bnx2x_init_one(struct pci_dev *pdev,
dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
rtnl_unlock();
}
- if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) ||
- pcie_speed == PCI_SPEED_UNKNOWN ||
- pcie_width == PCIE_LNK_WIDTH_UNKNOWN)
- BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n");
- else
- BNX2X_DEV_INFO(
- "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
- board_info[ent->driver_data].name,
- (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
- pcie_width,
- pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" :
- pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" :
- pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :
- "Unknown",
- dev->base_addr, bp->pdev->irq, dev->dev_addr);
-
- bnx2x_register_phc(bp);
+ BNX2X_DEV_INFO(
+ "%s (%c%d) PCI-E found at mem %lx, IRQ %d, node addr %pM\n",
+ board_info[ent->driver_data].name,
+ (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
+ dev->base_addr, bp->pdev->irq, dev->dev_addr);
+ pcie_print_link_status(bp->pdev);
if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
@@ -14084,8 +14006,6 @@ init_one_freemem:
bnx2x_free_mem_bp(bp);
init_one_exit:
- bnx2x_disable_pcie_error_reporting(bp);
-
if (bp->regview)
iounmap(bp->regview);
@@ -14107,11 +14027,6 @@ static void __bnx2x_remove(struct pci_dev *pdev,
struct bnx2x *bp,
bool remove_netdev)
{
- if (bp->ptp_clock) {
- ptp_clock_unregister(bp->ptp_clock);
- bp->ptp_clock = NULL;
- }
-
/* Delete storage MAC address */
if (!NO_FCOE(bp)) {
rtnl_lock();
@@ -14171,7 +14086,6 @@ static void __bnx2x_remove(struct pci_dev *pdev,
pci_set_power_state(pdev, PCI_D3hot);
}
- bnx2x_disable_pcie_error_reporting(bp);
if (remove_netdev) {
if (bp->regview)
iounmap(bp->regview);
@@ -14223,13 +14137,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
/* Stop Tx */
bnx2x_tx_disable(bp);
- /* Delete all NAPI objects */
- bnx2x_del_all_napi(bp);
- if (CNIC_LOADED(bp))
- bnx2x_del_all_napi_cnic(bp);
netdev_reset_tc(bp->dev);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
cancel_delayed_work_sync(&bp->sp_task);
cancel_delayed_work_sync(&bp->period_task);
@@ -14306,7 +14216,6 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
if (netif_running(dev))
bnx2x_set_power_state(bp, PCI_D0);
@@ -14315,7 +14224,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
BNX2X_ERR("IO slot reset --> driver unload\n");
/* MCP should have been reset; Need to wait for validity */
- bnx2x_init_shmem(bp);
+ if (bnx2x_init_shmem(bp)) {
+ rtnl_unlock();
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
u32 v;
@@ -14327,8 +14239,16 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
}
bnx2x_drain_tx_queues(bp);
bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
- bnx2x_netif_stop(bp, 1);
- bnx2x_free_irq(bp);
+ if (!bp->nic_stopped) {
+ bnx2x_netif_stop(bp, 1);
+ bnx2x_del_all_napi(bp);
+
+ if (CNIC_LOADED(bp))
+ bnx2x_del_all_napi_cnic(bp);
+
+ bnx2x_free_irq(bp);
+ bp->nic_stopped = true;
+ }
/* Report UNLOAD_DONE to MCP */
bnx2x_send_unload_done(bp, true);
@@ -14353,14 +14273,6 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
rtnl_unlock();
- /* If AER, perform cleanup of the PCIe registers */
- if (bp->flags & AER_ENABLED) {
- if (pci_cleanup_aer_uncorrect_error_status(pdev))
- BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n");
- else
- DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n");
- }
-
return PCI_ERS_RESULT_RECOVERED;
}
@@ -14386,11 +14298,16 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
DRV_MSG_SEQ_NUMBER_MASK;
- if (netif_running(dev))
- bnx2x_nic_load(bp, LOAD_NORMAL);
+ if (netif_running(dev)) {
+ if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
+ netdev_err(bp->dev, "Error during driver initialization, try unloading/reloading the driver\n");
+ goto done;
+ }
+ }
netif_device_attach(dev);
+done:
rtnl_unlock();
}
@@ -14428,8 +14345,7 @@ static struct pci_driver bnx2x_pci_driver = {
.id_table = bnx2x_pci_tbl,
.probe = bnx2x_init_one,
.remove = bnx2x_remove_one,
- .suspend = bnx2x_suspend,
- .resume = bnx2x_resume,
+ .driver.pm = &bnx2x_pm_ops,
.err_handler = &bnx2x_err_handler,
#ifdef CONFIG_BNX2X_SRIOV
.sriov_configure = bnx2x_sriov_configure,
@@ -14441,8 +14357,6 @@ static int __init bnx2x_init(void)
{
int ret;
- pr_info("%s", version);
-
bnx2x_wq = create_singlethread_workqueue("bnx2x");
if (bnx2x_wq == NULL) {
pr_err("Cannot create workqueue\n");
@@ -14492,9 +14406,7 @@ module_exit(bnx2x_cleanup);
/**
* bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
- *
* @bp: driver handle
- * @set: set or clear the CAM entry
*
* This function will wait until the ramrod completion returns.
* Return 0 if success, -ENODEV if ramrod doesn't return.
@@ -14777,7 +14689,6 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
if (rc)
break;
- mmiowb();
barrier();
/* Start accepting on iSCSI L2 ring */
@@ -14812,7 +14723,6 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
if (!bnx2x_wait_sp_comp(bp, sp_bits))
BNX2X_ERR("rx_mode completion timed out!\n");
- mmiowb();
barrier();
/* Unset iSCSI L2 MAC */
@@ -15000,9 +14910,11 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
else
cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
+ cp->irq_arr[0].status_blk_map = bp->cnic_sb_mapping;
cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
cp->irq_arr[1].status_blk = bp->def_status_blk;
+ cp->irq_arr[1].status_blk_map = bp->def_status_blk_mapping;
cp->irq_arr[1].status_blk_num = DEF_SB_ID;
cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
@@ -15192,11 +15104,24 @@ static void bnx2x_ptp_task(struct work_struct *work)
u32 val_seq;
u64 timestamp, ns;
struct skb_shared_hwtstamps shhwtstamps;
+ bool bail = true;
+ int i;
- /* Read Tx timestamp registers */
- val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
- NIG_REG_P0_TLLH_PTP_BUF_SEQID);
- if (val_seq & 0x10000) {
+ /* FW may take a while to complete timestamping; try a bit and if it's
+ * still not complete, it may indicate an error state - bail out then.
+ */
+ for (i = 0; i < 10; i++) {
+ /* Read Tx timestamp registers */
+ val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
+ NIG_REG_P0_TLLH_PTP_BUF_SEQID);
+ if (val_seq & 0x10000) {
+ bail = false;
+ break;
+ }
+ msleep(1 << i);
+ }
+
+ if (!bail) {
/* There is a valid timestamp value */
timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
@@ -15211,16 +15136,18 @@ static void bnx2x_ptp_task(struct work_struct *work)
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
- dev_kfree_skb_any(bp->ptp_tx_skb);
- bp->ptp_tx_skb = NULL;
DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
timestamp, ns);
} else {
- DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n");
- /* Reschedule to keep checking for a valid timestamp value */
- schedule_work(&bp->ptp_task);
+ DP(BNX2X_MSG_PTP,
+ "Tx timestamp is not recorded (register read=%u)\n",
+ val_seq);
+ bp->eth_stats.ptp_skip_tx_ts++;
}
+
+ dev_kfree_skb_any(bp->ptp_tx_skb);
+ bp->ptp_tx_skb = NULL;
}
void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
@@ -15247,7 +15174,7 @@ void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
}
/* Read the PHC */
-static u64 bnx2x_cyclecounter_read(const struct cyclecounter *cc)
+static u64 bnx2x_cyclecounter_read(struct cyclecounter *cc)
{
struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
int port = BP_PORT(bp);
@@ -15325,27 +15252,48 @@ static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
return 0;
}
+#define BNX2X_P2P_DETECT_PARAM_MASK 0x5F5
+#define BNX2X_P2P_DETECT_RULE_MASK 0x3DBB
+#define BNX2X_PTP_TX_ON_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
+#define BNX2X_PTP_TX_ON_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
+#define BNX2X_PTP_V1_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EE)
+#define BNX2X_PTP_V1_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FFE)
+#define BNX2X_PTP_V2_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EA)
+#define BNX2X_PTP_V2_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FEE)
+#define BNX2X_PTP_V2_L2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6BF)
+#define BNX2X_PTP_V2_L2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EFF)
+#define BNX2X_PTP_V2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
+#define BNX2X_PTP_V2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
+
int bnx2x_configure_ptp_filters(struct bnx2x *bp)
{
int port = BP_PORT(bp);
+ u32 param, rule;
int rc;
if (!bp->hwtstamp_ioctl_called)
return 0;
+ param = port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
+ NIG_REG_P0_TLLH_PTP_PARAM_MASK;
+ rule = port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
+ NIG_REG_P0_TLLH_PTP_RULE_MASK;
switch (bp->tx_type) {
case HWTSTAMP_TX_ON:
bp->flags |= TX_TIMESTAMPING_EN;
- REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
- NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA);
- REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
- NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE);
+ REG_WR(bp, param, BNX2X_PTP_TX_ON_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_TX_ON_RULE_MASK);
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
+ case HWTSTAMP_TX_ONESTEP_P2P:
BNX2X_ERR("One-step timestamping is not supported\n");
return -ERANGE;
}
+ param = port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+ NIG_REG_P0_LLH_PTP_PARAM_MASK;
+ rule = port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+ NIG_REG_P0_LLH_PTP_RULE_MASK;
switch (bp->rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
@@ -15359,30 +15307,24 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
/* Initialize PTP detection for UDP/IPv4 events */
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE);
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE);
+ REG_WR(bp, param, BNX2X_PTP_V1_L4_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_V1_L4_RULE_MASK);
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
/* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA);
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE);
+ REG_WR(bp, param, BNX2X_PTP_V2_L4_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_V2_L4_RULE_MASK);
break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
/* Initialize PTP detection L2 events */
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF);
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF);
+ REG_WR(bp, param, BNX2X_PTP_V2_L2_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_V2_L2_RULE_MASK);
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
@@ -15390,10 +15332,8 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
/* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA);
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE);
+ REG_WR(bp, param, BNX2X_PTP_V2_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_V2_RULE_MASK);
break;
}
@@ -15409,36 +15349,57 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
return 0;
}
-static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
+static int bnx2x_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct bnx2x *bp = netdev_priv(dev);
int rc;
- DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");
+ DP(BNX2X_MSG_PTP, "HWTSTAMP SET called\n");
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ if (!netif_running(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device is down");
+ return -EAGAIN;
+ }
DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
- config.tx_type, config.rx_filter);
+ config->tx_type, config->rx_filter);
- if (config.flags) {
- BNX2X_ERR("config.flags is reserved for future use\n");
- return -EINVAL;
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_ON:
+ case HWTSTAMP_TX_OFF:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "One-step timestamping is not supported");
+ return -ERANGE;
}
- bp->hwtstamp_ioctl_called = 1;
- bp->tx_type = config.tx_type;
- bp->rx_filter = config.rx_filter;
+ bp->hwtstamp_ioctl_called = true;
+ bp->tx_type = config->tx_type;
+ bp->rx_filter = config->rx_filter;
rc = bnx2x_configure_ptp_filters(bp);
- if (rc)
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "HW configuration failure");
return rc;
+ }
- config.rx_filter = bp->rx_filter;
+ config->rx_filter = bp->rx_filter;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
+}
+
+static int bnx2x_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ config->rx_filter = bp->rx_filter;
+ config->tx_type = bp->tx_type;
+
+ return 0;
}
/* Configures HW for PTP */
@@ -15509,7 +15470,7 @@ void bnx2x_init_ptp(struct bnx2x *bp)
bnx2x_init_cyclecounter(bp);
timecounter_init(&bp->timecounter, &bp->cyclecounter,
ktime_to_ns(ktime_get_real()));
- bp->timecounter_init_done = 1;
+ bp->timecounter_init_done = true;
}
DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n");