Diffstat (limited to 'drivers/net/ethernet/cavium/liquidio')
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c |  82
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h |   2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h |   2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_core.c          | 728
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_ethtool.c       | 352
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c          | 956
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_main.c       | 598
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/liquidio_common.h   |  11
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_config.h     |  13
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_console.c    | 153
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.c     |  27
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.h     |  20
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_droq.c       |  10
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_main.h       |   6
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_network.h    |  35
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/request_manager.c   |  11
16 files changed, 1576 insertions(+), 1430 deletions(-)
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 4b0ca9fb2cb4..e8b290473ee2 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -1150,14 +1150,50 @@ static void cn23xx_get_pcie_qlmport(struct octeon_device *oct)
oct->pcie_port);
}
-static void cn23xx_get_pf_num(struct octeon_device *oct)
+static int cn23xx_get_pf_num(struct octeon_device *oct)
{
u32 fdl_bit = 0;
+ u64 pkt0_in_ctl, d64;
+ int pfnum, mac, trs, ret;
+
+ ret = 0;
/** Read Function Dependency Link reg to get the function number */
- pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL, &fdl_bit);
- oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) &
- CN23XX_PCIE_SRIOV_FDL_MASK);
+ if (pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL,
+ &fdl_bit) == 0) {
+ oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) &
+ CN23XX_PCIE_SRIOV_FDL_MASK);
+ } else {
+ ret = -EINVAL;
+
+ /* Under some virtual environments, extended PCI regs are
+ * inaccessible, in which case the above read will have failed.
+ * In this case, read the PF number from the
+ * SLI_PKT0_INPUT_CONTROL reg (written by f/w)
+ */
+ pkt0_in_ctl = octeon_read_csr64(oct,
+ CN23XX_SLI_IQ_PKT_CONTROL64(0));
+ pfnum = (pkt0_in_ctl >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
+ CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
+ mac = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff;
+
+ /* validate PF num by reading RINFO; f/w writes RINFO.trs == 1*/
+ d64 = octeon_read_csr64(oct,
+ CN23XX_SLI_PKT_MAC_RINFO64(mac, pfnum));
+ trs = (int)(d64 >> CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS) & 0xff;
+ if (trs == 1) {
+ dev_err(&oct->pci_dev->dev,
+ "OCTEON: error reading PCI cfg space pfnum, re-read %u\n",
+ pfnum);
+ oct->pf_num = pfnum;
+ ret = 0;
+ } else {
+ dev_err(&oct->pci_dev->dev,
+ "OCTEON: error reading PCI cfg space pfnum; could not ascertain PF number\n");
+ }
+ }
+
+ return ret;
}
static void cn23xx_setup_reg_address(struct octeon_device *oct)
@@ -1269,6 +1305,26 @@ static int cn23xx_sriov_config(struct octeon_device *oct)
int setup_cn23xx_octeon_pf_device(struct octeon_device *oct)
{
+ u32 data32;
+ u64 BAR0, BAR1;
+
+ pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_0, &data32);
+ BAR0 = (u64)(data32 & ~0xf);
+ pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_1, &data32);
+ BAR0 |= ((u64)data32 << 32);
+ pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_2, &data32);
+ BAR1 = (u64)(data32 & ~0xf);
+ pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_3, &data32);
+ BAR1 |= ((u64)data32 << 32);
+
+ if (!BAR0 || !BAR1) {
+ if (!BAR0)
+ dev_err(&oct->pci_dev->dev, "device BAR0 unassigned\n");
+ if (!BAR1)
+ dev_err(&oct->pci_dev->dev, "device BAR1 unassigned\n");
+ return 1;
+ }
+
if (octeon_map_pci_barx(oct, 0, 0))
return 1;
@@ -1279,7 +1335,8 @@ int setup_cn23xx_octeon_pf_device(struct octeon_device *oct)
return 1;
}
- cn23xx_get_pf_num(oct);
+ if (cn23xx_get_pf_num(oct) != 0)
+ return 1;
if (cn23xx_sriov_config(oct)) {
octeon_unmap_pci_barx(oct, 0);
@@ -1405,8 +1462,19 @@ int cn23xx_fw_loaded(struct octeon_device *oct)
{
u64 val;
- val = octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1);
- return (val >> 1) & 1ULL;
+ /* If there's more than one active PF on this NIC, then that
+ * implies that the NIC firmware is loaded and running. This check
+ * prevents a rare false negative that might occur if we only relied
+ * on checking the SCR2_BIT_FW_LOADED flag. The false negative would
+ * happen if the PF driver sees SCR2_BIT_FW_LOADED as cleared even
+ * though the firmware was already loaded but still booting and has yet
+ * to set SCR2_BIT_FW_LOADED.
+ */
+ if (atomic_read(oct->adapter_refcount) > 1)
+ return 1;
+
+ val = octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);
+ return (val >> SCR2_BIT_FW_LOADED) & 1ULL;
}
void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx,
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
index dee604651ba7..2aba5247b6d8 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
@@ -24,8 +24,6 @@
#include "cn23xx_pf_regs.h"
-#define LIO_CMD_WAIT_TM 100
-
/* Register address and configuration for a CN23XX devices.
* If device specific changes need to be made then add a struct to include
* device specific fields as shown in the commented section
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h
index 3f98c7334957..2d06097d3f61 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h
@@ -36,8 +36,6 @@ struct octeon_cn23xx_vf {
#define CN23XX_MAILBOX_MSGPARAM_SIZE 6
-#define MAX_VF_IP_OP_PENDING_PKT_COUNT 100
-
void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct);
int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index adde7745d069..23f6b60030c5 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -165,9 +165,6 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
/* If command is successful, change the MTU. */
netif_info(lio, probe, lio->netdev, "MTU Changed from %d to %d\n",
netdev->mtu, nctrl->ncmd.s.param1);
- dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
- netdev->name, netdev->mtu,
- nctrl->ncmd.s.param1);
netdev->mtu = nctrl->ncmd.s.param1;
queue_delayed_work(lio->link_status_wq.wq,
&lio->link_status_wq.wk.work, 0);
@@ -275,6 +272,11 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
break;
+ case OCTNET_CMD_QUEUE_COUNT_CTL:
+ netif_info(lio, probe, lio->netdev, "Queue count updated to %d\n",
+ nctrl->ncmd.s.param1);
+ break;
+
default:
dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
nctrl->ncmd.s.cmd);
@@ -364,3 +366,723 @@ void cleanup_rx_oom_poll_fn(struct net_device *netdev)
destroy_workqueue(lio->rxq_status_wq.wq);
}
}
+
+/* Runs in interrupt context. */
+static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
+{
+ struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
+ struct net_device *netdev;
+ struct lio *lio;
+
+ netdev = oct->props[iq->ifidx].netdev;
+
+ /* This is needed because the first IQ does not have
+ * a netdev associated with it.
+ */
+ if (!netdev)
+ return;
+
+ lio = GET_LIO(netdev);
+ if (netif_is_multiqueue(netdev)) {
+ if (__netif_subqueue_stopped(netdev, iq->q_index) &&
+ lio->linfo.link.s.link_up &&
+ (!octnet_iq_is_full(oct, iq_num))) {
+ netif_wake_subqueue(netdev, iq->q_index);
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
+ tx_restart, 1);
+ }
+ } else if (netif_queue_stopped(netdev) &&
+ lio->linfo.link.s.link_up &&
+ (!octnet_iq_is_full(oct, lio->txq))) {
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
+ tx_restart, 1);
+ netif_wake_queue(netdev);
+ }
+}
+
+/**
+ * \brief Setup output queue
+ * @param oct octeon device
+ * @param q_no which queue
+ * @param num_descs how many descriptors
+ * @param desc_size size of each descriptor
+ * @param app_ctx application context
+ */
+static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
+ int desc_size, void *app_ctx)
+{
+ int ret_val;
+
+ dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
+ /* droq creation and local register settings. */
+ ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
+ if (ret_val < 0)
+ return ret_val;
+
+ if (ret_val == 1) {
+ dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
+ return 0;
+ }
+
+ /* Enable the droq queues */
+ octeon_set_droq_pkt_op(oct, q_no, 1);
+
+ /* Send Credit for Octeon Output queues. Credits are always
+ * sent after the output queue is enabled.
+ */
+ writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg);
+
+ return ret_val;
+}
+
+/** Routine to push packets arriving on the Octeon interface up to the
+ * network layer.
+ * @param octeon_id - octeon device id.
+ * @param skbuff - skbuff struct to be passed to network layer.
+ * @param len - size of total data received.
+ * @param rh - Control header associated with the packet
+ * @param param - additional control data with the packet
+ * @param arg - farg registered in droq_ops
+ */
+static void
+liquidio_push_packet(u32 octeon_id __attribute__((unused)),
+ void *skbuff,
+ u32 len,
+ union octeon_rh *rh,
+ void *param,
+ void *arg)
+{
+ struct net_device *netdev = (struct net_device *)arg;
+ struct octeon_droq *droq =
+ container_of(param, struct octeon_droq, napi);
+ struct sk_buff *skb = (struct sk_buff *)skbuff;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct napi_struct *napi = param;
+ u16 vtag = 0;
+ u32 r_dh_off;
+ u64 ns;
+
+ if (netdev) {
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ int packet_was_received;
+
+ /* Do not proceed if the interface is not in RUNNING state. */
+ if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
+ recv_buffer_free(skb);
+ droq->stats.rx_dropped++;
+ return;
+ }
+
+ skb->dev = netdev;
+
+ skb_record_rx_queue(skb, droq->q_no);
+ if (likely(len > MIN_SKB_SIZE)) {
+ struct octeon_skb_page_info *pg_info;
+ unsigned char *va;
+
+ pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ if (pg_info->page) {
+ /* For Paged allocation use the frags */
+ va = page_address(pg_info->page) +
+ pg_info->page_offset;
+ memcpy(skb->data, va, MIN_SKB_SIZE);
+ skb_put(skb, MIN_SKB_SIZE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ pg_info->page,
+ pg_info->page_offset +
+ MIN_SKB_SIZE,
+ len - MIN_SKB_SIZE,
+ LIO_RXBUFFER_SZ);
+ }
+ } else {
+ struct octeon_skb_page_info *pg_info =
+ ((struct octeon_skb_page_info *)(skb->cb));
+ skb_copy_to_linear_data(skb, page_address(pg_info->page)
+ + pg_info->page_offset, len);
+ skb_put(skb, len);
+ put_page(pg_info->page);
+ }
+
+ r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;
+
+ if (oct->ptp_enable) {
+ if (rh->r_dh.has_hwtstamp) {
+ /* timestamp is included from the hardware at
+ * the beginning of the packet.
+ */
+ if (ifstate_check
+ (lio,
+ LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
+ /* Nanoseconds are in the first 64-bits
+ * of the packet.
+ */
+ memcpy(&ns, (skb->data + r_dh_off),
+ sizeof(ns));
+ r_dh_off -= BYTES_PER_DHLEN_UNIT;
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp =
+ ns_to_ktime(ns +
+ lio->ptp_adjust);
+ }
+ }
+ }
+
+ if (rh->r_dh.has_hash) {
+ __be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
+ u32 hash = be32_to_cpu(*hash_be);
+
+ skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
+ r_dh_off -= BYTES_PER_DHLEN_UNIT;
+ }
+
+ skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if ((netdev->features & NETIF_F_RXCSUM) &&
+ (((rh->r_dh.encap_on) &&
+ (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
+ (!(rh->r_dh.encap_on) &&
+ (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
+ /* checksum has already been verified */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Setting Encapsulation field on basis of status received
+ * from the firmware
+ */
+ if (rh->r_dh.encap_on) {
+ skb->encapsulation = 1;
+ skb->csum_level = 1;
+ droq->stats.rx_vxlan++;
+ }
+
+ /* inbound VLAN tag */
+ if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ rh->r_dh.vlan) {
+ u16 priority = rh->r_dh.priority;
+ u16 vid = rh->r_dh.vlan;
+
+ vtag = (priority << VLAN_PRIO_SHIFT) | vid;
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
+ }
+
+ packet_was_received = (napi_gro_receive(napi, skb) != GRO_DROP);
+
+ if (packet_was_received) {
+ droq->stats.rx_bytes_received += len;
+ droq->stats.rx_pkts_received++;
+ } else {
+ droq->stats.rx_dropped++;
+ netif_info(lio, rx_err, lio->netdev,
+ "droq:%d error rx_dropped:%llu\n",
+ droq->q_no, droq->stats.rx_dropped);
+ }
+
+ } else {
+ recv_buffer_free(skb);
+ }
+}
+
+/**
+ * \brief wrapper for calling napi_schedule
+ * @param param parameters to pass to napi_schedule
+ *
+ * Used when scheduling on different CPUs
+ */
+static void napi_schedule_wrapper(void *param)
+{
+ struct napi_struct *napi = param;
+
+ napi_schedule(napi);
+}
+
+/**
+ * \brief callback when receive interrupt occurs and we are in NAPI mode
+ * @param arg pointer to octeon output queue
+ */
+static void liquidio_napi_drv_callback(void *arg)
+{
+ struct octeon_device *oct;
+ struct octeon_droq *droq = arg;
+ int this_cpu = smp_processor_id();
+
+ oct = droq->oct_dev;
+
+ if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct) ||
+ droq->cpu_id == this_cpu) {
+ napi_schedule_irqoff(&droq->napi);
+ } else {
+ call_single_data_t *csd = &droq->csd;
+
+ csd->func = napi_schedule_wrapper;
+ csd->info = &droq->napi;
+ csd->flags = 0;
+
+ smp_call_function_single_async(droq->cpu_id, csd);
+ }
+}
+
+/**
+ * \brief Entry point for NAPI polling
+ * @param napi NAPI structure
+ * @param budget maximum number of items to process
+ */
+static int liquidio_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct octeon_instr_queue *iq;
+ struct octeon_device *oct;
+ struct octeon_droq *droq;
+ int tx_done = 0, iq_no;
+ int work_done;
+
+ droq = container_of(napi, struct octeon_droq, napi);
+ oct = droq->oct_dev;
+ iq_no = droq->q_no;
+
+ /* Handle Droq descriptors */
+ work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
+ POLL_EVENT_PROCESS_PKTS,
+ budget);
+
+ /* Flush the instruction queue */
+ iq = oct->instr_queue[iq_no];
+ if (iq) {
+ /* TODO: move this check to inside octeon_flush_iq,
+ * once check_db_timeout is removed
+ */
+ if (atomic_read(&iq->instr_pending))
+ /* Process iq buffers within the budget limits */
+ tx_done = octeon_flush_iq(oct, iq, budget);
+ else
+ tx_done = 1;
+ /* Update iq read-index rather than waiting for next interrupt.
+ * Return early if tx_done is false.
+ */
+ /* sub-queue status update */
+ lio_update_txq_status(oct, iq_no);
+ } else {
+ dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
+ __func__, iq_no);
+ }
+
+#define MAX_REG_CNT 2000000U
+ /* force enable interrupt if reg cnts are high to avoid wraparound */
+ if ((work_done < budget && tx_done) ||
+ (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
+ (droq->pkt_count >= MAX_REG_CNT)) {
+ tx_done = 1;
+ napi_complete_done(napi, work_done);
+
+ octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
+ POLL_EVENT_ENABLE_INTR, 0);
+ return 0;
+ }
+
+ return (!tx_done) ? (budget) : (work_done);
+}
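
The poll routine above follows the standard NAPI contract. As a rough sketch of that contract (generic pseudo-driver code, not part of this patch; every example_* helper is a made-up placeholder):

    /* Generic NAPI poll skeleton (sketch only) */
    static int example_poll(struct napi_struct *napi, int budget)
    {
            /* process at most 'budget' rx packets */
            int done = example_process_rx(napi, budget);      /* hypothetical */

            if (done < budget && example_tx_drained(napi)) {  /* hypothetical */
                    /* under budget: leave polling mode, re-arm interrupts */
                    napi_complete_done(napi, done);
                    example_enable_irqs(napi);                /* hypothetical */
                    return done;
            }

            /* budget exhausted (or tx still pending): keep polling */
            return budget;
    }

liquidio additionally forces an early napi_complete_done() whenever the hardware packet counters approach MAX_REG_CNT, so the counters get reset before they can wrap.
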
+
+/**
+ * \brief Setup input and output queues
+ * @param octeon_dev octeon device
+ * @param ifidx Interface index
+ *
+ * Note: Queues are with respect to the octeon device. Thus
+ * an input queue is for egress packets, and output queues
+ * are for ingress packets.
+ */
+int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
+ u32 num_iqs, u32 num_oqs)
+{
+ struct octeon_droq_ops droq_ops;
+ struct net_device *netdev;
+ struct octeon_droq *droq;
+ struct napi_struct *napi;
+ int cpu_id_modulus;
+ int num_tx_descs;
+ struct lio *lio;
+ int retval = 0;
+ int q, q_no;
+ int cpu_id;
+
+ netdev = octeon_dev->props[ifidx].netdev;
+
+ lio = GET_LIO(netdev);
+
+ memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
+
+ droq_ops.fptr = liquidio_push_packet;
+ droq_ops.farg = netdev;
+
+ droq_ops.poll_mode = 1;
+ droq_ops.napi_fn = liquidio_napi_drv_callback;
+ cpu_id = 0;
+ cpu_id_modulus = num_present_cpus();
+
+ /* set up DROQs. */
+ for (q = 0; q < num_oqs; q++) {
+ q_no = lio->linfo.rxpciq[q].s.q_no;
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "%s index:%d linfo.rxpciq.s.q_no:%d\n",
+ __func__, q, q_no);
+ retval = octeon_setup_droq(
+ octeon_dev, q_no,
+ CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev),
+ lio->ifidx),
+ CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev),
+ lio->ifidx),
+ NULL);
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "%s : Runtime DROQ(RxQ) creation failed.\n",
+ __func__);
+ return 1;
+ }
+
+ droq = octeon_dev->droq[q_no];
+ napi = &droq->napi;
+ dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
+ (u64)netdev, (u64)octeon_dev);
+ netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
+
+ /* designate a CPU for this droq */
+ droq->cpu_id = cpu_id;
+ cpu_id++;
+ if (cpu_id >= cpu_id_modulus)
+ cpu_id = 0;
+
+ octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
+ }
+
+ if (OCTEON_CN23XX_PF(octeon_dev) || OCTEON_CN23XX_VF(octeon_dev)) {
+ /* 23XX PF/VF can send/recv control messages (via the first
+ * PF/VF-owned droq) from the firmware even if the ethX
+ * interface is down, so that's why poll_mode must be off
+ * for the first droq.
+ */
+ octeon_dev->droq[0]->ops.poll_mode = 0;
+ }
+
+ /* set up IQs. */
+ for (q = 0; q < num_iqs; q++) {
+ num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
+ octeon_get_conf(octeon_dev), lio->ifidx);
+ retval = octeon_setup_iq(octeon_dev, ifidx, q,
+ lio->linfo.txpciq[q], num_tx_descs,
+ netdev_get_tx_queue(netdev, q));
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ " %s : Runtime IQ(TxQ) creation failed.\n",
+ __func__);
+ return 1;
+ }
+
+ /* XPS */
+ if (!OCTEON_CN23XX_VF(octeon_dev) && octeon_dev->msix_on &&
+ octeon_dev->ioq_vector) {
+ struct octeon_ioq_vector *ioq_vector;
+
+ ioq_vector = &octeon_dev->ioq_vector[q];
+ netif_set_xps_queue(netdev,
+ &ioq_vector->affinity_mask,
+ ioq_vector->iq_index);
+ }
+ }
+
+ return 0;
+}
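
As the note at the top of this function says, queue directions are from the Octeon device's point of view, not the host's; an informal mapping for orientation:

    host transmit (ndo_start_xmit)  ->  Octeon input queue  (IQ, instr_queue)
    host receive  (NAPI poll)       <-  Octeon output queue (OQ, droq)
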
+
+static
+int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
+{
+ struct octeon_device *oct = droq->oct_dev;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+
+ if (droq->ops.poll_mode) {
+ droq->ops.napi_fn(droq);
+ } else {
+ if (ret & MSIX_PO_INT) {
+ if (OCTEON_CN23XX_VF(oct))
+ dev_err(&oct->pci_dev->dev,
+ "should not come here should not get rx when poll mode = 0 for vf\n");
+ tasklet_schedule(&oct_priv->droq_tasklet);
+ return 1;
+ }
+ /* this will be flushed periodically by check iq db */
+ if (ret & MSIX_PI_INT)
+ return 0;
+ }
+
+ return 0;
+}
+
+irqreturn_t
+liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
+{
+ struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
+ struct octeon_device *oct = ioq_vector->oct_dev;
+ struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
+ u64 ret;
+
+ ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
+
+ if (ret & MSIX_PO_INT || ret & MSIX_PI_INT)
+ liquidio_schedule_msix_droq_pkt_handler(droq, ret);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * \brief Droq packet processor scheduler
+ * @param oct octeon device
+ */
+static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
+{
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+ struct octeon_droq *droq;
+ u64 oq_no;
+
+ if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
+ for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
+ oq_no++) {
+ if (!(oct->droq_intr & BIT_ULL(oq_no)))
+ continue;
+
+ droq = oct->droq[oq_no];
+
+ if (droq->ops.poll_mode) {
+ droq->ops.napi_fn(droq);
+ oct_priv->napi_mask |= (1 << oq_no);
+ } else {
+ tasklet_schedule(&oct_priv->droq_tasklet);
+ }
+ }
+ }
+}
+
+/**
+ * \brief Interrupt handler for octeon
+ * @param irq unused
+ * @param dev octeon device
+ */
+static
+irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
+ void *dev)
+{
+ struct octeon_device *oct = (struct octeon_device *)dev;
+ irqreturn_t ret;
+
+ /* Disable our interrupts for the duration of ISR */
+ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
+
+ ret = oct->fn_list.process_interrupt_regs(oct);
+
+ if (ret == IRQ_HANDLED)
+ liquidio_schedule_droq_pkt_handlers(oct);
+
+ /* Re-enable our interrupts */
+ if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
+ oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
+
+ return ret;
+}
+
+/**
+ * \brief Setup interrupt for octeon device
+ * @param oct octeon device
+ *
+ * Enable interrupt in Octeon device as given in the PCI interrupt mask.
+ */
+int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
+{
+ struct msix_entry *msix_entries;
+ char *queue_irq_names = NULL;
+ int i, num_interrupts = 0;
+ int num_alloc_ioq_vectors;
+ char *aux_irq_name = NULL;
+ int num_ioq_vectors;
+ int irqret, err;
+
+ oct->num_msix_irqs = num_ioqs;
+ if (oct->msix_on) {
+ if (OCTEON_CN23XX_PF(oct)) {
+ num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;
+
+ /* one non ioq interrupt for handling
+ * sli_mac_pf_int_sum
+ */
+ oct->num_msix_irqs += 1;
+ } else if (OCTEON_CN23XX_VF(oct)) {
+ num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF;
+ }
+
+ /* allocate storage for the names assigned to each irq */
+ oct->irq_name_storage =
+ kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL);
+ if (!oct->irq_name_storage) {
+ dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
+ return -ENOMEM;
+ }
+
+ queue_irq_names = oct->irq_name_storage;
+
+ if (OCTEON_CN23XX_PF(oct))
+ aux_irq_name = &queue_irq_names
+ [IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];
+
+ oct->msix_entries = kcalloc(oct->num_msix_irqs,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!oct->msix_entries) {
+ dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ return -ENOMEM;
+ }
+
+ msix_entries = (struct msix_entry *)oct->msix_entries;
+
+ /* Assumption is that PF MSI-X vector entries run from pf_srn upward,
+ * with the last (non-ioq) entry at trs, rather than starting from 0;
+ * if not, change this code.
+ */
+ if (OCTEON_CN23XX_PF(oct)) {
+ for (i = 0; i < oct->num_msix_irqs - 1; i++)
+ msix_entries[i].entry =
+ oct->sriov_info.pf_srn + i;
+
+ msix_entries[oct->num_msix_irqs - 1].entry =
+ oct->sriov_info.trs;
+ } else if (OCTEON_CN23XX_VF(oct)) {
+ for (i = 0; i < oct->num_msix_irqs; i++)
+ msix_entries[i].entry = i;
+ }
+ num_alloc_ioq_vectors = pci_enable_msix_range(
+ oct->pci_dev, msix_entries,
+ oct->num_msix_irqs,
+ oct->num_msix_irqs);
+ if (num_alloc_ioq_vectors < 0) {
+ dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ return num_alloc_ioq_vectors;
+ }
+
+ dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
+
+ num_ioq_vectors = oct->num_msix_irqs;
+ /** For PF, there is one non-ioq interrupt handler */
+ if (OCTEON_CN23XX_PF(oct)) {
+ num_ioq_vectors -= 1;
+
+ snprintf(aux_irq_name, INTRNAMSIZ,
+ "LiquidIO%u-pf%u-aux", oct->octeon_id,
+ oct->pf_num);
+ irqret = request_irq(
+ msix_entries[num_ioq_vectors].vector,
+ liquidio_legacy_intr_handler, 0,
+ aux_irq_name, oct);
+ if (irqret) {
+ dev_err(&oct->pci_dev->dev,
+ "Request_irq failed for MSIX interrupt Error: %d\n",
+ irqret);
+ pci_disable_msix(oct->pci_dev);
+ kfree(oct->msix_entries);
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ oct->msix_entries = NULL;
+ return irqret;
+ }
+ }
+ for (i = 0 ; i < num_ioq_vectors ; i++) {
+ if (OCTEON_CN23XX_PF(oct))
+ snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
+ INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u",
+ oct->octeon_id, oct->pf_num, i);
+
+ if (OCTEON_CN23XX_VF(oct))
+ snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
+ INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u",
+ oct->octeon_id, oct->vf_num, i);
+
+ irqret = request_irq(msix_entries[i].vector,
+ liquidio_msix_intr_handler, 0,
+ &queue_irq_names[IRQ_NAME_OFF(i)],
+ &oct->ioq_vector[i]);
+
+ if (irqret) {
+ dev_err(&oct->pci_dev->dev,
+ "Request_irq failed for MSIX interrupt Error: %d\n",
+ irqret);
+ /** Free the non-ioq irq vector here. */
+ free_irq(msix_entries[num_ioq_vectors].vector,
+ oct);
+
+ while (i) {
+ i--;
+ /** clearing affinity mask. */
+ irq_set_affinity_hint(
+ msix_entries[i].vector,
+ NULL);
+ free_irq(msix_entries[i].vector,
+ &oct->ioq_vector[i]);
+ }
+ pci_disable_msix(oct->pci_dev);
+ kfree(oct->msix_entries);
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ oct->msix_entries = NULL;
+ return irqret;
+ }
+ oct->ioq_vector[i].vector = msix_entries[i].vector;
+ /* assign the cpu mask for this msix interrupt vector */
+ irq_set_affinity_hint(msix_entries[i].vector,
+ &oct->ioq_vector[i].affinity_mask
+ );
+ }
+ dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
+ oct->octeon_id);
+ } else {
+ err = pci_enable_msi(oct->pci_dev);
+ if (err)
+ dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
+ err);
+ else
+ oct->flags |= LIO_FLAG_MSI_ENABLED;
+
+ /* allocate storage for the names assigned to the irq */
+ oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
+ if (!oct->irq_name_storage)
+ return -ENOMEM;
+
+ queue_irq_names = oct->irq_name_storage;
+
+ if (OCTEON_CN23XX_PF(oct))
+ snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
+ "LiquidIO%u-pf%u-rxtx-%u",
+ oct->octeon_id, oct->pf_num, 0);
+
+ if (OCTEON_CN23XX_VF(oct))
+ snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
+ "LiquidIO%u-vf%u-rxtx-%u",
+ oct->octeon_id, oct->vf_num, 0);
+
+ irqret = request_irq(oct->pci_dev->irq,
+ liquidio_legacy_intr_handler,
+ IRQF_SHARED,
+ &queue_irq_names[IRQ_NAME_OFF(0)], oct);
+ if (irqret) {
+ if (oct->flags & LIO_FLAG_MSI_ENABLED)
+ pci_disable_msi(oct->pci_dev);
+ dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
+ irqret);
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ return irqret;
+ }
+ }
+ return 0;
+}
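
With MSI-X enabled, the per-vector names built above are what appear in /proc/interrupts; for a hypothetical 4-queue PF 0, the entries would look roughly like this (IRQ numbers illustrative):

    120: ... LiquidIO0-pf0-rxtx-0
    121: ... LiquidIO0-pf0-rxtx-1
    122: ... LiquidIO0-pf0-rxtx-2
    123: ... LiquidIO0-pf0-rxtx-3
    124: ... LiquidIO0-pf0-aux
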
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index ebd353bc78ff..a63ddf07f168 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -31,6 +31,7 @@
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"
+static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs);
static int octnet_get_link_stats(struct net_device *netdev);
struct oct_intrmod_context {
@@ -105,6 +106,7 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
"tx_total_sent",
"tx_total_fwd",
"tx_err_pko",
+ "tx_err_pki",
"tx_err_link",
"tx_err_drop",
@@ -299,6 +301,35 @@ lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
}
+static int
+lio_send_queue_count_update(struct net_device *netdev, uint32_t num_queues)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_QUEUE_COUNT_CTL;
+ nctrl.ncmd.s.param1 = num_queues;
+ nctrl.ncmd.s.param2 = num_queues;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Failed to send Queue reset command (ret: 0x%x)\n",
+ ret);
+ return -1;
+ }
+
+ return 0;
+}
+
static void
lio_ethtool_get_channels(struct net_device *dev,
struct ethtool_channels *channel)
@@ -306,6 +337,7 @@ lio_ethtool_get_channels(struct net_device *dev,
struct lio *lio = GET_LIO(dev);
struct octeon_device *oct = lio->oct_dev;
u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
+ u32 combined_count = 0, max_combined = 0;
if (OCTEON_CN6XXX(oct)) {
struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
@@ -315,22 +347,137 @@ lio_ethtool_get_channels(struct net_device *dev,
rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
} else if (OCTEON_CN23XX_PF(oct)) {
-
- max_rx = oct->sriov_info.num_pf_rings;
- max_tx = oct->sriov_info.num_pf_rings;
- rx_count = lio->linfo.num_rxpciq;
- tx_count = lio->linfo.num_txpciq;
+ max_combined = lio->linfo.num_txpciq;
+ combined_count = oct->num_iqs;
} else if (OCTEON_CN23XX_VF(oct)) {
- max_tx = oct->sriov_info.rings_per_vf;
- max_rx = oct->sriov_info.rings_per_vf;
- rx_count = lio->linfo.num_rxpciq;
- tx_count = lio->linfo.num_txpciq;
+ u64 reg_val = 0ULL;
+ u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);
+
+ reg_val = octeon_read_csr64(oct, ctrl);
+ reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
+ max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
+ combined_count = oct->num_iqs;
}
channel->max_rx = max_rx;
channel->max_tx = max_tx;
+ channel->max_combined = max_combined;
channel->rx_count = rx_count;
channel->tx_count = tx_count;
+ channel->combined_count = combined_count;
+}
+
+static int
+lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs)
+{
+ struct msix_entry *msix_entries;
+ int num_msix_irqs = 0;
+ int i;
+
+ if (!oct->msix_on)
+ return 0;
+
+ /* Disable device interrupts now. No further packet or interrupt
+ * processing will come from Octeon.
+ */
+ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
+
+ if (oct->msix_on) {
+ if (OCTEON_CN23XX_PF(oct))
+ num_msix_irqs = oct->num_msix_irqs - 1;
+ else if (OCTEON_CN23XX_VF(oct))
+ num_msix_irqs = oct->num_msix_irqs;
+
+ msix_entries = (struct msix_entry *)oct->msix_entries;
+ for (i = 0; i < num_msix_irqs; i++) {
+ if (oct->ioq_vector[i].vector) {
+ /* clear the affinity_cpumask */
+ irq_set_affinity_hint(msix_entries[i].vector,
+ NULL);
+ free_irq(msix_entries[i].vector,
+ &oct->ioq_vector[i]);
+ oct->ioq_vector[i].vector = 0;
+ }
+ }
+
+ /* non-iov vector's argument is oct struct */
+ if (OCTEON_CN23XX_PF(oct))
+ free_irq(msix_entries[i].vector, oct);
+
+ pci_disable_msix(oct->pci_dev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ }
+
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ if (octeon_setup_interrupt(oct, num_ioqs)) {
+ dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n");
+ return 1;
+ }
+
+ /* Enable Octeon device interrupts */
+ oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
+
+ return 0;
+}
+
+static int
+lio_ethtool_set_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ u32 combined_count, max_combined;
+ struct lio *lio = GET_LIO(dev);
+ struct octeon_device *oct = lio->oct_dev;
+ int stopped = 0;
+
+ if (strcmp(oct->fw_info.liquidio_firmware_version, "1.6.1") < 0) {
+ dev_err(&oct->pci_dev->dev, "Minimum firmware version required is 1.6.1\n");
+ return -EINVAL;
+ }
+
+ if (!channel->combined_count || channel->other_count ||
+ channel->rx_count || channel->tx_count)
+ return -EINVAL;
+
+ combined_count = channel->combined_count;
+
+ if (OCTEON_CN23XX_PF(oct)) {
+ max_combined = channel->max_combined;
+ } else if (OCTEON_CN23XX_VF(oct)) {
+ u64 reg_val = 0ULL;
+ u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);
+
+ reg_val = octeon_read_csr64(oct, ctrl);
+ reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
+ max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
+ } else {
+ return -EINVAL;
+ }
+
+ if (combined_count > max_combined || combined_count < 1)
+ return -EINVAL;
+
+ if (combined_count == oct->num_iqs)
+ return 0;
+
+ ifstate_set(lio, LIO_IFSTATE_RESETTING);
+
+ if (netif_running(dev)) {
+ dev->netdev_ops->ndo_stop(dev);
+ stopped = 1;
+ }
+
+ if (lio_reset_queues(dev, combined_count))
+ return -EINVAL;
+
+ lio_irq_reallocate_irqs(oct, combined_count);
+ if (stopped)
+ dev->netdev_ops->ndo_open(dev);
+
+ ifstate_reset(lio, LIO_IFSTATE_RESETTING);
+
+ return 0;
}
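
Both ->get_channels and ->set_channels are hooked into the ethtool_ops tables further down, so with this patch the queue count becomes adjustable from user space, e.g. (interface name illustrative):

    ethtool -l eth0              # show max/current combined channel counts
    ethtool -L eth0 combined 4   # reset the interface to 4 combined queues
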
static int lio_get_eeprom_len(struct net_device *netdev)
@@ -577,23 +724,18 @@ static int lio_set_phys_id(struct net_device *netdev,
break;
case ETHTOOL_ID_ON:
- if (oct->chip_id == OCTEON_CN66XX) {
+ if (oct->chip_id == OCTEON_CN66XX)
octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
VITESSE_PHY_GPIO_HIGH);
-
- } else if (oct->chip_id == OCTEON_CN68XX) {
- return -EINVAL;
- } else {
+ else
return -EINVAL;
- }
+
break;
case ETHTOOL_ID_OFF:
if (oct->chip_id == OCTEON_CN66XX)
octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
VITESSE_PHY_GPIO_LOW);
- else if (oct->chip_id == OCTEON_CN68XX)
- return -EINVAL;
else
return -EINVAL;
@@ -641,6 +783,9 @@ lio_ethtool_get_ringparam(struct net_device *netdev,
u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
rx_pending = 0;
+ if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
+ return;
+
if (OCTEON_CN6XXX(oct)) {
struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
@@ -648,33 +793,147 @@ lio_ethtool_get_ringparam(struct net_device *netdev,
rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
- } else if (OCTEON_CN23XX_PF(oct)) {
- struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf);
-
+ } else if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS;
rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS;
- rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf23, lio->ifidx);
- tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf23, lio->ifidx);
- }
-
- if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) {
- ering->rx_pending = 0;
- ering->rx_max_pending = 0;
- ering->rx_mini_pending = 0;
- ering->rx_jumbo_pending = rx_pending;
- ering->rx_mini_max_pending = 0;
- ering->rx_jumbo_max_pending = rx_max_pending;
- } else {
- ering->rx_pending = rx_pending;
- ering->rx_max_pending = rx_max_pending;
- ering->rx_mini_pending = 0;
- ering->rx_jumbo_pending = 0;
- ering->rx_mini_max_pending = 0;
- ering->rx_jumbo_max_pending = 0;
+ rx_pending = oct->droq[0]->max_count;
+ tx_pending = oct->instr_queue[0]->max_count;
}
ering->tx_pending = tx_pending;
ering->tx_max_pending = tx_max_pending;
+ ering->rx_pending = rx_pending;
+ ering->rx_max_pending = rx_max_pending;
+ ering->rx_mini_pending = 0;
+ ering->rx_jumbo_pending = 0;
+ ering->rx_mini_max_pending = 0;
+ ering->rx_jumbo_max_pending = 0;
+}
+
+static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct napi_struct *napi, *n;
+ int i, update = 0;
+
+ if (wait_for_pending_requests(oct))
+ dev_err(&oct->pci_dev->dev, "There were pending requests\n");
+
+ if (lio_wait_for_instr_fetch(oct))
+ dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
+
+ if (octeon_set_io_queues_off(oct)) {
+ dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
+ return -1;
+ }
+
+ /* Disable the input and output queues now. No more packets will
+ * arrive from Octeon.
+ */
+ oct->fn_list.disable_io_queues(oct);
+ /* Delete NAPI */
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ netif_napi_del(napi);
+
+ if (num_qs != oct->num_iqs) {
+ netif_set_real_num_rx_queues(netdev, num_qs);
+ netif_set_real_num_tx_queues(netdev, num_qs);
+ update = 1;
+ }
+
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.oq & BIT_ULL(i)))
+ continue;
+ octeon_delete_droq(oct, i);
+ }
+
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.iq & BIT_ULL(i)))
+ continue;
+ octeon_delete_instr_queue(oct, i);
+ }
+
+ if (oct->fn_list.setup_device_regs(oct)) {
+ dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n");
+ return -1;
+ }
+
+ if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) {
+ dev_err(&oct->pci_dev->dev, "IO queues initialization failed\n");
+ return -1;
+ }
+
+ /* Enable the input and output queues for this Octeon device */
+ if (oct->fn_list.enable_io_queues(oct)) {
+ dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues");
+ return -1;
+ }
+
+ if (update && lio_send_queue_count_update(netdev, num_qs))
+ return -1;
+
+ return 0;
+}
+
+static int lio_ethtool_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering)
+{
+ u32 rx_count, tx_count, rx_count_old, tx_count_old;
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ int stopped = 0;
+
+ if (!OCTEON_CN23XX_PF(oct) && !OCTEON_CN23XX_VF(oct))
+ return -EINVAL;
+
+ if (ering->rx_mini_pending || ering->rx_jumbo_pending)
+ return -EINVAL;
+
+ rx_count = clamp_t(u32, ering->rx_pending, CN23XX_MIN_OQ_DESCRIPTORS,
+ CN23XX_MAX_OQ_DESCRIPTORS);
+ tx_count = clamp_t(u32, ering->tx_pending, CN23XX_MIN_IQ_DESCRIPTORS,
+ CN23XX_MAX_IQ_DESCRIPTORS);
+
+ rx_count_old = oct->droq[0]->max_count;
+ tx_count_old = oct->instr_queue[0]->max_count;
+
+ if (rx_count == rx_count_old && tx_count == tx_count_old)
+ return 0;
+
+ ifstate_set(lio, LIO_IFSTATE_RESETTING);
+
+ if (netif_running(netdev)) {
+ netdev->netdev_ops->ndo_stop(netdev);
+ stopped = 1;
+ }
+
+ /* Change RX/TX DESCS count */
+ if (tx_count != tx_count_old)
+ CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
+ tx_count);
+ if (rx_count != rx_count_old)
+ CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
+ rx_count);
+
+ if (lio_reset_queues(netdev, lio->linfo.num_txpciq))
+ goto err_lio_reset_queues;
+
+ if (stopped)
+ netdev->netdev_ops->ndo_open(netdev);
+
+ ifstate_reset(lio, LIO_IFSTATE_RESETTING);
+
+ return 0;
+
+err_lio_reset_queues:
+ if (tx_count != tx_count_old)
+ CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
+ tx_count_old);
+ if (rx_count != rx_count_old)
+ CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
+ rx_count_old);
+ return -EINVAL;
}
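
Correspondingly, descriptor ring sizes become adjustable via ethtool once ->set_ringparam is wired up below; requested values are clamped to the CN23XX minimum/maximum descriptor limits, e.g.:

    ethtool -g eth0                  # show current/max ring sizes
    ethtool -G eth0 rx 1024 tx 1024  # resize the descriptor rings
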
static u32 lio_get_msglevel(struct net_device *netdev)
@@ -795,6 +1054,9 @@ lio_get_ethtool_stats(struct net_device *netdev,
struct net_device_stats *netstats = &netdev->stats;
int i = 0, j;
+ if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
+ return;
+
netdev->netdev_ops->ndo_get_stats(netdev);
octnet_get_link_stats(netdev);
@@ -826,6 +1088,8 @@ lio_get_ethtool_stats(struct net_device *netdev,
data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
/*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
+ /*per_core_stats[j].link_stats[i].fromhost.fw_err_pki */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pki);
/*per_core_stats[j].link_stats[i].fromhost.fw_err_link */
data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
@@ -1057,6 +1321,9 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev,
struct octeon_device *oct_dev = lio->oct_dev;
int i = 0, j, vj;
+ if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
+ return;
+
netdev->netdev_ops->ndo_get_stats(netdev);
/* sum of oct->droq[oq_no]->stats->rx_pkts_received */
data[i++] = CVM_CAST64(netstats->rx_packets);
@@ -1079,7 +1346,7 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev,
/* lio->link_changes */
data[i++] = CVM_CAST64(lio->link_changes);
- for (vj = 0; vj < lio->linfo.num_txpciq; vj++) {
+ for (vj = 0; vj < oct_dev->num_iqs; vj++) {
j = lio->linfo.txpciq[vj].s.q_no;
/* packets to network port */
@@ -1121,7 +1388,7 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev,
}
/* RX */
- for (vj = 0; vj < lio->linfo.num_rxpciq; vj++) {
+ for (vj = 0; vj < oct_dev->num_oqs; vj++) {
j = lio->linfo.rxpciq[vj].s.q_no;
/* packets send to TCP/IP network stack */
@@ -1568,6 +1835,7 @@ octnet_nic_stats_callback(struct octeon_device *oct_dev,
tstats->fw_total_sent = rsp_tstats->fw_total_sent;
tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
tstats->fw_err_pko = rsp_tstats->fw_err_pko;
+ tstats->fw_err_pki = rsp_tstats->fw_err_pki;
tstats->fw_err_link = rsp_tstats->fw_err_link;
tstats->fw_err_drop = rsp_tstats->fw_err_drop;
tstats->fw_tso = rsp_tstats->fw_tso;
@@ -2587,7 +2855,9 @@ static const struct ethtool_ops lio_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_drvinfo = lio_get_drvinfo,
.get_ringparam = lio_ethtool_get_ringparam,
+ .set_ringparam = lio_ethtool_set_ringparam,
.get_channels = lio_ethtool_get_channels,
+ .set_channels = lio_ethtool_set_channels,
.set_phys_id = lio_set_phys_id,
.get_eeprom_len = lio_get_eeprom_len,
.get_eeprom = lio_get_eeprom,
@@ -2612,7 +2882,9 @@ static const struct ethtool_ops lio_vf_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_drvinfo = lio_get_vf_drvinfo,
.get_ringparam = lio_ethtool_get_ringparam,
+ .set_ringparam = lio_ethtool_set_ringparam,
.get_channels = lio_ethtool_get_channels,
+ .set_channels = lio_ethtool_set_channels,
.get_strings = lio_vf_get_strings,
.get_ethtool_stats = lio_vf_get_ethtool_stats,
.get_regs_len = lio_get_regs_len,
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 120b6e537b28..e7f54948173f 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -39,10 +39,14 @@ MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);
-MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX);
-MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX);
-MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX);
-MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME LIO_FW_NAME_SUFFIX);
+MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
+ "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
+MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
+ "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
+MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
+ "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
+MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
+ "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
@@ -55,11 +59,24 @@ static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
-static char fw_type[LIO_MAX_FW_TYPE_LEN];
-module_param_string(fw_type, fw_type, sizeof(fw_type), 0000);
-MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\"");
+static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_NIC;
+module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
+MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\". Use \"none\" to load firmware from flash.");
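
Per the parameter description above, typical usage would be (module name as built in-tree):

    modprobe liquidio                 # default, equivalent to fw_type=nic
    modprobe liquidio fw_type=none    # skip host firmware load; run from flash
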
-static int ptp_enable = 1;
+static u32 console_bitmask;
+module_param(console_bitmask, int, 0644);
+MODULE_PARM_DESC(console_bitmask,
+ "Bitmask indicating which consoles have debug output redirected to syslog.");
+
+/**
+ * \brief determines if a given console has debug enabled.
+ * @param console console to check
+ * @returns 1 = enabled. 0 otherwise
+ */
+static int octeon_console_debug_enabled(u32 console)
+{
+ return (console_bitmask >> (console)) & 0x1;
+}
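
So, for example, redirecting the debug output of consoles 0 and 1 to syslog would look like this (a sketch of intended usage):

    modprobe liquidio console_bitmask=0x3   # bit 0 -> console 0, bit 1 -> console 1
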
/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100
@@ -158,16 +175,13 @@ struct handshake {
int started_ok;
};
-struct octeon_device_priv {
- /** Tasklet structures for this device. */
- struct tasklet_struct droq_tasklet;
- unsigned long napi_mask;
-};
-
#ifdef CONFIG_PCI_IOV
static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif
+static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
+ char *prefix, char *suffix);
+
static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
@@ -256,32 +270,6 @@ static void force_io_queues_off(struct octeon_device *oct)
}
/**
- * \brief wait for all pending requests to complete
- * @param oct Pointer to Octeon device
- *
- * Called during shutdown sequence
- */
-static int wait_for_pending_requests(struct octeon_device *oct)
-{
- int i, pcount = 0;
-
- for (i = 0; i < 100; i++) {
- pcount =
- atomic_read(&oct->response_list
- [OCTEON_ORDERED_SC_LIST].pending_req_count);
- if (pcount)
- schedule_timeout_uninterruptible(HZ / 10);
- else
- break;
- }
-
- if (pcount)
- return 1;
-
- return 0;
-}
-
-/**
* \brief Cause device to go quiet so it can be safely removed/reset/etc
* @param oct Pointer to Octeon device
*/
@@ -572,7 +560,7 @@ static inline void txqs_wake(struct net_device *netdev)
for (i = 0; i < netdev->num_tx_queues; i++) {
int qno = lio->linfo.txpciq[i %
- (lio->linfo.num_txpciq)].s.q_no;
+ lio->oct_dev->num_iqs].s.q_no;
if (__netif_subqueue_stopped(netdev, i)) {
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
@@ -652,7 +640,7 @@ static inline int check_txq_status(struct lio *lio)
/* check each sub-queue state */
for (q = 0; q < numqs; q++) {
iq = lio->linfo.txpciq[q %
- (lio->linfo.num_txpciq)].s.q_no;
+ lio->oct_dev->num_iqs].s.q_no;
if (octnet_iq_is_full(lio->oct_dev, iq))
continue;
if (__netif_subqueue_stopped(lio->netdev, q)) {
@@ -823,7 +811,8 @@ static void print_link_info(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
- if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
+ if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
+ ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
struct oct_link_info *linfo = &lio->linfo;
if (linfo->link.s.link_up) {
@@ -912,295 +901,6 @@ static inline void update_link_status(struct net_device *netdev,
}
}
-/* Runs in interrupt context. */
-static void update_txq_status(struct octeon_device *oct, int iq_num)
-{
- struct net_device *netdev;
- struct lio *lio;
- struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
-
- netdev = oct->props[iq->ifidx].netdev;
-
- /* This is needed because the first IQ does not have
- * a netdev associated with it.
- */
- if (!netdev)
- return;
-
- lio = GET_LIO(netdev);
- if (netif_is_multiqueue(netdev)) {
- if (__netif_subqueue_stopped(netdev, iq->q_index) &&
- lio->linfo.link.s.link_up &&
- (!octnet_iq_is_full(oct, iq_num))) {
- INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
- tx_restart, 1);
- netif_wake_subqueue(netdev, iq->q_index);
- }
- } else if (netif_queue_stopped(netdev) &&
- lio->linfo.link.s.link_up &&
- (!octnet_iq_is_full(oct, lio->txq))) {
- INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
- lio->txq, tx_restart, 1);
- netif_wake_queue(netdev);
- }
-}
-
-static
-int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
-{
- struct octeon_device *oct = droq->oct_dev;
- struct octeon_device_priv *oct_priv =
- (struct octeon_device_priv *)oct->priv;
-
- if (droq->ops.poll_mode) {
- droq->ops.napi_fn(droq);
- } else {
- if (ret & MSIX_PO_INT) {
- tasklet_schedule(&oct_priv->droq_tasklet);
- return 1;
- }
- /* this will be flushed periodically by check iq db */
- if (ret & MSIX_PI_INT)
- return 0;
- }
- return 0;
-}
-
-/**
- * \brief Droq packet processor sceduler
- * @param oct octeon device
- */
-static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
-{
- struct octeon_device_priv *oct_priv =
- (struct octeon_device_priv *)oct->priv;
- u64 oq_no;
- struct octeon_droq *droq;
-
- if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
- for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
- oq_no++) {
- if (!(oct->droq_intr & BIT_ULL(oq_no)))
- continue;
-
- droq = oct->droq[oq_no];
-
- if (droq->ops.poll_mode) {
- droq->ops.napi_fn(droq);
- oct_priv->napi_mask |= (1 << oq_no);
- } else {
- tasklet_schedule(&oct_priv->droq_tasklet);
- }
- }
- }
-}
-
-static irqreturn_t
-liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
-{
- u64 ret;
- struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
- struct octeon_device *oct = ioq_vector->oct_dev;
- struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
-
- ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
-
- if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT))
- liquidio_schedule_msix_droq_pkt_handler(droq, ret);
-
- return IRQ_HANDLED;
-}
-
-/**
- * \brief Interrupt handler for octeon
- * @param irq unused
- * @param dev octeon device
- */
-static
-irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
- void *dev)
-{
- struct octeon_device *oct = (struct octeon_device *)dev;
- irqreturn_t ret;
-
- /* Disable our interrupts for the duration of ISR */
- oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
-
- ret = oct->fn_list.process_interrupt_regs(oct);
-
- if (ret == IRQ_HANDLED)
- liquidio_schedule_droq_pkt_handlers(oct);
-
- /* Re-enable our interrupts */
- if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
- oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
-
- return ret;
-}
-
-/**
- * \brief Setup interrupt for octeon device
- * @param oct octeon device
- *
- * Enable interrupt in Octeon device as given in the PCI interrupt mask.
- */
-static int octeon_setup_interrupt(struct octeon_device *oct)
-{
- int irqret, err;
- struct msix_entry *msix_entries;
- int i;
- int num_ioq_vectors;
- int num_alloc_ioq_vectors;
- char *queue_irq_names = NULL;
- char *aux_irq_name = NULL;
-
- if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
- oct->num_msix_irqs = oct->sriov_info.num_pf_rings;
- /* one non ioq interrupt for handling sli_mac_pf_int_sum */
- oct->num_msix_irqs += 1;
-
- /* allocate storage for the names assigned to each irq */
- oct->irq_name_storage =
- kcalloc((MAX_IOQ_INTERRUPTS_PER_PF + 1), INTRNAMSIZ,
- GFP_KERNEL);
- if (!oct->irq_name_storage) {
- dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
- return -ENOMEM;
- }
-
- queue_irq_names = oct->irq_name_storage;
- aux_irq_name = &queue_irq_names
- [IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];
-
- oct->msix_entries = kcalloc(
- oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
- if (!oct->msix_entries) {
- dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
- kfree(oct->irq_name_storage);
- oct->irq_name_storage = NULL;
- return -ENOMEM;
- }
-
- msix_entries = (struct msix_entry *)oct->msix_entries;
- /*Assumption is that pf msix vectors start from pf srn to pf to
- * trs and not from 0. if not change this code
- */
- for (i = 0; i < oct->num_msix_irqs - 1; i++)
- msix_entries[i].entry = oct->sriov_info.pf_srn + i;
- msix_entries[oct->num_msix_irqs - 1].entry =
- oct->sriov_info.trs;
- num_alloc_ioq_vectors = pci_enable_msix_range(
- oct->pci_dev, msix_entries,
- oct->num_msix_irqs,
- oct->num_msix_irqs);
- if (num_alloc_ioq_vectors < 0) {
- dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
- kfree(oct->msix_entries);
- oct->msix_entries = NULL;
- kfree(oct->irq_name_storage);
- oct->irq_name_storage = NULL;
- return num_alloc_ioq_vectors;
- }
- dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
-
- num_ioq_vectors = oct->num_msix_irqs;
-
- /** For PF, there is one non-ioq interrupt handler */
- num_ioq_vectors -= 1;
-
- snprintf(aux_irq_name, INTRNAMSIZ,
- "LiquidIO%u-pf%u-aux", oct->octeon_id, oct->pf_num);
- irqret = request_irq(msix_entries[num_ioq_vectors].vector,
- liquidio_legacy_intr_handler, 0,
- aux_irq_name, oct);
- if (irqret) {
- dev_err(&oct->pci_dev->dev,
- "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
- irqret);
- pci_disable_msix(oct->pci_dev);
- kfree(oct->msix_entries);
- oct->msix_entries = NULL;
- kfree(oct->irq_name_storage);
- oct->irq_name_storage = NULL;
- return irqret;
- }
-
- for (i = 0; i < num_ioq_vectors; i++) {
- snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], INTRNAMSIZ,
- "LiquidIO%u-pf%u-rxtx-%u",
- oct->octeon_id, oct->pf_num, i);
-
- irqret = request_irq(msix_entries[i].vector,
- liquidio_msix_intr_handler, 0,
- &queue_irq_names[IRQ_NAME_OFF(i)],
- &oct->ioq_vector[i]);
- if (irqret) {
- dev_err(&oct->pci_dev->dev,
- "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
- irqret);
- /** Freeing the non-ioq irq vector here . */
- free_irq(msix_entries[num_ioq_vectors].vector,
- oct);
-
- while (i) {
- i--;
- /** clearing affinity mask. */
- irq_set_affinity_hint(
- msix_entries[i].vector, NULL);
- free_irq(msix_entries[i].vector,
- &oct->ioq_vector[i]);
- }
- pci_disable_msix(oct->pci_dev);
- kfree(oct->msix_entries);
- oct->msix_entries = NULL;
- kfree(oct->irq_name_storage);
- oct->irq_name_storage = NULL;
- return irqret;
- }
- oct->ioq_vector[i].vector = msix_entries[i].vector;
- /* assign the cpu mask for this msix interrupt vector */
- irq_set_affinity_hint(
- msix_entries[i].vector,
- (&oct->ioq_vector[i].affinity_mask));
- }
- dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
- oct->octeon_id);
- } else {
- err = pci_enable_msi(oct->pci_dev);
- if (err)
- dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
- err);
- else
- oct->flags |= LIO_FLAG_MSI_ENABLED;
-
- /* allocate storage for the names assigned to the irq */
- oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
- if (!oct->irq_name_storage)
- return -ENOMEM;
-
- queue_irq_names = oct->irq_name_storage;
-
- snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
- "LiquidIO%u-pf%u-rxtx-%u",
- oct->octeon_id, oct->pf_num, 0);
-
- irqret = request_irq(oct->pci_dev->irq,
- liquidio_legacy_intr_handler,
- IRQF_SHARED,
- &queue_irq_names[IRQ_NAME_OFF(0)], oct);
- if (irqret) {
- if (oct->flags & LIO_FLAG_MSI_ENABLED)
- pci_disable_msi(oct->pci_dev);
- dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
- irqret);
- kfree(oct->irq_name_storage);
- oct->irq_name_storage = NULL;
- return irqret;
- }
- }
- return 0;
-}
-
static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
{
struct octeon_device *other_oct;
@@ -1344,6 +1044,13 @@ liquidio_probe(struct pci_dev *pdev,
if (pdev->device == OCTEON_CN23XX_PF_VID)
oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
+ /* Enable PTP for 6XXX Device */
+ if (((pdev->device == OCTEON_CN66XX) ||
+ (pdev->device == OCTEON_CN68XX)))
+ oct_dev->ptp_enable = true;
+ else
+ oct_dev->ptp_enable = false;
+
dev_info(&pdev->dev, "Initializing device %x:%x.\n",
(u32)pdev->vendor, (u32)pdev->device);
@@ -1415,6 +1122,33 @@ static bool fw_type_is_none(void)
}
/**
+ * \brief PCI FLR for each Octeon device.
+ * @param oct octeon device
+ */
+static void octeon_pci_flr(struct octeon_device *oct)
+{
+ int rc;
+
+ pci_save_state(oct->pci_dev);
+
+ pci_cfg_access_lock(oct->pci_dev);
+
+ /* Quiesce the device completely */
+ pci_write_config_word(oct->pci_dev, PCI_COMMAND,
+ PCI_COMMAND_INTX_DISABLE);
+
+ rc = __pci_reset_function_locked(oct->pci_dev);
+
+ if (rc != 0)
+ dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
+ rc, oct->pf_num);
+
+ pci_cfg_access_unlock(oct->pci_dev);
+
+ pci_restore_state(oct->pci_dev);
+}
+
+/**
*\brief Destroy resources associated with octeon device
* @param pdev PCI device structure
* @param ent unused
@@ -1474,11 +1208,15 @@ static void octeon_destroy_resources(struct octeon_device *oct)
if (oct->msix_on) {
msix_entries = (struct msix_entry *)oct->msix_entries;
for (i = 0; i < oct->num_msix_irqs - 1; i++) {
- /* clear the affinity_cpumask */
- irq_set_affinity_hint(msix_entries[i].vector,
- NULL);
- free_irq(msix_entries[i].vector,
- &oct->ioq_vector[i]);
+ if (oct->ioq_vector[i].vector) {
+ /* clear the affinity_cpumask */
+ irq_set_affinity_hint(
+ msix_entries[i].vector,
+ NULL);
+ free_irq(msix_entries[i].vector,
+ &oct->ioq_vector[i]);
+ oct->ioq_vector[i].vector = 0;
+ }
}
/* non-iov vector's argument is oct struct */
free_irq(msix_entries[i].vector, oct);
@@ -1558,14 +1296,16 @@ static void octeon_destroy_resources(struct octeon_device *oct)
case OCT_DEV_PCI_MAP_DONE:
refcount = octeon_deregister_device(oct);
- if (!fw_type_is_none()) {
- /* Soft reset the octeon device before exiting.
- * Implementation note: here, we reset the device
- * if it is a CN6XXX OR the last CN23XX device.
- */
- if (OCTEON_CN6XXX(oct) || !refcount)
- oct->fn_list.soft_reset(oct);
- }
+ /* Soft reset the octeon device before exiting.
+ * However, if fw was loaded from card (i.e. autoboot),
+ * perform an FLR instead.
+ * Implementation note: only soft-reset the device
+ * if it is a CN6XXX OR the LAST CN23XX device.
+ */
+ if (fw_type_is_none())
+ octeon_pci_flr(oct);
+ else if (OCTEON_CN6XXX(oct) || !refcount)
+ oct->fn_list.soft_reset(oct);
octeon_unmap_pci_barx(oct, 0);
octeon_unmap_pci_barx(oct, 1);
@@ -1698,15 +1438,6 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
liquidio_stop(netdev);
- if (fw_type_is_none()) {
- struct octnic_ctrl_pkt nctrl;
-
- memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
- nctrl.ncmd.s.cmd = OCTNET_CMD_RESET_PF;
- nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- octnet_send_nic_ctrl_pkt(oct, &nctrl);
- }
-
if (oct->props[lio->ifidx].napi_enabled == 1) {
list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
napi_disable(napi);
@@ -1717,6 +1448,10 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
oct->droq[0]->ops.poll_mode = 0;
}
+ /* Delete NAPI */
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ netif_napi_del(napi);
+
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
unregister_netdev(netdev);
@@ -1754,7 +1489,7 @@ static int liquidio_stop_nic_module(struct octeon_device *oct)
for (i = 0; i < oct->ifcount; i++) {
lio = GET_LIO(oct->props[i].netdev);
- for (j = 0; j < lio->linfo.num_rxpciq; j++)
+ for (j = 0; j < oct->num_oqs; j++)
octeon_unregister_droq_ops(oct,
lio->linfo.rxpciq[j].s.q_no);
}
@@ -1825,6 +1560,13 @@ static int octeon_chip_specific_setup(struct octeon_device *oct)
case OCTEON_CN23XX_PCIID_PF:
oct->chip_id = OCTEON_CN23XX_PF_VID;
ret = setup_cn23xx_octeon_pf_device(oct);
+ if (ret)
+ break;
+#ifdef CONFIG_PCI_IOV
+ if (!ret)
+ pci_sriov_set_totalvfs(oct->pci_dev,
+ oct->sriov_info.max_vfs);
+#endif
s = "CN23XX";
break;
@@ -1889,7 +1631,7 @@ static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
if (netif_is_multiqueue(lio->netdev)) {
q = skb->queue_mapping;
- iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no;
+ iq = lio->linfo.txpciq[(q % lio->oct_dev->num_iqs)].s.q_no;
} else {
iq = lio->txq;
q = iq;
@@ -2192,11 +1934,6 @@ static int load_firmware(struct octeon_device *oct)
char fw_name[LIO_MAX_FW_FILENAME_LEN];
char *tmp_fw_type;
- if (fw_type_is_none()) {
- dev_info(&oct->pci_dev->dev, "Skipping firmware load\n");
- return ret;
- }
-
if (fw_type[0] == '\0')
tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
else
@@ -2222,43 +1959,6 @@ static int load_firmware(struct octeon_device *oct)
}
/**
- * \brief Setup output queue
- * @param oct octeon device
- * @param q_no which queue
- * @param num_descs how many descriptors
- * @param desc_size size of each descriptor
- * @param app_ctx application context
- */
-static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
- int desc_size, void *app_ctx)
-{
- int ret_val = 0;
-
- dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
- /* droq creation and local register settings. */
- ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
- if (ret_val < 0)
- return ret_val;
-
- if (ret_val == 1) {
- dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
- return 0;
- }
- /* tasklet creation for the droq */
-
- /* Enable the droq queues */
- octeon_set_droq_pkt_op(oct, q_no, 1);
-
- /* Send Credit for Octeon Output queues. Credits are always
- * sent after the output queue is enabled.
- */
- writel(oct->droq[q_no]->max_count,
- oct->droq[q_no]->pkts_credit_reg);
-
- return ret_val;
-}
-
-/**
* \brief Callback for getting interface configuration
* @param status status of request
* @param buf pointer to resp structure
@@ -2291,352 +1991,6 @@ static void if_cfg_callback(struct octeon_device *oct,
wake_up_interruptible(&ctx->wc);
}
-/** Routine to push packets arriving on Octeon interface up to the network layer.
- * @param oct_id - octeon device id.
- * @param skbuff - skbuff struct to be passed to network layer.
- * @param len - size of total data received.
- * @param rh - Control header associated with the packet
- * @param param - additional control data with the packet
- * @param arg - farg registered in droq_ops
- */
-static void
-liquidio_push_packet(u32 octeon_id __attribute__((unused)),
- void *skbuff,
- u32 len,
- union octeon_rh *rh,
- void *param,
- void *arg)
-{
- struct napi_struct *napi = param;
- struct sk_buff *skb = (struct sk_buff *)skbuff;
- struct skb_shared_hwtstamps *shhwtstamps;
- u64 ns;
- u16 vtag = 0;
- u32 r_dh_off;
- struct net_device *netdev = (struct net_device *)arg;
- struct octeon_droq *droq = container_of(param, struct octeon_droq,
- napi);
- if (netdev) {
- int packet_was_received;
- struct lio *lio = GET_LIO(netdev);
- struct octeon_device *oct = lio->oct_dev;
-
- /* Do not proceed if the interface is not in RUNNING state. */
- if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
- recv_buffer_free(skb);
- droq->stats.rx_dropped++;
- return;
- }
-
- skb->dev = netdev;
-
- skb_record_rx_queue(skb, droq->q_no);
- if (likely(len > MIN_SKB_SIZE)) {
- struct octeon_skb_page_info *pg_info;
- unsigned char *va;
-
- pg_info = ((struct octeon_skb_page_info *)(skb->cb));
- if (pg_info->page) {
- /* For Paged allocation use the frags */
- va = page_address(pg_info->page) +
- pg_info->page_offset;
- memcpy(skb->data, va, MIN_SKB_SIZE);
- skb_put(skb, MIN_SKB_SIZE);
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- pg_info->page,
- pg_info->page_offset +
- MIN_SKB_SIZE,
- len - MIN_SKB_SIZE,
- LIO_RXBUFFER_SZ);
- }
- } else {
- struct octeon_skb_page_info *pg_info =
- ((struct octeon_skb_page_info *)(skb->cb));
- skb_copy_to_linear_data(skb, page_address(pg_info->page)
- + pg_info->page_offset, len);
- skb_put(skb, len);
- put_page(pg_info->page);
- }
-
- r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;
-
- if (((oct->chip_id == OCTEON_CN66XX) ||
- (oct->chip_id == OCTEON_CN68XX)) &&
- ptp_enable) {
- if (rh->r_dh.has_hwtstamp) {
- /* timestamp is included from the hardware at
- * the beginning of the packet.
- */
- if (ifstate_check
- (lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
- /* Nanoseconds are in the first 64-bits
- * of the packet.
- */
- memcpy(&ns, (skb->data + r_dh_off),
- sizeof(ns));
- r_dh_off -= BYTES_PER_DHLEN_UNIT;
- shhwtstamps = skb_hwtstamps(skb);
- shhwtstamps->hwtstamp =
- ns_to_ktime(ns +
- lio->ptp_adjust);
- }
- }
- }
-
- if (rh->r_dh.has_hash) {
- __be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
- u32 hash = be32_to_cpu(*hash_be);
-
- skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
- r_dh_off -= BYTES_PER_DHLEN_UNIT;
- }
-
- skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
-
- skb->protocol = eth_type_trans(skb, skb->dev);
- if ((netdev->features & NETIF_F_RXCSUM) &&
- (((rh->r_dh.encap_on) &&
- (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
- (!(rh->r_dh.encap_on) &&
- (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
- /* checksum has already been verified */
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb->ip_summed = CHECKSUM_NONE;
-
- /* Setting Encapsulation field on basis of status received
- * from the firmware
- */
- if (rh->r_dh.encap_on) {
- skb->encapsulation = 1;
- skb->csum_level = 1;
- droq->stats.rx_vxlan++;
- }
-
- /* inbound VLAN tag */
- if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- (rh->r_dh.vlan != 0)) {
- u16 vid = rh->r_dh.vlan;
- u16 priority = rh->r_dh.priority;
-
- vtag = priority << 13 | vid;
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
- }
-
- packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP;
-
- if (packet_was_received) {
- droq->stats.rx_bytes_received += len;
- droq->stats.rx_pkts_received++;
- } else {
- droq->stats.rx_dropped++;
- netif_info(lio, rx_err, lio->netdev,
- "droq:%d error rx_dropped:%llu\n",
- droq->q_no, droq->stats.rx_dropped);
- }
-
- } else {
- recv_buffer_free(skb);
- }
-}
-
-/**
- * \brief wrapper for calling napi_schedule
- * @param param parameters to pass to napi_schedule
- *
- * Used when scheduling on different CPUs
- */
-static void napi_schedule_wrapper(void *param)
-{
- struct napi_struct *napi = param;
-
- napi_schedule(napi);
-}
-
-/**
- * \brief callback when receive interrupt occurs and we are in NAPI mode
- * @param arg pointer to octeon output queue
- */
-static void liquidio_napi_drv_callback(void *arg)
-{
- struct octeon_device *oct;
- struct octeon_droq *droq = arg;
- int this_cpu = smp_processor_id();
-
- oct = droq->oct_dev;
-
- if (OCTEON_CN23XX_PF(oct) || droq->cpu_id == this_cpu) {
- napi_schedule_irqoff(&droq->napi);
- } else {
- call_single_data_t *csd = &droq->csd;
-
- csd->func = napi_schedule_wrapper;
- csd->info = &droq->napi;
- csd->flags = 0;
-
- smp_call_function_single_async(droq->cpu_id, csd);
- }
-}
-
-/**
- * \brief Entry point for NAPI polling
- * @param napi NAPI structure
- * @param budget maximum number of items to process
- */
-static int liquidio_napi_poll(struct napi_struct *napi, int budget)
-{
- struct octeon_droq *droq;
- int work_done;
- int tx_done = 0, iq_no;
- struct octeon_instr_queue *iq;
- struct octeon_device *oct;
-
- droq = container_of(napi, struct octeon_droq, napi);
- oct = droq->oct_dev;
- iq_no = droq->q_no;
- /* Handle Droq descriptors */
- work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
- POLL_EVENT_PROCESS_PKTS,
- budget);
-
- /* Flush the instruction queue */
- iq = oct->instr_queue[iq_no];
- if (iq) {
- if (atomic_read(&iq->instr_pending))
- /* Process iq buffers within the budget limits */
- tx_done = octeon_flush_iq(oct, iq, budget);
- else
- tx_done = 1;
- /* Update iq read-index rather than waiting for next interrupt.
- * Return back if tx_done is false.
- */
- update_txq_status(oct, iq_no);
- } else {
- dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
- __func__, iq_no);
- }
-
- /* force enable interrupt if reg cnts are high to avoid wraparound */
- if ((work_done < budget && tx_done) ||
- (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
- (droq->pkt_count >= MAX_REG_CNT)) {
- tx_done = 1;
- napi_complete_done(napi, work_done);
- octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
- POLL_EVENT_ENABLE_INTR, 0);
- return 0;
- }
-
- return (!tx_done) ? (budget) : (work_done);
-}
-
-/**
- * \brief Setup input and output queues
- * @param octeon_dev octeon device
- * @param ifidx Interface Index
- *
- * Note: Queues are with respect to the octeon device. Thus
- * an input queue is for egress packets, and output queues
- * are for ingress packets.
- */
-static inline int setup_io_queues(struct octeon_device *octeon_dev,
- int ifidx)
-{
- struct octeon_droq_ops droq_ops;
- struct net_device *netdev;
- static int cpu_id;
- static int cpu_id_modulus;
- struct octeon_droq *droq;
- struct napi_struct *napi;
- int q, q_no, retval = 0;
- struct lio *lio;
- int num_tx_descs;
-
- netdev = octeon_dev->props[ifidx].netdev;
-
- lio = GET_LIO(netdev);
-
- memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
-
- droq_ops.fptr = liquidio_push_packet;
- droq_ops.farg = (void *)netdev;
-
- droq_ops.poll_mode = 1;
- droq_ops.napi_fn = liquidio_napi_drv_callback;
- cpu_id = 0;
- cpu_id_modulus = num_present_cpus();
-
- /* set up DROQs. */
- for (q = 0; q < lio->linfo.num_rxpciq; q++) {
- q_no = lio->linfo.rxpciq[q].s.q_no;
- dev_dbg(&octeon_dev->pci_dev->dev,
- "setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n",
- q, q_no);
- retval = octeon_setup_droq(octeon_dev, q_no,
- CFG_GET_NUM_RX_DESCS_NIC_IF
- (octeon_get_conf(octeon_dev),
- lio->ifidx),
- CFG_GET_NUM_RX_BUF_SIZE_NIC_IF
- (octeon_get_conf(octeon_dev),
- lio->ifidx), NULL);
- if (retval) {
- dev_err(&octeon_dev->pci_dev->dev,
- "%s : Runtime DROQ(RxQ) creation failed.\n",
- __func__);
- return 1;
- }
-
- droq = octeon_dev->droq[q_no];
- napi = &droq->napi;
- dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx pf_num:%d\n",
- (u64)netdev, (u64)octeon_dev, octeon_dev->pf_num);
- netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
-
- /* designate a CPU for this droq */
- droq->cpu_id = cpu_id;
- cpu_id++;
- if (cpu_id >= cpu_id_modulus)
- cpu_id = 0;
-
- octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
- }
-
- if (OCTEON_CN23XX_PF(octeon_dev)) {
- /* 23XX PF can receive control messages (via the first PF-owned
- * droq) from the firmware even if the ethX interface is down,
- * so that's why poll_mode must be off for the first droq.
- */
- octeon_dev->droq[0]->ops.poll_mode = 0;
- }
-
- /* set up IQs. */
- for (q = 0; q < lio->linfo.num_txpciq; q++) {
- num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
- (octeon_dev),
- lio->ifidx);
- retval = octeon_setup_iq(octeon_dev, ifidx, q,
- lio->linfo.txpciq[q], num_tx_descs,
- netdev_get_tx_queue(netdev, q));
- if (retval) {
- dev_err(&octeon_dev->pci_dev->dev,
- " %s : Runtime IQ(TxQ) creation failed.\n",
- __func__);
- return 1;
- }
-
- if (octeon_dev->ioq_vector) {
- struct octeon_ioq_vector *ioq_vector;
-
- ioq_vector = &octeon_dev->ioq_vector[q];
- netif_set_xps_queue(netdev,
- &ioq_vector->affinity_mask,
- ioq_vector->iq_index);
- }
- }
-
- return 0;
-}
-
/**
* \brief Poll routine for checking transmit queue status
* @param work work_struct data structure
@@ -2707,8 +2061,7 @@ static int liquidio_open(struct net_device *netdev)
oct->droq[0]->ops.poll_mode = 1;
}
- if ((oct->chip_id == OCTEON_CN66XX || oct->chip_id == OCTEON_CN68XX) &&
- ptp_enable)
+ if (oct->ptp_enable)
oct_ptp_open(netdev);
ifstate_set(lio, LIO_IFSTATE_RUNNING);
@@ -2746,6 +2099,17 @@ static int liquidio_stop(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
+ struct napi_struct *napi, *n;
+
+ if (oct->props[lio->ifidx].napi_enabled) {
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_disable(napi);
+
+ oct->props[lio->ifidx].napi_enabled = 0;
+
+ if (OCTEON_CN23XX_PF(oct))
+ oct->droq[0]->ops.poll_mode = 0;
+ }
ifstate_reset(lio, LIO_IFSTATE_RUNNING);
@@ -2916,7 +2280,10 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
oct = lio->oct_dev;
- for (i = 0; i < lio->linfo.num_txpciq; i++) {
+ if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
+ return stats;
+
+ for (i = 0; i < oct->num_iqs; i++) {
iq_no = lio->linfo.txpciq[i].s.q_no;
iq_stats = &oct->instr_queue[iq_no]->stats;
pkts += iq_stats->tx_done;
@@ -2932,7 +2299,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
drop = 0;
bytes = 0;
- for (i = 0; i < lio->linfo.num_rxpciq; i++) {
+ for (i = 0; i < oct->num_oqs; i++) {
oq_no = lio->linfo.rxpciq[i].s.q_no;
oq_stats = &oct->droq[oq_no]->stats;
pkts += oq_stats->rx_pkts_received;
@@ -3052,8 +2419,7 @@ static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCSHWTSTAMP:
- if ((lio->oct_dev->chip_id == OCTEON_CN66XX ||
- lio->oct_dev->chip_id == OCTEON_CN68XX) && ptp_enable)
+ if (lio->oct_dev->ptp_enable)
return hwtstamp_ioctl(netdev, ifr);
default:
return -EOPNOTSUPP;
@@ -4188,7 +3554,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
*/
lio->txq = lio->linfo.txpciq[0].s.q_no;
lio->rxq = lio->linfo.rxpciq[0].s.q_no;
- if (setup_io_queues(octeon_dev, i)) {
+ if (liquidio_setup_io_queues(octeon_dev, i,
+ lio->linfo.num_txpciq,
+ lio->linfo.num_rxpciq)) {
dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
goto setup_nic_dev_fail;
}
@@ -4516,6 +3884,7 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
int j, ret;
int fw_loaded = 0;
char bootcmd[] = "\n";
+ char *dbg_enb = NULL;
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)octeon_dev->priv;
atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
@@ -4548,18 +3917,16 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
octeon_dev->app_mode = CVM_DRV_INVALID_APP;
if (OCTEON_CN23XX_PF(octeon_dev)) {
- if (!cn23xx_fw_loaded(octeon_dev)) {
+ if (!cn23xx_fw_loaded(octeon_dev) && !fw_type_is_none()) {
fw_loaded = 0;
- if (!fw_type_is_none()) {
- /* Do a soft reset of the Octeon device. */
- if (octeon_dev->fn_list.soft_reset(octeon_dev))
- return 1;
- /* things might have changed */
- if (!cn23xx_fw_loaded(octeon_dev))
- fw_loaded = 0;
- else
- fw_loaded = 1;
- }
+ /* Do a soft reset of the Octeon device. */
+ if (octeon_dev->fn_list.soft_reset(octeon_dev))
+ return 1;
+ /* things might have changed */
+ if (!cn23xx_fw_loaded(octeon_dev))
+ fw_loaded = 0;
+ else
+ fw_loaded = 1;
} else {
fw_loaded = 1;
}
@@ -4666,7 +4033,8 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
/* Setup the interrupt handler and record the INT SUM register address
*/
- if (octeon_setup_interrupt(octeon_dev))
+ if (octeon_setup_interrupt(octeon_dev,
+ octeon_dev->sriov_info.num_pf_rings))
return 1;
/* Enable Octeon device interrupts */
@@ -4674,6 +4042,18 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
+ /* Send Credit for Octeon Output queues. Credits are always sent BEFORE
+ * the output queue is enabled.
+ * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
+ * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
+ * Otherwise, it is possible that the DRV_ACTIVE message will be sent
+ * before any credits have been issued, causing the ring to be reset
+ * (and the f/w to appear never to have started).
+ */
+ for (j = 0; j < octeon_dev->num_oqs; j++)
+ writel(octeon_dev->droq[j]->max_count,
+ octeon_dev->droq[j]->pkts_credit_reg);
+
/* Enable the input and output queues for this Octeon device */
ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
if (ret) {
@@ -4722,10 +4102,19 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
return 1;
}
- ret = octeon_add_console(octeon_dev, 0);
+ /* If console debug enabled, specify empty string to use default
+ * enablement ELSE specify NULL string for 'disabled'.
+ */
+ dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
+ ret = octeon_add_console(octeon_dev, 0, dbg_enb);
if (ret) {
dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
return 1;
+ } else if (octeon_console_debug_enabled(0)) {
+ /* If console was added AND we're logging console output
+ * then set our console print function.
+ */
+ octeon_dev->console[0].print = octeon_dbg_console_print;
}
atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
@@ -4736,12 +4125,6 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
return 1;
}
- /* set bit 1 of SLI_SCRATCH_1 to indicate that firmware is
- * loaded
- */
- if (OCTEON_CN23XX_PF(octeon_dev))
- octeon_write_csr64(octeon_dev, CN23XX_SLI_SCRATCH1,
- 2ULL);
}
handshake[octeon_dev->octeon_id].init_ok = 1;
@@ -4749,14 +4132,33 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
- /* Send Credit for Octeon Output queues. Credits are always sent after
- * the output queue is enabled.
- */
- for (j = 0; j < octeon_dev->num_oqs; j++)
- writel(octeon_dev->droq[j]->max_count,
- octeon_dev->droq[j]->pkts_credit_reg);
+ return 0;
+}
+
+/**
+ * \brief Debug console print function
+ * @param octeon_dev octeon device
+ * @param console_num console number
+ * @param prefix first portion of line to display
+ * @param suffix second portion of line to display
+ *
+ * The OCTEON debug console outputs entire lines (excluding '\n').
+ * Normally, the line will be passed in the 'prefix' parameter.
+ * However, due to buffering, it is possible for a line to be split into two
+ * parts, in which case they will be passed as the 'prefix' parameter and
+ * 'suffix' parameter.
+ */
+static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
+ char *prefix, char *suffix)
+{
+ if (prefix && suffix)
+ dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
+ suffix);
+ else if (prefix)
+ dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
+ else if (suffix)
+ dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);
- /* Packets can start arriving on the output queues from this point. */
return 0;
}
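
The octeon_dbg_console_print() added above implements the driver's console print callback: a console line may arrive whole (prefix only) or split across two reads (prefix plus suffix), and the callback joins the pieces. A minimal userspace sketch of that contract, with printf() standing in for dev_info() (the kernel callback additionally receives the octeon_device pointer):

#include <stdio.h>

static int dbg_console_print(unsigned int console_num,
			     const char *prefix, const char *suffix)
{
	if (prefix && suffix)
		printf("%u: %s%s\n", console_num, prefix, suffix);
	else if (prefix)
		printf("%u: %s\n", console_num, prefix);
	else if (suffix)
		printf("%u: %s\n", console_num, suffix);
	return 0;
}

int main(void)
{
	dbg_console_print(0, "a whole line", NULL);          /* unsplit line */
	dbg_console_print(0, "first half, ", "second half"); /* split line */
	return 0;
}
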
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 9b247102eb92..2e993ce43b66 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -107,12 +107,6 @@ struct octnic_gather {
dma_addr_t sg_dma_ptr;
};
-struct octeon_device_priv {
- /* Tasklet structures for this device. */
- struct tasklet_struct droq_tasklet;
- unsigned long napi_mask;
-};
-
static int
liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void liquidio_vf_remove(struct pci_dev *pdev);
@@ -123,7 +117,7 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
- int retry = MAX_VF_IP_OP_PENDING_PKT_COUNT;
+ int retry = MAX_IO_PENDING_PKT_COUNT;
int pkt_cnt = 0, pending_pkts;
int i;
@@ -148,32 +142,6 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct)
}
/**
- * \brief wait for all pending requests to complete
- * @param oct Pointer to Octeon device
- *
- * Called during shutdown sequence
- */
-static int wait_for_pending_requests(struct octeon_device *oct)
-{
- int i, pcount = 0;
-
- for (i = 0; i < MAX_VF_IP_OP_PENDING_PKT_COUNT; i++) {
- pcount = atomic_read(
- &oct->response_list[OCTEON_ORDERED_SC_LIST]
- .pending_req_count);
- if (pcount)
- schedule_timeout_uninterruptible(HZ / 10);
- else
- break;
- }
-
- if (pcount)
- return 1;
-
- return 0;
-}
-
-/**
* \brief Cause device to go quiet so it can be safely removed/reset/etc
* @param oct Pointer to Octeon device
*/
@@ -374,7 +342,7 @@ static void txqs_wake(struct net_device *netdev)
int i;
for (i = 0; i < netdev->num_tx_queues; i++) {
- int qno = lio->linfo.txpciq[i % (lio->linfo.num_txpciq)]
+ int qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs]
.s.q_no;
if (__netif_subqueue_stopped(netdev, i)) {
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
@@ -574,7 +542,8 @@ static void print_link_info(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
- if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
+ if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
+ ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
struct oct_link_info *linfo = &lio->linfo;
if (linfo->link.s.link_up) {
@@ -661,6 +630,12 @@ static void update_link_status(struct net_device *netdev,
txqs_stop(netdev);
}
+ if (lio->linfo.link.s.mtu != netdev->max_mtu) {
+ dev_info(&oct->pci_dev->dev, "Max MTU Changed from %d to %d\n",
+ netdev->max_mtu, lio->linfo.link.s.mtu);
+ netdev->max_mtu = lio->linfo.link.s.mtu;
+ }
+
if (lio->linfo.link.s.mtu < netdev->mtu) {
dev_warn(&oct->pci_dev->dev,
"PF has changed the MTU for gmx port. Reducing the mtu from %d to %d\n",
@@ -673,167 +648,6 @@ static void update_link_status(struct net_device *netdev,
}
}
-static void update_txq_status(struct octeon_device *oct, int iq_num)
-{
- struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
- struct net_device *netdev;
- struct lio *lio;
-
- netdev = oct->props[iq->ifidx].netdev;
- lio = GET_LIO(netdev);
- if (netif_is_multiqueue(netdev)) {
- if (__netif_subqueue_stopped(netdev, iq->q_index) &&
- lio->linfo.link.s.link_up &&
- (!octnet_iq_is_full(oct, iq_num))) {
- netif_wake_subqueue(netdev, iq->q_index);
- INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
- tx_restart, 1);
- }
- } else if (netif_queue_stopped(netdev) && lio->linfo.link.s.link_up &&
- (!octnet_iq_is_full(oct, lio->txq))) {
- INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
- lio->txq, tx_restart, 1);
- netif_wake_queue(netdev);
- }
-}
-
-static
-int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
-{
- struct octeon_device *oct = droq->oct_dev;
- struct octeon_device_priv *oct_priv =
- (struct octeon_device_priv *)oct->priv;
-
- if (droq->ops.poll_mode) {
- droq->ops.napi_fn(droq);
- } else {
- if (ret & MSIX_PO_INT) {
- dev_err(&oct->pci_dev->dev,
- "should not come here should not get rx when poll mode = 0 for vf\n");
- tasklet_schedule(&oct_priv->droq_tasklet);
- return 1;
- }
- /* this will be flushed periodically by check iq db */
- if (ret & MSIX_PI_INT)
- return 0;
- }
- return 0;
-}
-
-static irqreturn_t
-liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
-{
- struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
- struct octeon_device *oct = ioq_vector->oct_dev;
- struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
- u64 ret;
-
- ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
-
- if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT))
- liquidio_schedule_msix_droq_pkt_handler(droq, ret);
-
- return IRQ_HANDLED;
-}
-
-/**
- * \brief Setup interrupt for octeon device
- * @param oct octeon device
- *
- * Enable interrupt in Octeon device as given in the PCI interrupt mask.
- */
-static int octeon_setup_interrupt(struct octeon_device *oct)
-{
- struct msix_entry *msix_entries;
- char *queue_irq_names = NULL;
- int num_alloc_ioq_vectors;
- int num_ioq_vectors;
- int irqret;
- int i;
-
- if (oct->msix_on) {
- oct->num_msix_irqs = oct->sriov_info.rings_per_vf;
-
- /* allocate storage for the names assigned to each irq */
- oct->irq_name_storage =
- kcalloc(MAX_IOQ_INTERRUPTS_PER_VF, INTRNAMSIZ,
- GFP_KERNEL);
- if (!oct->irq_name_storage) {
- dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
- return -ENOMEM;
- }
-
- queue_irq_names = oct->irq_name_storage;
-
- oct->msix_entries = kcalloc(
- oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
- if (!oct->msix_entries) {
- dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
- kfree(oct->irq_name_storage);
- oct->irq_name_storage = NULL;
- return -ENOMEM;
- }
-
- msix_entries = (struct msix_entry *)oct->msix_entries;
-
- for (i = 0; i < oct->num_msix_irqs; i++)
- msix_entries[i].entry = i;
- num_alloc_ioq_vectors = pci_enable_msix_range(
- oct->pci_dev, msix_entries,
- oct->num_msix_irqs,
- oct->num_msix_irqs);
- if (num_alloc_ioq_vectors < 0) {
- dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
- kfree(oct->msix_entries);
- oct->msix_entries = NULL;
- kfree(oct->irq_name_storage);
- oct->irq_name_storage = NULL;
- return num_alloc_ioq_vectors;
- }
- dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
-
- num_ioq_vectors = oct->num_msix_irqs;
-
- for (i = 0; i < num_ioq_vectors; i++) {
- snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], INTRNAMSIZ,
- "LiquidIO%u-vf%u-rxtx-%u",
- oct->octeon_id, oct->vf_num, i);
-
- irqret = request_irq(msix_entries[i].vector,
- liquidio_msix_intr_handler, 0,
- &queue_irq_names[IRQ_NAME_OFF(i)],
- &oct->ioq_vector[i]);
- if (irqret) {
- dev_err(&oct->pci_dev->dev,
- "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
- irqret);
-
- while (i) {
- i--;
- irq_set_affinity_hint(
- msix_entries[i].vector, NULL);
- free_irq(msix_entries[i].vector,
- &oct->ioq_vector[i]);
- }
- pci_disable_msix(oct->pci_dev);
- kfree(oct->msix_entries);
- oct->msix_entries = NULL;
- kfree(oct->irq_name_storage);
- oct->irq_name_storage = NULL;
- return irqret;
- }
- oct->ioq_vector[i].vector = msix_entries[i].vector;
- /* assign the cpu mask for this msix interrupt vector */
- irq_set_affinity_hint(
- msix_entries[i].vector,
- (&oct->ioq_vector[i].affinity_mask));
- }
- dev_dbg(&oct->pci_dev->dev,
- "OCTEON[%d]: MSI-X enabled\n", oct->octeon_id);
- }
- return 0;
-}
-
/**
* \brief PCI probe handler
* @param pdev PCI device structure
@@ -942,10 +756,14 @@ static void octeon_destroy_resources(struct octeon_device *oct)
if (oct->msix_on) {
msix_entries = (struct msix_entry *)oct->msix_entries;
for (i = 0; i < oct->num_msix_irqs; i++) {
- irq_set_affinity_hint(msix_entries[i].vector,
- NULL);
- free_irq(msix_entries[i].vector,
- &oct->ioq_vector[i]);
+ if (oct->ioq_vector[i].vector) {
+ irq_set_affinity_hint(
+ msix_entries[i].vector,
+ NULL);
+ free_irq(msix_entries[i].vector,
+ &oct->ioq_vector[i]);
+ oct->ioq_vector[i].vector = 0;
+ }
}
pci_disable_msix(oct->pci_dev);
kfree(oct->msix_entries);
@@ -1137,6 +955,10 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
oct->droq[0]->ops.poll_mode = 0;
}
+ /* Delete NAPI */
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ netif_napi_del(napi);
+
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
unregister_netdev(netdev);
@@ -1174,7 +996,7 @@ static int liquidio_stop_nic_module(struct octeon_device *oct)
for (i = 0; i < oct->ifcount; i++) {
lio = GET_LIO(oct->props[i].netdev);
- for (j = 0; j < lio->linfo.num_rxpciq; j++)
+ for (j = 0; j < oct->num_oqs; j++)
octeon_unregister_droq_ops(oct,
lio->linfo.rxpciq[j].s.q_no);
}
@@ -1262,7 +1084,7 @@ static int check_txq_state(struct lio *lio, struct sk_buff *skb)
if (netif_is_multiqueue(lio->netdev)) {
q = skb->queue_mapping;
- iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no;
+ iq = lio->linfo.txpciq[q % lio->oct_dev->num_iqs].s.q_no;
} else {
iq = lio->txq;
q = iq;
@@ -1391,41 +1213,6 @@ static void free_netsgbuf_with_resp(void *buf)
}
/**
- * \brief Setup output queue
- * @param oct octeon device
- * @param q_no which queue
- * @param num_descs how many descriptors
- * @param desc_size size of each descriptor
- * @param app_ctx application context
- */
-static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
- int desc_size, void *app_ctx)
-{
- int ret_val;
-
- dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
- /* droq creation and local register settings. */
- ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
- if (ret_val < 0)
- return ret_val;
-
- if (ret_val == 1) {
- dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
- return 0;
- }
-
- /* Enable the droq queues */
- octeon_set_droq_pkt_op(oct, q_no, 1);
-
- /* Send Credit for Octeon Output queues. Credits are always
- * sent after the output queue is enabled.
- */
- writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg);
-
- return ret_val;
-}
-
-/**
* \brief Callback for getting interface configuration
* @param status status of request
* @param buf pointer to resp structure
@@ -1457,290 +1244,6 @@ static void if_cfg_callback(struct octeon_device *oct,
wake_up_interruptible(&ctx->wc);
}
-/** Routine to push packets arriving on Octeon interface up to the network layer.
- * @param oct_id - octeon device id.
- * @param skbuff - skbuff struct to be passed to network layer.
- * @param len - size of total data received.
- * @param rh - Control header associated with the packet
- * @param param - additional control data with the packet
- * @param arg - farg registered in droq_ops
- */
-static void
-liquidio_push_packet(u32 octeon_id __attribute__((unused)),
- void *skbuff,
- u32 len,
- union octeon_rh *rh,
- void *param,
- void *arg)
-{
- struct napi_struct *napi = param;
- struct octeon_droq *droq =
- container_of(param, struct octeon_droq, napi);
- struct net_device *netdev = (struct net_device *)arg;
- struct sk_buff *skb = (struct sk_buff *)skbuff;
- u16 vtag = 0;
- u32 r_dh_off;
-
- if (netdev) {
- struct lio *lio = GET_LIO(netdev);
- int packet_was_received;
-
- /* Do not proceed if the interface is not in RUNNING state. */
- if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
- recv_buffer_free(skb);
- droq->stats.rx_dropped++;
- return;
- }
-
- skb->dev = netdev;
-
- skb_record_rx_queue(skb, droq->q_no);
- if (likely(len > MIN_SKB_SIZE)) {
- struct octeon_skb_page_info *pg_info;
- unsigned char *va;
-
- pg_info = ((struct octeon_skb_page_info *)(skb->cb));
- if (pg_info->page) {
- /* For Paged allocation use the frags */
- va = page_address(pg_info->page) +
- pg_info->page_offset;
- memcpy(skb->data, va, MIN_SKB_SIZE);
- skb_put(skb, MIN_SKB_SIZE);
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- pg_info->page,
- pg_info->page_offset +
- MIN_SKB_SIZE,
- len - MIN_SKB_SIZE,
- LIO_RXBUFFER_SZ);
- }
- } else {
- struct octeon_skb_page_info *pg_info =
- ((struct octeon_skb_page_info *)(skb->cb));
- skb_copy_to_linear_data(skb,
- page_address(pg_info->page) +
- pg_info->page_offset, len);
- skb_put(skb, len);
- put_page(pg_info->page);
- }
-
- r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;
-
- if (rh->r_dh.has_hwtstamp)
- r_dh_off -= BYTES_PER_DHLEN_UNIT;
-
- if (rh->r_dh.has_hash) {
- __be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
- u32 hash = be32_to_cpu(*hash_be);
-
- skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
- r_dh_off -= BYTES_PER_DHLEN_UNIT;
- }
-
- skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
- skb->protocol = eth_type_trans(skb, skb->dev);
-
- if ((netdev->features & NETIF_F_RXCSUM) &&
- (((rh->r_dh.encap_on) &&
- (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
- (!(rh->r_dh.encap_on) &&
- (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
- /* checksum has already been verified */
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb->ip_summed = CHECKSUM_NONE;
-
- /* Setting Encapsulation field on basis of status received
- * from the firmware
- */
- if (rh->r_dh.encap_on) {
- skb->encapsulation = 1;
- skb->csum_level = 1;
- droq->stats.rx_vxlan++;
- }
-
- /* inbound VLAN tag */
- if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- rh->r_dh.vlan) {
- u16 priority = rh->r_dh.priority;
- u16 vid = rh->r_dh.vlan;
-
- vtag = (priority << VLAN_PRIO_SHIFT) | vid;
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
- }
-
- packet_was_received = (napi_gro_receive(napi, skb) != GRO_DROP);
-
- if (packet_was_received) {
- droq->stats.rx_bytes_received += len;
- droq->stats.rx_pkts_received++;
- } else {
- droq->stats.rx_dropped++;
- netif_info(lio, rx_err, lio->netdev,
- "droq:%d error rx_dropped:%llu\n",
- droq->q_no, droq->stats.rx_dropped);
- }
-
- } else {
- recv_buffer_free(skb);
- }
-}
-
-/**
- * \brief callback when receive interrupt occurs and we are in NAPI mode
- * @param arg pointer to octeon output queue
- */
-static void liquidio_vf_napi_drv_callback(void *arg)
-{
- struct octeon_droq *droq = arg;
-
- napi_schedule_irqoff(&droq->napi);
-}
-
-/**
- * \brief Entry point for NAPI polling
- * @param napi NAPI structure
- * @param budget maximum number of items to process
- */
-static int liquidio_napi_poll(struct napi_struct *napi, int budget)
-{
- struct octeon_instr_queue *iq;
- struct octeon_device *oct;
- struct octeon_droq *droq;
- int tx_done = 0, iq_no;
- int work_done;
-
- droq = container_of(napi, struct octeon_droq, napi);
- oct = droq->oct_dev;
- iq_no = droq->q_no;
-
- /* Handle Droq descriptors */
- work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
- POLL_EVENT_PROCESS_PKTS,
- budget);
-
- /* Flush the instruction queue */
- iq = oct->instr_queue[iq_no];
- if (iq) {
- if (atomic_read(&iq->instr_pending))
- /* Process iq buffers within the budget limits */
- tx_done = octeon_flush_iq(oct, iq, budget);
- else
- tx_done = 1;
-
- /* Update iq read-index rather than waiting for next interrupt.
- * Return back if tx_done is false.
- */
- update_txq_status(oct, iq_no);
- } else {
- dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
- __func__, iq_no);
- }
-
- /* force enable interrupt if reg cnts are high to avoid wraparound */
- if ((work_done < budget && tx_done) ||
- (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
- (droq->pkt_count >= MAX_REG_CNT)) {
- tx_done = 1;
- napi_complete_done(napi, work_done);
- octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
- POLL_EVENT_ENABLE_INTR, 0);
- return 0;
- }
-
- return (!tx_done) ? (budget) : (work_done);
-}
-
-/**
- * \brief Setup input and output queues
- * @param octeon_dev octeon device
- * @param ifidx Interface index
- *
- * Note: Queues are with respect to the octeon device. Thus
- * an input queue is for egress packets, and output queues
- * are for ingress packets.
- */
-static int setup_io_queues(struct octeon_device *octeon_dev, int ifidx)
-{
- struct octeon_droq_ops droq_ops;
- struct net_device *netdev;
- static int cpu_id_modulus;
- struct octeon_droq *droq;
- struct napi_struct *napi;
- static int cpu_id;
- int num_tx_descs;
- struct lio *lio;
- int retval = 0;
- int q, q_no;
-
- netdev = octeon_dev->props[ifidx].netdev;
-
- lio = GET_LIO(netdev);
-
- memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
-
- droq_ops.fptr = liquidio_push_packet;
- droq_ops.farg = netdev;
-
- droq_ops.poll_mode = 1;
- droq_ops.napi_fn = liquidio_vf_napi_drv_callback;
- cpu_id = 0;
- cpu_id_modulus = num_present_cpus();
-
- /* set up DROQs. */
- for (q = 0; q < lio->linfo.num_rxpciq; q++) {
- q_no = lio->linfo.rxpciq[q].s.q_no;
-
- retval = octeon_setup_droq(
- octeon_dev, q_no,
- CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev),
- lio->ifidx),
- CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev),
- lio->ifidx),
- NULL);
- if (retval) {
- dev_err(&octeon_dev->pci_dev->dev,
- "%s : Runtime DROQ(RxQ) creation failed.\n",
- __func__);
- return 1;
- }
-
- droq = octeon_dev->droq[q_no];
- napi = &droq->napi;
- netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
-
- /* designate a CPU for this droq */
- droq->cpu_id = cpu_id;
- cpu_id++;
- if (cpu_id >= cpu_id_modulus)
- cpu_id = 0;
-
- octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
- }
-
- /* 23XX VF can send/recv control messages (via the first VF-owned
- * droq) from the firmware even if the ethX interface is down,
- * so that's why poll_mode must be off for the first droq.
- */
- octeon_dev->droq[0]->ops.poll_mode = 0;
-
- /* set up IQs. */
- for (q = 0; q < lio->linfo.num_txpciq; q++) {
- num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
- octeon_get_conf(octeon_dev), lio->ifidx);
- retval = octeon_setup_iq(octeon_dev, ifidx, q,
- lio->linfo.txpciq[q], num_tx_descs,
- netdev_get_tx_queue(netdev, q));
- if (retval) {
- dev_err(&octeon_dev->pci_dev->dev,
- " %s : Runtime IQ(TxQ) creation failed.\n",
- __func__);
- return 1;
- }
- }
-
- return 0;
-}
-
/**
* \brief Net device open for LiquidIO
* @param netdev network device
@@ -1784,6 +1287,16 @@ static int liquidio_stop(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
+ struct napi_struct *napi, *n;
+
+ if (oct->props[lio->ifidx].napi_enabled) {
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_disable(napi);
+
+ oct->props[lio->ifidx].napi_enabled = 0;
+
+ oct->droq[0]->ops.poll_mode = 0;
+ }
netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
/* Inform that netif carrier is down */
@@ -1988,7 +1501,10 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
oct = lio->oct_dev;
- for (i = 0; i < lio->linfo.num_txpciq; i++) {
+ if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
+ return stats;
+
+ for (i = 0; i < oct->num_iqs; i++) {
iq_no = lio->linfo.txpciq[i].s.q_no;
iq_stats = &oct->instr_queue[iq_no]->stats;
pkts += iq_stats->tx_done;
@@ -2004,7 +1520,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
drop = 0;
bytes = 0;
- for (i = 0; i < lio->linfo.num_rxpciq; i++) {
+ for (i = 0; i < oct->num_oqs; i++) {
oq_no = lio->linfo.rxpciq[i].s.q_no;
oq_stats = &oct->droq[oq_no]->stats;
pkts += oq_stats->rx_pkts_received;
@@ -2028,17 +1544,31 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
*/
static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
{
- struct lio *lio = GET_LIO(netdev);
- struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ struct octeon_device *oct;
+ struct lio *lio;
+ int ret = 0;
- lio->mtu = new_mtu;
+ lio = GET_LIO(netdev);
+ oct = lio->oct_dev;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
- netif_info(lio, probe, lio->netdev, "MTU Changed from %d to %d\n",
- netdev->mtu, new_mtu);
- dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
- netdev->name, netdev->mtu, new_mtu);
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
+ nctrl.ncmd.s.param1 = new_mtu;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = LIO_CMD_WAIT_TM;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
- netdev->mtu = new_mtu;
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
+ return -EIO;
+ }
+
+ lio->mtu = new_mtu;
return 0;
}
@@ -2959,7 +2489,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
/* Copy MAC Address to OS network device structure */
ether_addr_copy(netdev->dev_addr, mac);
- if (setup_io_queues(octeon_dev, i)) {
+ if (liquidio_setup_io_queues(octeon_dev, i,
+ lio->linfo.num_txpciq,
+ lio->linfo.num_rxpciq)) {
dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
goto setup_nic_dev_fail;
}
@@ -3182,7 +2714,7 @@ static int octeon_device_init(struct octeon_device *oct)
LIQUIDIO_VERSION, oct->sriov_info.rings_per_vf);
/* Setup the interrupt handler and record the INT SUM register address*/
- if (octeon_setup_interrupt(oct))
+ if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf))
return 1;
atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);
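
Several VF paths above (txqs_wake(), check_txq_state()) now fold a stack queue index onto the device's actual IQ count instead of linfo.num_txpciq. A small standalone sketch of that modulo mapping, with an assumed four-IQ device and hypothetical hardware queue numbers:

#include <stdio.h>

int main(void)
{
	unsigned int num_iqs = 4;                       /* assumed device IQ count */
	unsigned int txpciq_q_no[4] = { 8, 9, 10, 11 }; /* hypothetical h/w queue numbers */
	unsigned int q;

	/* Eight stack tx queues fold onto four instruction queues. */
	for (q = 0; q < 8; q++)
		printf("stack txq %u -> hw iq %u\n", q, txpciq_q_no[q % num_iqs]);
	return 0;
}
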
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 231dd7fbfb80..3788c8cd082a 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -27,7 +27,7 @@
#define LIQUIDIO_PACKAGE ""
#define LIQUIDIO_BASE_MAJOR_VERSION 1
-#define LIQUIDIO_BASE_MINOR_VERSION 5
+#define LIQUIDIO_BASE_MINOR_VERSION 6
#define LIQUIDIO_BASE_MICRO_VERSION 1
#define LIQUIDIO_BASE_VERSION __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \
__stringify(LIQUIDIO_BASE_MINOR_VERSION)
@@ -106,6 +106,7 @@ enum octeon_tag_type {
#define MAX_IOQ_INTERRUPTS_PER_PF (64 * 2)
#define MAX_IOQ_INTERRUPTS_PER_VF (8 * 2)
+#define SCR2_BIT_FW_LOADED 63
static inline u32 incr_index(u32 index, u32 count, u32 max)
{
@@ -189,7 +190,6 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
#define OCTNET_CMD_Q 0
/* NIC Command types */
-#define OCTNET_CMD_RESET_PF 0x0
#define OCTNET_CMD_CHANGE_MTU 0x1
#define OCTNET_CMD_CHANGE_MACADDR 0x2
#define OCTNET_CMD_CHANGE_DEVFLAGS 0x3
@@ -226,6 +226,9 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
#define OCTNET_CMD_SET_UC_LIST 0x1b
#define OCTNET_CMD_SET_VF_LINKSTATE 0x1c
+
+#define OCTNET_CMD_QUEUE_COUNT_CTL 0x1f
+
#define OCTNET_CMD_VXLAN_PORT_ADD 0x0
#define OCTNET_CMD_VXLAN_PORT_DEL 0x1
#define OCTNET_CMD_RXCSUM_ENABLE 0x0
@@ -235,6 +238,8 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
#define OCTNET_CMD_VLAN_FILTER_ENABLE 0x1
#define OCTNET_CMD_VLAN_FILTER_DISABLE 0x0
+#define LIO_CMD_WAIT_TM 100
+
/* RX(packets coming from wire) Checksum verification flags */
/* TCP/UDP csum */
#define CNNIC_L4SUM_VERIFIED 0x1
@@ -768,6 +773,7 @@ struct nic_rx_stats {
/* firmware stats */
u64 fw_total_rcvd;
u64 fw_total_fwd;
+ u64 fw_total_fwd_bytes;
u64 fw_err_pko;
u64 fw_err_link;
u64 fw_err_drop;
@@ -814,6 +820,7 @@ struct nic_tx_stats {
u64 fw_tso; /* number of tso requests */
u64 fw_tso_fwd; /* number of packets segmented in tso */
u64 fw_tx_vxlan;
+ u64 fw_err_pki;
};
struct oct_link_stats {
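
SCR2_BIT_FW_LOADED above is a bit position, not a mask. A one-line sketch of how such a constant is typically tested against a 64-bit scratch value (the actual register read lives in the CN23xx chip code; the value here is assumed):

#include <stdint.h>
#include <stdio.h>

#define SCR2_BIT_FW_LOADED 63

int main(void)
{
	/* Assumed scratch register value with the firmware-loaded bit set. */
	uint64_t scratch2 = 1ULL << SCR2_BIT_FW_LOADED;

	if (scratch2 & (1ULL << SCR2_BIT_FW_LOADED))
		printf("firmware already loaded\n");
	return 0;
}
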
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
index f229d792c2b3..63bd9c94e547 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -71,13 +71,17 @@
#define CN23XX_MAX_RINGS_PER_VF 8
#define CN23XX_MAX_INPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
-#define CN23XX_MAX_IQ_DESCRIPTORS 512
+#define CN23XX_MAX_IQ_DESCRIPTORS 2048
+#define CN23XX_DEFAULT_IQ_DESCRIPTORS 512
+#define CN23XX_MIN_IQ_DESCRIPTORS 128
#define CN23XX_DB_MIN 1
#define CN23XX_DB_MAX 8
#define CN23XX_DB_TIMEOUT 1
#define CN23XX_MAX_OUTPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
-#define CN23XX_MAX_OQ_DESCRIPTORS 512
+#define CN23XX_MAX_OQ_DESCRIPTORS 2048
+#define CN23XX_DEFAULT_OQ_DESCRIPTORS 512
+#define CN23XX_MIN_OQ_DESCRIPTORS 128
#define CN23XX_OQ_BUF_SIZE 1664
#define CN23XX_OQ_PKTSPER_INTR 128
/*#define CAVIUM_ONLY_CN23XX_RX_PERF*/
@@ -163,6 +167,11 @@
((cfg)->misc.oct_link_query_interval)
#define CFG_GET_IS_SLI_BP_ON(cfg) ((cfg)->misc.enable_sli_oq_bp)
+#define CFG_SET_NUM_RX_DESCS_NIC_IF(cfg, idx, value) \
+ ((cfg)->nic_if_cfg[idx].num_rx_descs = value)
+#define CFG_SET_NUM_TX_DESCS_NIC_IF(cfg, idx, value) \
+ ((cfg)->nic_if_cfg[idx].num_tx_descs = value)
+
/* Max IOQs per OCTEON Link */
#define MAX_IOQS_PER_NICIF 64
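
With descriptor counts now spanning CN23XX_MIN_*_DESCRIPTORS to CN23XX_MAX_*_DESCRIPTORS, a caller (e.g. an ethtool ring-resize path) would validate a requested size against those bounds before storing it via the new CFG_SET_* macros. An illustrative standalone check, not lifted from the driver:

#include <stdio.h>

#define CN23XX_MAX_OQ_DESCRIPTORS     2048
#define CN23XX_DEFAULT_OQ_DESCRIPTORS  512
#define CN23XX_MIN_OQ_DESCRIPTORS      128

/* Fall back to the default when a request is out of range. */
static int validate_rx_descs(int requested)
{
	if (requested < CN23XX_MIN_OQ_DESCRIPTORS ||
	    requested > CN23XX_MAX_OQ_DESCRIPTORS)
		return CN23XX_DEFAULT_OQ_DESCRIPTORS;
	return requested;
}

int main(void)
{
	printf("%d\n", validate_rx_descs(64));   /* out of range -> 512  */
	printf("%d\n", validate_rx_descs(1024)); /* in range     -> 1024 */
	return 0;
}
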
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index e08f7600f986..ec3dd69cd6b2 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -37,13 +37,6 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
u32 flags);
static int octeon_console_read(struct octeon_device *oct, u32 console_num,
char *buffer, u32 buf_size);
-static u32 console_bitmask;
-module_param(console_bitmask, int, 0644);
-MODULE_PARM_DESC(console_bitmask,
- "Bitmask indicating which consoles have debug output redirected to syslog.");
-
-#define MIN(a, b) min((a), (b))
-#define CAST_ULL(v) ((u64)(v))
#define BOOTLOADER_PCI_READ_BUFFER_DATA_ADDR 0x0006c008
#define BOOTLOADER_PCI_READ_BUFFER_LEN_ADDR 0x0006c004
@@ -139,16 +132,6 @@ struct octeon_pci_console_desc {
};
/**
- * \brief determines if a given console has debug enabled.
- * @param console console to check
- * @returns 1 = enabled. 0 otherwise
- */
-static int octeon_console_debug_enabled(u32 console)
-{
- return (console_bitmask >> (console)) & 0x1;
-}
-
-/**
* This function is the implementation of the get macros defined
 * for individual structure members. The arguments are generated
 * by the macros in order to read only the needed memory.
@@ -234,7 +217,7 @@ static int __cvmx_bootmem_check_version(struct octeon_device *oct,
(exact_match && major_version != exact_match)) {
dev_err(&oct->pci_dev->dev, "bootmem ver mismatch %d.%d addr:0x%llx\n",
major_version, minor_version,
- CAST_ULL(oct->bootmem_desc_addr));
+ (long long)oct->bootmem_desc_addr);
return -1;
} else {
return 0;
@@ -454,20 +437,31 @@ static void output_console_line(struct octeon_device *oct,
{
char *line;
s32 i;
+ size_t len;
line = console_buffer;
for (i = 0; i < bytes_read; i++) {
/* Output a line at a time, prefixed */
if (console_buffer[i] == '\n') {
console_buffer[i] = '\0';
- if (console->leftover[0]) {
- dev_info(&oct->pci_dev->dev, "%lu: %s%s\n",
- console_num, console->leftover,
- line);
+ /* We need to output 'line', prefaced by 'leftover'.
+ * However, it is possible we're being called to
+ * output 'leftover' by itself (in the case of nothing
+ * having been read from the console).
+ *
+ * To avoid duplication, check for this condition.
+ */
+ if (console->leftover[0] &&
+ (line != console->leftover)) {
+ if (console->print)
+ (*console->print)(oct, (u32)console_num,
+ console->leftover,
+ line);
console->leftover[0] = '\0';
} else {
- dev_info(&oct->pci_dev->dev, "%lu: %s\n",
- console_num, line);
+ if (console->print)
+ (*console->print)(oct, (u32)console_num,
+ line, NULL);
}
line = &console_buffer[i + 1];
}
@@ -476,13 +470,16 @@ static void output_console_line(struct octeon_device *oct,
/* Save off any leftovers */
if (line != &console_buffer[bytes_read]) {
console_buffer[bytes_read] = '\0';
- strcpy(console->leftover, line);
+ len = strlen(console->leftover);
+ strncpy(&console->leftover[len], line,
+ sizeof(console->leftover) - len);
}
}
static void check_console(struct work_struct *work)
{
s32 bytes_read, tries, total_read;
+ size_t len;
struct octeon_console *console;
struct cavium_wk *wk = (struct cavium_wk *)work;
struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
@@ -504,7 +501,7 @@ static void check_console(struct work_struct *work)
total_read += bytes_read;
if (console->waiting)
octeon_console_handle_result(oct, console_num);
- if (octeon_console_debug_enabled(console_num)) {
+ if (console->print) {
output_console_line(oct, console, console_num,
console_buffer, bytes_read);
}
@@ -519,10 +516,13 @@ static void check_console(struct work_struct *work)
/* If nothing is read after polling the console,
 * output any leftover data
*/
- if (octeon_console_debug_enabled(console_num) &&
- (total_read == 0) && (console->leftover[0])) {
- dev_info(&oct->pci_dev->dev, "%u: %s\n",
- console_num, console->leftover);
+ if (console->print && (total_read == 0) &&
+ (console->leftover[0])) {
+ /* append '\n' as terminator for 'output_console_line' */
+ len = strlen(console->leftover);
+ console->leftover[len] = '\n';
+ output_console_line(oct, console, console_num,
+ console->leftover, (s32)(len + 1));
console->leftover[0] = '\0';
}
@@ -574,7 +574,84 @@ int octeon_init_consoles(struct octeon_device *oct)
return ret;
}
-int octeon_add_console(struct octeon_device *oct, u32 console_num)
+static void octeon_get_uboot_version(struct octeon_device *oct)
+{
+ s32 bytes_read, tries, total_read;
+ struct octeon_console *console;
+ u32 console_num = 0;
+ char *uboot_ver;
+ char *buf;
+ char *p;
+
+#define OCTEON_UBOOT_VER_BUF_SIZE 512
+ buf = kmalloc(OCTEON_UBOOT_VER_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ if (octeon_console_send_cmd(oct, "setenv stdout pci\n", 50)) {
+ kfree(buf);
+ return;
+ }
+
+ if (octeon_console_send_cmd(oct, "version\n", 1)) {
+ kfree(buf);
+ return;
+ }
+
+ console = &oct->console[console_num];
+ tries = 0;
+ total_read = 0;
+
+ do {
+ /* Take console output regardless of whether it will
+ * be logged
+ */
+ bytes_read =
+ octeon_console_read(oct,
+ console_num, buf + total_read,
+ OCTEON_UBOOT_VER_BUF_SIZE - 1 -
+ total_read);
+ if (bytes_read > 0) {
+ buf[bytes_read] = '\0';
+
+ total_read += bytes_read;
+ if (console->waiting)
+ octeon_console_handle_result(oct, console_num);
+ } else if (bytes_read < 0) {
+ dev_err(&oct->pci_dev->dev, "Error reading console %u, ret=%d\n",
+ console_num, bytes_read);
+ }
+
+ tries++;
+ } while ((bytes_read > 0) && (tries < 16));
+
+ /* If nothing is read after polling the console,
+ * output any leftover data
+ */
+ if ((total_read == 0) && (console->leftover[0])) {
+ dev_dbg(&oct->pci_dev->dev, "%u: %s\n",
+ console_num, console->leftover);
+ console->leftover[0] = '\0';
+ }
+
+ buf[OCTEON_UBOOT_VER_BUF_SIZE - 1] = '\0';
+
+ uboot_ver = strstr(buf, "U-Boot");
+ if (uboot_ver) {
+ p = strstr(uboot_ver, "mips");
+ if (p) {
+ p--;
+ *p = '\0';
+ dev_info(&oct->pci_dev->dev, "%s\n", uboot_ver);
+ }
+ }
+
+ kfree(buf);
+ octeon_console_send_cmd(oct, "setenv stdout serial\n", 50);
+}
+
+int octeon_add_console(struct octeon_device *oct, u32 console_num,
+ char *dbg_enb)
{
int ret = 0;
u32 delay;
@@ -610,17 +687,19 @@ int octeon_add_console(struct octeon_device *oct, u32 console_num)
work = &oct->console_poll_work[console_num].work;
+ octeon_get_uboot_version(oct);
+
INIT_DELAYED_WORK(work, check_console);
oct->console_poll_work[console_num].ctxptr = (void *)oct;
oct->console_poll_work[console_num].ctxul = console_num;
delay = OCTEON_CONSOLE_POLL_INTERVAL_MS;
schedule_delayed_work(work, msecs_to_jiffies(delay));
- if (octeon_console_debug_enabled(console_num)) {
- ret = octeon_console_send_cmd(oct,
- "setenv pci_console_active 1",
- 2000);
- }
+ /* an empty string means use default debug console enablement */
+ if (dbg_enb && !dbg_enb[0])
+ dbg_enb = "setenv pci_console_active 1";
+ if (dbg_enb)
+ ret = octeon_console_send_cmd(oct, dbg_enb, 2000);
console->active = 1;
}
@@ -704,7 +783,7 @@ static int octeon_console_read(struct octeon_device *oct, u32 console_num,
if (bytes_to_read <= 0)
return bytes_to_read;
- bytes_to_read = MIN(bytes_to_read, (s32)buf_size);
+ bytes_to_read = min_t(s32, bytes_to_read, buf_size);
/* Check to see if what we want to read is not contiguous, and limit
* ourselves to the contiguous block
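
The strcpy()-to-strncpy() change above bounds how much of a partial console line can accumulate in the fixed-size 'leftover' buffer across reads. A userspace sketch of that append (buffer size is assumed; the explicit terminator is added here for safety):

#include <stdio.h>
#include <string.h>

#define LEFTOVER_SZ 512	/* assumed, mirrors OCTEON_CONSOLE_MAX_READ_BYTES */

static char leftover[LEFTOVER_SZ];

static void save_leftover(const char *line)
{
	size_t len = strlen(leftover);

	/* Append at most the remaining space, never past the buffer end. */
	strncpy(&leftover[len], line, sizeof(leftover) - len);
	leftover[sizeof(leftover) - 1] = '\0'; /* guard against truncation */
}

int main(void)
{
	save_leftover("partial line, ");
	save_leftover("rest of line");
	printf("%s\n", leftover);
	return 0;
}
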
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 623e28ca736e..29d53b1763a7 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -418,7 +418,7 @@ static struct octeon_config default_cn23xx_conf = {
/** IQ attributes */
.iq = {
.max_iqs = CN23XX_CFG_IO_QUEUES,
- .pending_list_size = (CN23XX_MAX_IQ_DESCRIPTORS *
+ .pending_list_size = (CN23XX_DEFAULT_IQ_DESCRIPTORS *
CN23XX_CFG_IO_QUEUES),
.instr_type = OCTEON_64BYTE_INSTR,
.db_min = CN23XX_DB_MIN,
@@ -436,8 +436,8 @@ static struct octeon_config default_cn23xx_conf = {
},
.num_nic_ports = DEFAULT_NUM_NIC_PORTS_23XX,
- .num_def_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS,
- .num_def_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS,
+ .num_def_rx_descs = CN23XX_DEFAULT_OQ_DESCRIPTORS,
+ .num_def_tx_descs = CN23XX_DEFAULT_IQ_DESCRIPTORS,
.def_rx_buf_size = CN23XX_OQ_BUF_SIZE,
/* For ethernet interface 0: Port cfg Attributes */
@@ -455,10 +455,10 @@ static struct octeon_config default_cn23xx_conf = {
.num_rxqs = DEF_RXQS_PER_INTF,
/* Num of desc for rx rings */
- .num_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS,
+ .num_rx_descs = CN23XX_DEFAULT_OQ_DESCRIPTORS,
/* Num of desc for tx rings */
- .num_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS,
+ .num_tx_descs = CN23XX_DEFAULT_IQ_DESCRIPTORS,
/* SKB size. We need not change buf size even for Jumbo frames.
* Octeon can send jumbo frames in 4 consecutive descriptors,
@@ -484,10 +484,10 @@ static struct octeon_config default_cn23xx_conf = {
.num_rxqs = DEF_RXQS_PER_INTF,
/* Num of desc for rx rings */
- .num_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS,
+ .num_rx_descs = CN23XX_DEFAULT_OQ_DESCRIPTORS,
/* Num of desc for tx rings */
- .num_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS,
+ .num_tx_descs = CN23XX_DEFAULT_IQ_DESCRIPTORS,
/* SKB size. We need not change buf size even for Jumbo frames.
* Octeon can send jumbo frames in 4 consecutive descriptors,
@@ -528,9 +528,10 @@ static struct octeon_config_ptr {
};
static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = {
- "BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
+ "BEGIN", "PCI-ENABLE-DONE", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
"IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE",
- "DROQ-INIT-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE",
+ "DROQ-INIT-DONE", "MBOX-SETUP-DONE", "MSIX-ALLOC-VECTOR-DONE",
+ "INTR-SET-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE",
"HOST-READY", "CORE-READY", "RUNNING", "IN-RESET",
"INVALID"
};
@@ -876,11 +877,11 @@ int octeon_setup_instr_queues(struct octeon_device *oct)
oct->num_iqs = 0;
- oct->instr_queue[0] = vmalloc_node(sizeof(*oct->instr_queue[0]),
+ oct->instr_queue[0] = vzalloc_node(sizeof(*oct->instr_queue[0]),
numa_node);
if (!oct->instr_queue[0])
oct->instr_queue[0] =
- vmalloc(sizeof(struct octeon_instr_queue));
+ vzalloc(sizeof(struct octeon_instr_queue));
if (!oct->instr_queue[0])
return 1;
memset(oct->instr_queue[0], 0, sizeof(struct octeon_instr_queue));
@@ -923,9 +924,9 @@ int octeon_setup_output_queues(struct octeon_device *oct)
desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn23xx_vf));
}
oct->num_oqs = 0;
- oct->droq[0] = vmalloc_node(sizeof(*oct->droq[0]), numa_node);
+ oct->droq[0] = vzalloc_node(sizeof(*oct->droq[0]), numa_node);
if (!oct->droq[0])
- oct->droq[0] = vmalloc(sizeof(*oct->droq[0]));
+ oct->droq[0] = vzalloc(sizeof(*oct->droq[0]));
if (!oct->droq[0])
return 1;
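
The allocation changes above switch to zeroing variants while keeping the NUMA-local-first fallback. The shape of that pattern, sketched with calloc() standing in for vzalloc_node()/vzalloc():

#include <stdlib.h>

/* Stand-in for vzalloc_node(): a node-local, zeroing allocation that
 * may fail; 'node' is unused in this userspace sketch.
 */
static void *zalloc_node(size_t size, int node)
{
	(void)node;
	return calloc(1, size);
}

static void *alloc_queue(size_t size, int numa_node)
{
	void *p = zalloc_node(size, numa_node);

	if (!p)
		p = calloc(1, size); /* generic fallback, still zeroed */
	return p;
}

int main(void)
{
	void *droq = alloc_queue(4096, 0);

	free(droq);
	return 0;
}
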
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index c90ed48ae8ab..894af199ddef 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -22,6 +22,8 @@
#ifndef _OCTEON_DEVICE_H_
#define _OCTEON_DEVICE_H_
+#include <linux/interrupt.h>
+
/** PCI VendorId Device Id */
#define OCTEON_CN68XX_PCIID 0x91177d
#define OCTEON_CN66XX_PCIID 0x92177d
@@ -192,6 +194,8 @@ struct octeon_reg_list {
};
#define OCTEON_CONSOLE_MAX_READ_BYTES 512
+typedef int (*octeon_console_print_fn)(struct octeon_device *oct,
+ u32 num, char *pre, char *suf);
struct octeon_console {
u32 active;
u32 waiting;
@@ -199,6 +203,7 @@ struct octeon_console {
u32 buffer_size;
u64 input_base_addr;
u64 output_base_addr;
+ octeon_console_print_fn print;
char leftover[OCTEON_CONSOLE_MAX_READ_BYTES];
};
@@ -552,6 +557,7 @@ struct octeon_device {
} loc;
atomic_t *adapter_refcount; /* reference count of adapter */
+ bool ptp_enable;
};
#define OCT_DRV_ONLINE 1
@@ -565,6 +571,8 @@ struct octeon_device {
#define CHIP_CONF(oct, TYPE) \
(((struct octeon_ ## TYPE *)((oct)->chip))->conf)
+#define MAX_IO_PENDING_PKT_COUNT 100
+
/*------------------ Function Prototypes ----------------------*/
/** Initialize device list memory */
@@ -740,11 +748,17 @@ int octeon_init_consoles(struct octeon_device *oct);
/**
* Adds access to a console to the device.
*
- * @param oct which octeon to add to
- * @param console_num which console
+ * @param oct: which octeon device to add the console to
+ * @param console_num: which console to add
+ * @param dbg_enb: pointer to a debug enablement string, one of:
+ *    * NULL to disable debug output
+ *    * an empty string to enable debug output via the default method
+ *    * a specific string to enable debug console output
+ *
* @return Zero on success, negative on failure.
*/
-int octeon_add_console(struct octeon_device *oct, u32 console_num);
+int octeon_add_console(struct octeon_device *oct, u32 console_num,
+ char *dbg_enb);
/** write or read from a console */
int octeon_console_write(struct octeon_device *oct, u32 console_num,
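A short usage sketch of the three dbg_enb conventions documented above (console 0; attach_console0() is a hypothetical wrapper):

/* Attach console 0 with debug output controlled exactly as documented:
 * NULL disables it, "" enables it via the default method, and any
 * other string enables debug console output per that string.
 */
static int attach_console0(struct octeon_device *oct, char *dbg_enb)
{
	return octeon_add_console(oct, 0, dbg_enb);
}

A production caller would typically pass NULL, while a bring-up or firmware-debug path could pass "" or a specific enablement string.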
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 2e190deb2233..9372d4ce9954 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -145,6 +145,8 @@ octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
for (i = 0; i < droq->max_count; i++) {
pg_info = &droq->recv_buf_list[i].pg_info;
+		if (!pg_info->page)
+			continue;
if (pg_info->dma)
lio_unmap_ring(oct->pci_dev,
@@ -207,6 +209,10 @@ int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
droq->desc_ring, droq->desc_ring_dma);
memset(droq, 0, OCT_DROQ_SIZE);
+ oct->io_qmask.oq &= ~(1ULL << q_no);
+ vfree(oct->droq[q_no]);
+ oct->droq[q_no] = NULL;
+ oct->num_oqs--;
return 0;
}
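With octeon_delete_droq() now clearing the queue's bit in io_qmask.oq, freeing the control structure, and decrementing num_oqs, the mask stays an accurate picture of live queues. A sketch of walking the remaining active output queues through that 64-bit mask (helper name hypothetical):

#include <linux/bitops.h>

/* Log which output queues are still live according to io_qmask.oq. */
static void log_active_oqs(struct octeon_device *oct)
{
	u32 q_no;

	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES; q_no++) {
		if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
			continue;

		dev_dbg(&oct->pci_dev->dev, "oq %u is active\n", q_no);
	}
}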
@@ -275,12 +281,12 @@ int octeon_init_droq(struct octeon_device *oct,
droq->max_count);
droq->recv_buf_list = (struct octeon_recv_buffer *)
- vmalloc_node(droq->max_count *
+ vzalloc_node(droq->max_count *
OCT_DROQ_RECVBUF_SIZE,
numa_node);
if (!droq->recv_buf_list)
droq->recv_buf_list = (struct octeon_recv_buffer *)
- vmalloc(droq->max_count *
+ vzalloc(droq->max_count *
OCT_DROQ_RECVBUF_SIZE);
if (!droq->recv_buf_list) {
dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index 7ccffbb0019e..32ef3a7d88d8 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -35,6 +35,12 @@
#define DRV_NAME "LiquidIO"
+struct octeon_device_priv {
+ /** Tasklet structures for this device. */
+ struct tasklet_struct droq_tasklet;
+ unsigned long napi_mask;
+};
+
/** This structure is used by the NIC driver to store information required
 * to free the sk_buff when the packet has been fetched by Octeon.
 * Byte offsets below assume the worst case of a 64-bit system.
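Hoisting octeon_device_priv into this shared header lets both the PF and VF paths use one definition of the per-device bottom-half state. A sketch of wiring up the droq_tasklet field, assuming the device's priv pointer holds this structure and using the tasklet API of this kernel generation:

#include <linux/interrupt.h>

/* Bottom half deferred from the hard IRQ; drains completed RX work. */
static void droq_bh_handler(unsigned long data)
{
	struct octeon_device *oct = (struct octeon_device *)data;

	dev_dbg(&oct->pci_dev->dev, "servicing DROQs from tasklet\n");
}

static void setup_droq_tasklet(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv = oct->priv;

	tasklet_init(&oct_priv->droq_tasklet, droq_bh_handler,
		     (unsigned long)oct);
}

The interrupt path would then defer RX servicing out of hard-IRQ context with tasklet_schedule(&oct_priv->droq_tasklet).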
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index ec8504b2942d..9e36319cead6 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -33,6 +33,7 @@
#define LIO_IFSTATE_REGISTERED 0x02
#define LIO_IFSTATE_RUNNING 0x04
#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
+#define LIO_IFSTATE_RESETTING 0x10
struct oct_nic_stats_resp {
u64 rh;
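The new LIO_IFSTATE_RESETTING bit lets periodic work recognize an interface that is mid-reset and back off. A sketch using the driver's ifstate_check() helper, a sibling of the ifstate_reset() shown further down (fetch_stats_work() is hypothetical):

/* Skip the statistics refresh while the interface is resetting;
 * hardware counters are not trustworthy until the reset completes.
 */
static void fetch_stats_work(struct lio *lio)
{
	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	/* ... read and publish hardware statistics ... */
}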
@@ -166,6 +167,14 @@ void cleanup_rx_oom_poll_fn(struct net_device *netdev);
*/
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);
+int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
+ u32 num_iqs, u32 num_oqs);
+
+irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)),
+ void *dev);
+
+int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs);
+
/**
* \brief Register ethtool operations
* @param netdev pointer to network device
@@ -448,4 +457,30 @@ static inline void ifstate_reset(struct lio *lio, int state_flag)
atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
}
+/**
+ * \brief wait for all pending requests to complete
+ * @param oct Pointer to Octeon device
+ * @returns Zero if all pending requests drained, 1 if some were still
+ *          pending after the polling timeout
+ *
+ * Called during the shutdown sequence
+ */
+static inline int wait_for_pending_requests(struct octeon_device *oct)
+{
+ int i, pcount = 0;
+
+ for (i = 0; i < MAX_IO_PENDING_PKT_COUNT; i++) {
+ pcount = atomic_read(
+ &oct->response_list[OCTEON_ORDERED_SC_LIST]
+ .pending_req_count);
+ if (pcount)
+ schedule_timeout_uninterruptible(HZ / 10);
+ else
+ break;
+ }
+
+ if (pcount)
+ return 1;
+
+ return 0;
+}
+
#endif
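A sketch of how a teardown path might use the helper above: give in-flight soft commands up to roughly ten seconds (100 polls of HZ/10) to drain before tearing the queues down (function name hypothetical):

static void lio_drain_and_stop(struct octeon_device *oct)
{
	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev,
			"requests still pending after timeout\n");

	/* ... proceed to delete instruction and output queues ... */
}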
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 7b297f1f6dbe..1e0fbce86d60 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -77,13 +77,6 @@ int octeon_init_instr_queue(struct octeon_device *oct,
return 1;
}
- if (num_descs & (num_descs - 1)) {
- dev_err(&oct->pci_dev->dev,
- "Number of descriptors for instr queue %d not in power of 2.\n",
- iq_no);
- return 1;
- }
-
q_size = (u32)conf->instr_type * num_descs;
iq = oct->instr_queue[iq_no];
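The deleted test is the classic power-of-two idiom: a power of two has exactly one set bit, so n & (n - 1) clears it and yields zero. Removing the restriction allows instruction rings of arbitrary length. A standalone demonstration of the trick (the kernel also offers is_power_of_2() in <linux/log2.h>):

#include <stdbool.h>
#include <stdio.h>

/* True when n has exactly one set bit, i.e. n is a power of two. */
static bool is_pow2(unsigned int n)
{
	return n && !(n & (n - 1));
}

int main(void)
{
	unsigned int vals[] = { 512, 600, 1024, 2048, 3000 };
	unsigned int i;

	for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		printf("%4u -> %s\n", vals[i], is_pow2(vals[i]) ? "yes" : "no");

	return 0;
}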
@@ -190,6 +183,10 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
q_size = iq->max_count * desc_size;
lio_dma_free(oct, (u32)q_size, iq->base_addr,
iq->base_addr_dma);
+ oct->io_qmask.iq &= ~(1ULL << iq_no);
+ vfree(oct->instr_queue[iq_no]);
+ oct->instr_queue[iq_no] = NULL;
+ oct->num_iqs--;
return 0;
}
return 1;