Diffstat (limited to 'drivers/net/ethernet/chelsio/cxgb4')
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c |  4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c   | 49
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c    |  3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c          | 52
4 files changed, 42 insertions(+), 66 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 2a2938bbb93a..fc05248984fc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -902,7 +902,7 @@ void clear_all_filters(struct adapter *adapter)
adapter->tids.tid_tab[i];
if (f && (f->valid || f->pending))
- cxgb4_del_filter(dev, i, &f->fs);
+ cxgb4_del_filter(dev, f->tid, &f->fs);
}
sb = t4_read_reg(adapter, LE_DB_SRVR_START_INDEX_A);
@@ -910,7 +910,7 @@ void clear_all_filters(struct adapter *adapter)
f = (struct filter_entry *)adapter->tids.tid_tab[i];
if (f && (f->valid || f->pending))
- cxgb4_del_filter(dev, i, &f->fs);
+ cxgb4_del_filter(dev, f->tid, &f->fs);
}
}
}
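
Both hunks apply the same one-line fix to the two scan loops in clear_all_filters(): the loop counter i is an index into tid_tab, but a filter's hardware tid need not equal its table index (notably for entries in the server-filter region located via LE_DB_SRVR_START_INDEX_A), so deletion has to use the tid stored in the entry itself. A minimal sketch of the corrected idiom, assuming the tid_info and filter_entry fields used in the hunks above:

    struct filter_entry *f;
    unsigned int i;

    for (i = 0; i < adapter->tids.ntids; i++) {
        f = (struct filter_entry *)adapter->tids.tid_tab[i];
        if (f && (f->valid || f->pending))
            /* delete by the entry's own hardware tid, not by i */
            cxgb4_del_filter(dev, f->tid, &f->fs);
    }
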
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 649842a8aa28..97f90edbc068 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5381,12 +5381,11 @@ static inline bool is_x_10g_port(const struct link_config *lc)
static int cfg_queues(struct adapter *adap)
{
u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
+ u32 i, n10g = 0, qidx = 0, n1g = 0;
+ u32 ncpus = num_online_cpus();
u32 niqflint, neq, num_ulds;
struct sge *s = &adap->sge;
- u32 i, n10g = 0, qidx = 0;
-#ifndef CONFIG_CHELSIO_T4_DCB
- int q10g = 0;
-#endif
+ u32 q10g = 0, q1g;
/* Reduce memory usage in kdump environment, disable all offload. */
if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
@@ -5424,44 +5423,50 @@ static int cfg_queues(struct adapter *adap)
n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS);
+
+ /* We default to 1 queue per non-10G port and up to # of cores queues
+ * per 10G port.
+ */
+ if (n10g)
+ q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
+
+ n1g = adap->params.nports - n10g;
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging support we need to be able to support up
* to 8 Traffic Priorities; each of which will be assigned to its
* own TX Queue in order to prevent Head-Of-Line Blocking.
*/
+ q1g = 8;
if (adap->params.nports * 8 > avail_eth_qsets) {
dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
avail_eth_qsets, adap->params.nports * 8);
return -ENOMEM;
}
- for_each_port(adap, i) {
- struct port_info *pi = adap2pinfo(adap, i);
+ if (adap->params.nports * ncpus < avail_eth_qsets)
+ q10g = max(8U, ncpus);
+ else
+ q10g = max(8U, q10g);
- pi->first_qset = qidx;
- pi->nqsets = is_kdump_kernel() ? 1 : 8;
- qidx += pi->nqsets;
- }
-#else /* !CONFIG_CHELSIO_T4_DCB */
- /* We default to 1 queue per non-10G port and up to # of cores queues
- * per 10G port.
- */
- if (n10g)
- q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
- if (q10g > netif_get_num_default_rss_queues())
- q10g = netif_get_num_default_rss_queues();
+ while ((q10g * n10g) > (avail_eth_qsets - n1g * q1g))
+ q10g--;
- if (is_kdump_kernel())
+#else /* !CONFIG_CHELSIO_T4_DCB */
+ q1g = 1;
+ q10g = min(q10g, ncpus);
+#endif /* !CONFIG_CHELSIO_T4_DCB */
+ if (is_kdump_kernel()) {
q10g = 1;
+ q1g = 1;
+ }
for_each_port(adap, i) {
struct port_info *pi = adap2pinfo(adap, i);
pi->first_qset = qidx;
- pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
+ pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g;
qidx += pi->nqsets;
}
-#endif /* !CONFIG_CHELSIO_T4_DCB */
s->ethqsets = qidx;
s->max_ethqsets = qidx; /* MSI-X may lower it later */
@@ -5473,7 +5478,7 @@ static int cfg_queues(struct adapter *adap)
* capped by the number of available cores.
*/
num_ulds = adap->num_uld + adap->num_ofld_uld;
- i = min_t(u32, MAX_OFLD_QSETS, num_online_cpus());
+ i = min_t(u32, MAX_OFLD_QSETS, ncpus);
avail_uld_qsets = roundup(i, adap->params.nports);
if (avail_qsets < num_ulds * adap->params.nports) {
adap->params.offload = 0;
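
Stripped of the driver plumbing, the queue-set split this hunk arrives at looks as follows. This is a standalone sketch, not driver code: split_qsets() is a hypothetical name, min()/max() are the kernel helpers, and the kdump clamp from the hunk is folded in.

    /* Share avail Ethernet queue sets between n10g 10G ports and the
     * remaining n1g ports: start from an even split, then floor at 8
     * queues per port for DCB (walking q10g back until everything
     * fits) or cap at the online CPU count otherwise.
     */
    static void split_qsets(u32 avail, u32 nports, u32 n10g, u32 ncpus,
                            bool dcb, bool kdump, u32 *q10g, u32 *q1g)
    {
        u32 n1g = nports - n10g;

        *q10g = n10g ? (avail - n1g) / n10g : 0;
        if (dcb) {
            *q1g = 8;
            *q10g = max(8U, nports * ncpus < avail ? ncpus : *q10g);
            while (*q10g * n10g > avail - n1g * *q1g)
                (*q10g)--;
        } else {
            *q1g = 1;
            *q10g = min(*q10g, ncpus);
        }
        if (kdump)
            *q10g = *q1g = 1;
    }
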
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
index 58a039c3224a..af1f40cbccc8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
@@ -246,6 +246,9 @@ static int cxgb4_ptp_fineadjtime(struct adapter *adapter, s64 delta)
FW_PTP_CMD_PORTID_V(0));
c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
c.u.ts.sc = FW_PTP_SC_ADJ_FTIME;
+ c.u.ts.sign = (delta < 0) ? 1 : 0;
+ if (delta < 0)
+ delta = -delta;
c.u.ts.tm = cpu_to_be64(delta);
err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL);
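
The three added lines switch the fine-time adjustment to a sign-and-magnitude encoding: the (sign, tm) field pair carries the direction separately, so a negative delta must not be sent as a 64-bit two's-complement value. The same pattern as a standalone helper (helper name hypothetical; struct and fields as in the hunk):

    static inline void fw_ptp_enc_ftime(struct fw_ptp_cmd *c, s64 delta)
    {
        c->u.ts.sign = (delta < 0) ? 1 : 0;   /* direction bit */
        if (delta < 0)
            delta = -delta;                   /* magnitude only */
        c->u.ts.tm = cpu_to_be64(delta);
    }
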
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 97cda501e7e8..cab3d17e0e1a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1307,8 +1307,9 @@ static inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb,
int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
int maxreclaim)
{
+ unsigned int reclaimed, hw_cidx;
struct sge_txq *q = &eq->q;
- unsigned int reclaimed;
+ int hw_in_use;
if (!q->in_use || !__netif_tx_trylock(eq->txq))
return 0;
@@ -1316,12 +1317,17 @@ int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
/* Reclaim pending completed TX Descriptors. */
reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);
+ hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
+ hw_in_use = q->pidx - hw_cidx;
+ if (hw_in_use < 0)
+ hw_in_use += q->size;
+
/* If the TX Queue is currently stopped and there's now more than half
* the queue available, restart it. Otherwise bail out since the rest
* of what we want to do here deals with the possibility of shipping any
* currently buffered Coalesced TX Work Request.
*/
- if (netif_tx_queue_stopped(eq->txq) && txq_avail(q) > (q->size / 2)) {
+ if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) {
netif_tx_wake_queue(eq->txq);
eq->q.restarts++;
}
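
The restart test now works from the hardware's consumer index rather than txq_avail(): the software view only advances as far as reclaim_completed_tx() went, which is bounded by maxreclaim, while q->stat->cidx in the status page reflects what the hardware has actually consumed. The wrap-aware occupancy math in isolation (helper name hypothetical):

    /* Descriptors in flight between the driver's producer index and
     * the hardware consumer index from the queue's status page; the
     * raw difference goes negative once pidx has wrapped past cidx.
     */
    static inline int hw_in_use(const struct sge_txq *q)
    {
        int in_use = q->pidx - ntohs(READ_ONCE(q->stat->cidx));

        return in_use < 0 ? in_use + q->size : in_use;
    }
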
@@ -1486,16 +1492,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* has opened up.
*/
eth_txq_stop(q);
-
- /* If we're using the SGE Doorbell Queue Timer facility, we
- * don't need to ask the Firmware to send us Egress Queue CIDX
- * Updates: the Hardware will do this automatically. And
- * since we send the Ingress Queue CIDX Updates to the
- * corresponding Ethernet Response Queue, we'll get them very
- * quickly.
- */
- if (!q->dbqt)
- wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
wr = (void *)&q->q.desc[q->q.pidx];
@@ -1805,16 +1802,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
* has opened up.
*/
eth_txq_stop(txq);
-
- /* If we're using the SGE Doorbell Queue Timer facility, we
- * don't need to ask the Firmware to send us Egress Queue CIDX
- * Updates: the Hardware will do this automatically. And
- * since we send the Ingress Queue CIDX Updates to the
- * corresponding Ethernet Response Queue, we'll get them very
- * quickly.
- */
- if (!txq->dbqt)
- wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
/* Start filling in our Work Request. Note that we do _not_ handle
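
This hunk and the matching one in cxgb4_eth_xmit above make the PF and VF transmit paths behave the same way: when a Tx queue fills up, the driver now always sets FW_WR_EQUEQ_F | FW_WR_EQUIQ_F, asking firmware for an explicit Egress Queue update instead of relying on the SGE Doorbell Queue Timer to surface one. The resulting stop pattern, sketched with a hypothetical ndesc for the descriptors the packet needs:

    if (unlikely(txq_avail(&txq->q) < ndesc)) {
        /* Out of descriptors: stop the queue and always request an
         * EQ/IQ update from firmware so we hear when space frees up,
         * even when the doorbell queue timer is in use.
         */
        eth_txq_stop(txq);
        wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
    }
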
@@ -3370,26 +3358,6 @@ static void t4_tx_completion_handler(struct sge_rspq *rspq,
}
txq = &s->ethtxq[pi->first_qset + rspq->idx];
-
- /* We've got the Hardware Consumer Index Update in the Egress Update
- * message. If we're using the SGE Doorbell Queue Timer mechanism,
- * these Egress Update messages will be our sole CIDX Updates we get
- * since we don't want to chew up PCIe bandwidth for both Ingress
- * Messages and Status Page writes. However, The code which manages
- * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value
- * stored in the Status Page at the end of the TX Queue. It's easiest
- * to simply copy the CIDX Update value from the Egress Update message
- * to the Status Page. Also note that no Endian issues need to be
- * considered here since both are Big Endian and we're just copying
- * bytes consistently ...
- */
- if (txq->dbqt) {
- struct cpl_sge_egr_update *egr;
-
- egr = (struct cpl_sge_egr_update *)rsp;
- WRITE_ONCE(txq->q.stat->cidx, egr->cidx);
- }
-
t4_sge_eth_txq_egress_update(adapter, txq, -1);
}