Diffstat (limited to 'drivers/net/ethernet/chelsio/cxgb4/sge.c')
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c  37
1 file changed, 11 insertions(+), 26 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 49d5808b7d11..9fccb8ea9bcd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -163,7 +163,7 @@ static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
* for DMA, but this is of course never sent to the hardware and is only used
* to prevent double unmappings. All of the above requires that the Free List
* Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
- * 32-byte or or a power of 2 greater in alignment. Since the SGE's minimal
+ * 32-byte or a power of 2 greater in alignment. Since the SGE's minimal
* Free List Buffer alignment is 32 bytes, this works out for us ...
*/
enum {
@@ -1533,7 +1533,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
q = &adap->sge.ethtxq[qidx + pi->first_qset];
}
- skb_tx_timestamp(skb);
reclaim_completed_tx(adap, &q->q, -1, true);
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
@@ -1706,6 +1705,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
cpl->len = htons(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
+ skb_tx_timestamp(skb);
+
if (immediate) {
cxgb4_inline_tx_skb(skb, &q->q, sgl);
dev_consume_skb_any(skb);
@@ -2268,7 +2269,6 @@ static int ethofld_hard_xmit(struct net_device *dev,
d = &eosw_txq->desc[eosw_txq->last_pidx];
skb = d->skb;
- skb_tx_timestamp(skb);
wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx];
if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE &&
@@ -2373,6 +2373,7 @@ write_wr_headers:
eohw_txq->vlan_ins++;
txq_advance(&eohw_txq->q, ndesc);
+ skb_tx_timestamp(skb);
cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc);
eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc);
@@ -2670,12 +2671,12 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
lb->loopback = 1;
q = &adap->sge.ethtxq[pi->first_qset];
- __netif_tx_lock(q->txq, smp_processor_id());
+ __netif_tx_lock_bh(q->txq);
reclaim_completed_tx(adap, &q->q, -1, true);
credits = txq_avail(&q->q) - ndesc;
if (unlikely(credits < 0)) {
- __netif_tx_unlock(q->txq);
+ __netif_tx_unlock_bh(q->txq);
return -ENOMEM;
}
@@ -2710,7 +2711,7 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
init_completion(&lb->completion);
txq_advance(&q->q, ndesc);
cxgb4_ring_tx_db(adap, &q->q, ndesc);
- __netif_tx_unlock(q->txq);
+ __netif_tx_unlock_bh(q->txq);
/* wait for the pkt to return */
ret = wait_for_completion_timeout(&lb->completion, 10 * HZ);
@@ -4233,7 +4234,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
{
unsigned long m;
unsigned int i;
- struct adapter *adap = from_timer(adap, t, sge.rx_timer);
+ struct adapter *adap = timer_container_of(adap, t, sge.rx_timer);
struct sge *s = &adap->sge;
for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
@@ -4268,7 +4269,7 @@ done:
static void sge_tx_timer_cb(struct timer_list *t)
{
- struct adapter *adap = from_timer(adap, t, sge.tx_timer);
+ struct adapter *adap = timer_container_of(adap, t, sge.tx_timer);
struct sge *s = &adap->sge;
unsigned long m, period;
unsigned int i, budget;
@@ -4874,22 +4875,6 @@ void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
}
}
-/**
- * t4_free_ofld_rxqs - free a block of consecutive Rx queues
- * @adap: the adapter
- * @n: number of queues
- * @q: pointer to first queue
- *
- * Release the resources of a consecutive block of offload Rx queues.
- */
-void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
-{
- for ( ; n; n--, q++)
- if (q->rspq.desc)
- free_rspq_fl(adap, &q->rspq,
- q->fl.size ? &q->fl : NULL);
-}
-
void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq)
{
if (txq->q.desc) {
@@ -5012,9 +4997,9 @@ void t4_sge_stop(struct adapter *adap)
struct sge *s = &adap->sge;
if (s->rx_timer.function)
- del_timer_sync(&s->rx_timer);
+ timer_delete_sync(&s->rx_timer);
if (s->tx_timer.function)
- del_timer_sync(&s->tx_timer);
+ timer_delete_sync(&s->tx_timer);
if (is_offload(adap)) {
struct sge_uld_txq_info *txq_info;