Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/queue/tx.c')
 drivers/net/wireless/intel/iwlwifi/queue/tx.c | 146
 1 file changed, 80 insertions(+), 66 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
index fbacbe9ada15..340240b8954f 100644
--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
@@ -10,6 +10,7 @@
#include "fw/api/commands.h"
#include "fw/api/tx.h"
#include "fw/api/datapath.h"
+#include "fw/api/debug.h"
#include "queue/tx.h"
#include "iwl-fh.h"
#include "iwl-scd.h"
@@ -84,6 +85,50 @@ static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans,
return le16_to_cpu(tfd->num_tbs) & 0x1f;
}
+int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
+ dma_addr_t addr, u16 len)
+{
+ int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
+ struct iwl_tfh_tb *tb;
+
+ /* Only WARN here so we know about the issue, but we mess up our
+ * unmap path because not every place currently checks for errors
+ * returned from this function - it can only return an error if
+ * there's no more space, and so when we know there is enough we
+ * don't always check ...
+ */
+ WARN(iwl_txq_crosses_4g_boundary(addr, len),
+ "possible DMA problem with iova:0x%llx, len:%d\n",
+ (unsigned long long)addr, len);
+
+ if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
+ return -EINVAL;
+ tb = &tfd->tbs[idx];
+
+ /* Each TFD can point to a maximum max_tbs Tx buffers */
+ if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
+ IWL_ERR(trans, "Error can not send more than %d chunks\n",
+ trans->txqs.tfd.max_tbs);
+ return -EINVAL;
+ }
+
+ put_unaligned_le64(addr, &tb->addr);
+ tb->tb_len = cpu_to_le16(len);
+
+ tfd->num_tbs = cpu_to_le16(idx + 1);
+
+ return idx;
+}
+
+static void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans,
+ struct iwl_tfh_tfd *tfd)
+{
+ tfd->num_tbs = 0;
+
+ iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma,
+ trans->invalid_tx_cmd.size);
+}
+
void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
struct iwl_tfh_tfd *tfd)
{
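As the comment in the new iwl_txq_gen2_set_tb() notes, the function only fails when the TFD runs out of slots, and not every caller checks. Below is a minimal sketch of how a TX path might chain two fragments into one gen2 TFD while checking the return value both times; the caller name and fragment variables are hypothetical, only iwl_txq_gen2_set_tb() itself comes from the driver, and the snippet assumes the driver-internal headers that define struct iwl_trans and struct iwl_tfh_tfd.

/* Hypothetical caller, for illustration only. */
static int example_build_two_frag_tfd(struct iwl_trans *trans,
				      struct iwl_tfh_tfd *tfd,
				      dma_addr_t hdr_dma, u16 hdr_len,
				      dma_addr_t frag_dma, u16 frag_len)
{
	int ret;

	/* first TB: the TX command / header fragment */
	ret = iwl_txq_gen2_set_tb(trans, tfd, hdr_dma, hdr_len);
	if (ret < 0)
		return ret;

	/* second TB: the payload fragment */
	ret = iwl_txq_gen2_set_tb(trans, tfd, frag_dma, frag_len);
	if (ret < 0)
		return ret;

	/* on success, tfd->num_tbs now counts both fragments */
	return 0;
}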
@@ -111,7 +156,7 @@ void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
DMA_TO_DEVICE);
}
- tfd->num_tbs = 0;
+ iwl_txq_set_tfd_invalid_gen2(trans, tfd);
}
void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
@@ -142,42 +187,6 @@ void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
}
}
-int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
- dma_addr_t addr, u16 len)
-{
- int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
- struct iwl_tfh_tb *tb;
-
- /*
- * Only WARN here so we know about the issue, but we mess up our
- * unmap path because not every place currently checks for errors
- * returned from this function - it can only return an error if
- * there's no more space, and so when we know there is enough we
- * don't always check ...
- */
- WARN(iwl_txq_crosses_4g_boundary(addr, len),
- "possible DMA problem with iova:0x%llx, len:%d\n",
- (unsigned long long)addr, len);
-
- if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
- return -EINVAL;
- tb = &tfd->tbs[idx];
-
- /* Each TFD can point to a maximum max_tbs Tx buffers */
- if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
- IWL_ERR(trans, "Error can not send more than %d chunks\n",
- trans->txqs.tfd.max_tbs);
- return -EINVAL;
- }
-
- put_unaligned_le64(addr, &tb->addr);
- tb->tb_len = cpu_to_le16(len);
-
- tfd->num_tbs = cpu_to_le16(idx + 1);
-
- return idx;
-}
-
static struct page *get_workaround_page(struct iwl_trans *trans,
struct sk_buff *skb)
{
@@ -985,7 +994,7 @@ void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
bool active;
u8 fifo;
- if (trans->trans_cfg->use_tfh) {
+ if (trans->trans_cfg->gen2) {
IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
txq->read_ptr, txq->write_ptr);
/* TODO: access new SCD registers and dump them */
@@ -1026,11 +1035,21 @@ static void iwl_txq_stuck_timer(struct timer_list *t)
iwl_force_nmi(trans);
}
+static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,
+ struct iwl_tfd *tfd)
+{
+ tfd->num_tbs = 0;
+
+ iwl_pcie_gen1_tfd_set_tb(trans, tfd, 0, trans->invalid_tx_cmd.dma,
+ trans->invalid_tx_cmd.size);
+}
+
int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
bool cmd_queue)
{
- size_t tfd_sz = trans->txqs.tfd.size *
- trans->trans_cfg->base_params->max_tfd_queue_size;
+ size_t num_entries = trans->trans_cfg->gen2 ?
+ slots_num : trans->trans_cfg->base_params->max_tfd_queue_size;
+ size_t tfd_sz;
size_t tb0_buf_sz;
int i;
@@ -1040,8 +1059,7 @@ int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
if (WARN_ON(txq->entries || txq->tfds))
return -EINVAL;
- if (trans->trans_cfg->use_tfh)
- tfd_sz = trans->txqs.tfd.size * slots_num;
+ tfd_sz = trans->txqs.tfd.size * num_entries;
timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
txq->trans = trans;
@@ -1081,6 +1099,15 @@ int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
if (!txq->first_tb_bufs)
goto err_free_tfds;
+ for (i = 0; i < num_entries; i++) {
+ void *tfd = iwl_txq_get_tfd(trans, txq, i);
+
+ if (trans->trans_cfg->gen2)
+ iwl_txq_set_tfd_invalid_gen2(trans, tfd);
+ else
+ iwl_txq_set_tfd_invalid_gen1(trans, tfd);
+ }
+
return 0;
err_free_tfds:
dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
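The initialization loop above relies on trans->invalid_tx_cmd, a small DMA-coherent dummy buffer that every freshly allocated (and, via the unmap paths, every freed) TFD is pointed at, presumably so that any late hardware access to a reclaimed descriptor hits valid, harmless memory instead of an unmapped address. A rough sketch of how such a buffer could be set up with the standard DMA API follows; the .addr field, the buffer size, and the function name are assumptions made for illustration, only .dma and .size are used by this patch.

/* Illustrative only: allocate a dummy buffer for invalid TFDs.
 * Field names other than invalid_tx_cmd.dma / .size are assumed. */
static int example_alloc_invalid_tx_cmd(struct iwl_trans *trans)
{
	trans->invalid_tx_cmd.size = 8;	/* any small, nonzero length */
	trans->invalid_tx_cmd.addr = dma_alloc_coherent(trans->dev,
							trans->invalid_tx_cmd.size,
							&trans->invalid_tx_cmd.dma,
							GFP_KERNEL);
	if (!trans->invalid_tx_cmd.addr)
		return -ENOMEM;

	return 0;
}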
@@ -1340,22 +1367,12 @@ error:
}
static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
- void *_tfd, u8 idx)
+ struct iwl_tfd *tfd, u8 idx)
{
- struct iwl_tfd *tfd;
- struct iwl_tfd_tb *tb;
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
dma_addr_t addr;
dma_addr_t hi_len;
- if (trans->trans_cfg->use_tfh) {
- struct iwl_tfh_tfd *tfh_tfd = _tfd;
- struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
-
- return (dma_addr_t)(le64_to_cpu(tfh_tb->addr));
- }
-
- tfd = _tfd;
- tb = &tfd->tbs[idx];
addr = get_unaligned_le32(&tb->lo);
if (sizeof(dma_addr_t) <= sizeof(u32))
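For reference, a gen1 TB stores a 36-bit DMA address as a 32-bit low word plus four high bits packed into the same 16-bit field as the 12-bit length, which is what the simplified iwl_txq_gen1_tfd_tb_get_addr() above decodes. A standalone sketch of that unpacking, assuming the lo/hi_n_len layout just described:

#include <stdint.h>

/* Sketch of the gen1 TB address layout: hi_n_len keeps the address
 * bits [35:32] in its low nibble and the TB length in bits [15:4]. */
static uint64_t example_gen1_tb_addr(uint32_t lo, uint16_t hi_n_len)
{
	uint64_t addr = lo;

	addr |= ((uint64_t)(hi_n_len & 0xf)) << 32;
	return addr;
}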
@@ -1376,7 +1393,7 @@ void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
struct iwl_txq *txq, int index)
{
int i, num_tbs;
- void *tfd = iwl_txq_get_tfd(trans, txq, index);
+ struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index);
/* Sanity check on number of chunks */
num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
@@ -1408,15 +1425,7 @@ void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
meta->tbs = 0;
- if (trans->trans_cfg->use_tfh) {
- struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
-
- tfd_fh->num_tbs = 0;
- } else {
- struct iwl_tfd *tfd_fh = (void *)tfd;
-
- tfd_fh->num_tbs = 0;
- }
+ iwl_txq_set_tfd_invalid_gen1(trans, tfd);
}
#define IWL_TX_CRC_SIZE 4
@@ -1520,7 +1529,12 @@ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
/* We have only q->n_window txq->entries, but we use
* TFD_QUEUE_SIZE_MAX tfds
*/
- iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
+ if (trans->trans_cfg->gen2)
+ iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
+ iwl_txq_get_tfd(trans, txq, rd_ptr));
+ else
+ iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta,
+ txq, rd_ptr);
/* free SKB */
skb = txq->entries[idx].skb;
@@ -1625,7 +1639,7 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
txq->entries[read_ptr].skb = NULL;
- if (!trans->trans_cfg->use_tfh)
+ if (!trans->trans_cfg->gen2)
iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);
iwl_txq_free_tfd(trans, txq);