Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/pcie/rx.c')
-rw-r--r--   drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 148
1 file changed, 73 insertions(+), 75 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 489b07a9e471..4be3c35afd19 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -231,6 +231,9 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
 	}
 }
 
+/*
+ * iwl_pcie_rxq_mq_restock - restock implementation for multi-queue rx
+ */
 static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans,
 				    struct iwl_rxq *rxq)
 {
@@ -277,17 +280,10 @@ static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans,
 }
 
 /*
- * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
+ * iwl_pcie_rxq_sq_restock - restock implementation for single queue rx
  */
-static void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
+static void iwl_pcie_rxq_sq_restock(struct iwl_trans *trans,
+				    struct iwl_rxq *rxq)
 {
 	struct iwl_rx_mem_buffer *rxb;
 
@@ -332,6 +328,26 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
 }
 
 /*
+ * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' index forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+static
+void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
+{
+	if (trans->cfg->mq_rx_supported)
+		iwl_pcie_rxq_mq_restock(trans, rxq);
+	else
+		iwl_pcie_rxq_sq_restock(trans, rxq);
+}
+
+/*
  * iwl_pcie_rx_alloc_page - allocates and returns a page.
  *
  */
@@ -434,7 +450,7 @@ static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int i;
 
-	for (i = 0; i < MQ_RX_POOL_SIZE; i++) {
+	for (i = 0; i < RX_POOL_SIZE; i++) {
 		if (!trans_pcie->rx_pool[i].page)
 			continue;
 		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
@@ -539,40 +555,46 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
 }
 
 /*
- * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
+ * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
  *
  * Called by queue when the queue posted allocation request and
  * has freed 8 RBDs in order to restock itself.
+ * This function directly moves the allocated RBs to the queue's ownership
+ * and updates the relevant counters.
  */
-static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
-				     struct iwl_rx_mem_buffer
-				     *out[RX_CLAIM_REQ_ALLOC])
+static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
+				      struct iwl_rxq *rxq)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rb_allocator *rba = &trans_pcie->rba;
 	int i;
 
+	lockdep_assert_held(&rxq->lock);
+
 	/*
 	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
 	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
-	 * function will return -ENOMEM, as there are no ready requests.
+	 * function will return early, as there are no ready requests.
 	 * atomic_dec_if_positive will perform the *actual* decrement only if
 	 * req_ready > 0, i.e. - there are ready requests and the function
 	 * hands one request to the caller.
 	 */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
-		return -ENOMEM;
+		return;
 
 	spin_lock(&rba->lock);
 	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
 		/* Get next free Rx buffer, remove it from free list */
-		out[i] = list_first_entry(&rba->rbd_allocated,
-					  struct iwl_rx_mem_buffer, list);
-		list_del(&out[i]->list);
+		struct iwl_rx_mem_buffer *rxb =
+			list_first_entry(&rba->rbd_allocated,
+					 struct iwl_rx_mem_buffer, list);
+
+		list_move(&rxb->list, &rxq->rx_free);
 	}
 	spin_unlock(&rba->lock);
 
-	return 0;
+	rxq->used_count -= RX_CLAIM_REQ_ALLOC;
+	rxq->free_count += RX_CLAIM_REQ_ALLOC;
 }
 
 static void iwl_pcie_rx_allocator_work(struct work_struct *data)
@@ -795,11 +817,10 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
 
 	/*
 	 * Activate DMA snooping.
-	 * Set RX DMA chunk size to 128 bit
+	 * Set RX DMA chunk size to 64B
 	 * Default queue is 0
 	 */
 	iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
-		       RFH_GEN_CFG_RB_CHUNK_SIZE |
 		       (DEFAULT_RXQ_NUM << RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
 		       RFH_GEN_CFG_SERVICE_DMA_SNOOP);
 	/* Enable the relevant rx queues */
@@ -830,7 +851,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *def_rxq;
 	struct iwl_rb_allocator *rba = &trans_pcie->rba;
-	int i, err, num_rbds, allocator_pool_size;
+	int i, err, queue_size, allocator_pool_size, num_alloc;
 
 	if (!trans_pcie->rxq) {
 		err = iwl_pcie_rx_alloc(trans);
@@ -882,11 +903,12 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 	}
 
 	/* move the pool to the default queue and allocator ownerships */
-	num_rbds = trans->cfg->mq_rx_supported ?
-		     MQ_RX_POOL_SIZE : RX_QUEUE_SIZE;
+	queue_size = trans->cfg->mq_rx_supported ?
+		     MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
 	allocator_pool_size = trans->num_rx_queues *
 		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
-	for (i = 0; i < num_rbds; i++) {
+	num_alloc = queue_size + allocator_pool_size;
+	for (i = 0; i < num_alloc; i++) {
 		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
 
 		if (i < allocator_pool_size)
@@ -901,7 +923,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 	if (trans->cfg->mq_rx_supported) {
 		iwl_pcie_rx_mq_hw_init(trans);
 	} else {
-		iwl_pcie_rxq_restock(trans, def_rxq);
+		iwl_pcie_rxq_sq_restock(trans, def_rxq);
 		iwl_pcie_rx_hw_init(trans, def_rxq);
 	}
 
@@ -1149,7 +1171,7 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
-	u32 r, i, j, count = 0;
+	u32 r, i, count = 0;
 	bool emergency = false;
 
 restart:
@@ -1193,62 +1215,36 @@ restart:
 
 		i = (i + 1) & (rxq->queue_size - 1);
 
-		/* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
-		 * try to claim the pre-allocated buffers from the allocator */
-		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
+		/*
+		 * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
+		 * try to claim the pre-allocated buffers from the allocator.
+		 * If not ready - will try to reclaim next time.
+		 * There is no need to reschedule work - allocator exits only
+		 * on success
+		 */
+		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
+			iwl_pcie_rx_allocator_get(trans, rxq);
+
+		if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
 			struct iwl_rb_allocator *rba = &trans_pcie->rba;
-			struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
 
-			if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 &&
-			    !emergency) {
-				/* Add the remaining 6 empty RBDs
-				 * for allocator use
-				 */
-				spin_lock(&rba->lock);
-				list_splice_tail_init(&rxq->rx_used,
-						      &rba->rbd_empty);
-				spin_unlock(&rba->lock);
-			}
-
-			/* If not ready - continue, will try to reclaim later.
-			 * No need to reschedule work - allocator exits only on
-			 * success */
-			if (!iwl_pcie_rx_allocator_get(trans, out)) {
-				/* If success - then RX_CLAIM_REQ_ALLOC
-				 * buffers were retrieved and should be added
-				 * to free list */
-				rxq->used_count -= RX_CLAIM_REQ_ALLOC;
-				for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
-					list_add_tail(&out[j]->list,
-						      &rxq->rx_free);
-					rxq->free_count++;
-				}
-			}
-		}
-		if (emergency) {
+			/* Add the remaining empty RBDs for allocator use */
+			spin_lock(&rba->lock);
+			list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+			spin_unlock(&rba->lock);
+		} else if (emergency) {
 			count++;
 			if (count == 8) {
 				count = 0;
 				if (rxq->used_count < rxq->queue_size / 3)
 					emergency = false;
+
+				rxq->read = i;
 				spin_unlock(&rxq->lock);
 				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
-				spin_lock(&rxq->lock);
-			}
-		}
-		/* handle restock for three cases, can be all of them at once:
-		 * - we just pulled buffers from the allocator
-		 * - we have 8+ unstolen pages accumulated
-		 * - we are in emergency and allocated buffers
-		 */
-		if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
-			rxq->read = i;
-			spin_unlock(&rxq->lock);
-			if (trans->cfg->mq_rx_supported)
-				iwl_pcie_rxq_mq_restock(trans, rxq);
-			else
 				iwl_pcie_rxq_restock(trans, rxq);
-			goto restart;
+				goto restart;
+			}
 		}
 	}
 out:
@@ -1273,6 +1269,8 @@ out:
 
 	if (rxq->napi.poll)
 		napi_gro_flush(&rxq->napi, false);
+
+	iwl_pcie_rxq_restock(trans, rxq);
 }
 
 static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
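The structural change is easiest to see in isolation: the commit renames the single-queue path to iwl_pcie_rxq_sq_restock(), documents iwl_pcie_rxq_mq_restock(), and reintroduces iwl_pcie_rxq_restock() as a thin wrapper that dispatches on trans->cfg->mq_rx_supported, so iwl_pcie_rx_handle() can restock once, after the NAPI flush, without caring which RX scheme the hardware uses. Below is a minimal, self-contained C sketch of that dispatch pattern; the *_like types and printf bodies are simplified stand-ins, not the driver's real structures.

#include <stdbool.h>
#include <stdio.h>

/* simplified stand-ins for struct iwl_cfg / iwl_trans / iwl_rxq */
struct iwl_cfg_like   { bool mq_rx_supported; };
struct iwl_trans_like { const struct iwl_cfg_like *cfg; };
struct iwl_rxq_like   { int id; };

/* stand-in for iwl_pcie_rxq_mq_restock() */
static void restock_mq(struct iwl_trans_like *trans, struct iwl_rxq_like *rxq)
{
	printf("multi-queue restock, queue %d\n", rxq->id);
}

/* stand-in for iwl_pcie_rxq_sq_restock() */
static void restock_sq(struct iwl_trans_like *trans, struct iwl_rxq_like *rxq)
{
	printf("single-queue restock, queue %d\n", rxq->id);
}

/* the wrapper: callers never need to know which RX scheme is in use */
static void restock(struct iwl_trans_like *trans, struct iwl_rxq_like *rxq)
{
	if (trans->cfg->mq_rx_supported)
		restock_mq(trans, rxq);
	else
		restock_sq(trans, rxq);
}

int main(void)
{
	const struct iwl_cfg_like mq_cfg = { .mq_rx_supported = true };
	struct iwl_trans_like trans = { .cfg = &mq_cfg };
	struct iwl_rxq_like rxq = { .id = 0 };

	restock(&trans, &rxq);	/* prints the multi-queue message */
	return 0;
}

Hiding the capability check behind one entry point is what lets the patch delete the mq/sq branch from the hot loop in iwl_pcie_rx_handle() and call the wrapper once on the way out. Note that iwl_pcie_rx_init() still calls iwl_pcie_rxq_sq_restock() directly, since that path runs only when multi-queue is not supported.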
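The other notable change is iwl_pcie_rx_allocator_get(): instead of filling an out[] array for the caller to splice into rx_free, it moves RX_CLAIM_REQ_ALLOC buffers onto the queue's free list itself with list_move() and adjusts used_count/free_count, turning the old int return into void. The userspace sketch below mimics the kernel's circular doubly-linked list just enough to show that batch hand-off; the names and the batch size (CLAIM_BATCH, rx_buf) are illustrative stand-ins, not kernel API.

#include <stdio.h>

#define CLAIM_BATCH 4	/* stands in for RX_CLAIM_REQ_ALLOC */

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *head)
{
	head->next = head->prev = head;
}

/* insert entry right after head */
static void list_add(struct list_head *entry, struct list_head *head)
{
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

/* unlink entry from its current list, push it onto another */
static void list_move(struct list_head *entry, struct list_head *head)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	list_add(entry, head);
}

struct rx_buf { struct list_head list; int id; };

int main(void)
{
	struct list_head rbd_allocated, rx_free;
	struct rx_buf pool[CLAIM_BATCH];
	int i, used_count = CLAIM_BATCH, free_count = 0;

	list_init(&rbd_allocated);
	list_init(&rx_free);
	for (i = 0; i < CLAIM_BATCH; i++) {
		pool[i].id = i;
		list_add(&pool[i].list, &rbd_allocated);
	}

	/* hand the whole batch to the queue, as the reworked
	 * allocator_get does, and move the counters with it */
	for (i = 0; i < CLAIM_BATCH; i++) {
		/* 'list' is the first member of rx_buf, so this cast
		 * mirrors list_first_entry() for this layout */
		struct rx_buf *buf = (struct rx_buf *)rbd_allocated.next;

		printf("moving buffer %d to rx_free\n", buf->id);
		list_move(&buf->list, &rx_free);
	}
	used_count -= CLAIM_BATCH;
	free_count += CLAIM_BATCH;

	printf("used_count=%d free_count=%d\n", used_count, free_count);
	return 0;
}

Because the function now updates the queue state directly, it must run with the queue lock held, which is why the patch adds lockdep_assert_held(&rxq->lock); dropping the -ENOMEM return also removes the error-handling branch from every caller.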