author    Luca Coelho <luciano.coelho@intel.com>    2018-08-02 14:57:55 +0300
committer Luca Coelho <luciano.coelho@intel.com>    2019-09-06 15:34:25 +0300
commit    79b6c8feb63589196cd4c557c91bfafd2da47f4e (patch)
tree      971c175c86c58e7704db937045df08cab5fb3615 /drivers/net/wireless/intel/iwlwifi/pcie
parent    cd6de838e179287cbbfb7b655701183f2d32936e (diff)
iwlwifi: separate elements from cfg that are needed by trans_alloc
In order to be able to select the cfg depending on the HW revision or on the RF ID, we need to set up the trans before selecting the cfg. To do so, move the elements from cfg that are needed by iwl_trans_alloc() to a separate struct at the top of the cfg, so it can be used by other cfg types as well, before selecting the rest of the configuration.

Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
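The access pattern used throughout the hunks below (trans->cfg->trans.device_family, trans->cfg->trans.csr->..., trans->cfg->trans.base_params->..., and so on) follows from the new layout. As a rough orientation only, here is a minimal sketch of the assumed arrangement; the real struct definitions live in the iwlwifi config headers and carry more fields, so the names below are abbreviated assumptions derived from the accesses visible in this diff, not the upstream declarations.

/*
 * Illustrative sketch only: abbreviated assumption of the layout
 * implied by this patch, not the real iwlwifi config headers.
 */
struct iwl_cfg_trans_params {
	const struct iwl_base_params *base_params;
	const struct iwl_csr_params *csr;
	enum iwl_device_family device_family;
	u32 mq_rx_supported:1,
	    use_tfh:1,
	    rf_id:1,
	    gen2:1;
};

struct iwl_cfg {
	/* elements needed by iwl_trans_alloc() come first ... */
	struct iwl_cfg_trans_params trans;
	/*
	 * ... followed by the rest of the per-device configuration,
	 * which can be selected later, once the HW revision / RF ID
	 * has been read through the already-allocated trans.
	 */
	const char *name;
	/* ... */
};

This also explains the iwl_finish_nic_init(trans, &trans->cfg->trans) calls in the hunks below: the helper now only needs the transport-related subset of the configuration.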
Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/pcie')
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c    2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/drv.c                4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/internal.h          14
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/rx.c                56
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c        10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c            132
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c           10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx.c                83
8 files changed, 162 insertions(+), 149 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 9b27c955b8dc..1b5de527f141 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -180,7 +180,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
CSR_AUTO_FUNC_BOOT_ENA);
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ if (trans->cfg->trans.device_family >= IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
else
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index c02cb976cafe..08990694dd0e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -995,7 +995,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct iwl_trans *iwl_trans;
int ret;
- if (WARN_ONCE(!cfg->csr, "CSR addresses aren't configured\n"))
+ if (WARN_ONCE(!cfg->trans.csr, "CSR addresses aren't configured\n"))
return -EINVAL;
iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
@@ -1022,7 +1022,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_trans->cfg = cfg_7265d;
}
- if (iwl_trans->cfg->rf_id && cfg == &iwl22000_2ac_cfg_hr_cdb &&
+ if (iwl_trans->cfg->trans.rf_id && cfg == &iwl22000_2ac_cfg_hr_cdb &&
iwl_trans->hw_rev != CSR_HW_REV_TYPE_HR_CDB) {
u32 rf_id_chp = CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id);
u32 jf_chp_id = CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 906e1da3923d..53999ea7bb04 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -253,7 +253,8 @@ struct iwl_dma_ptr {
*/
static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
- return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
+ return ++index &
+ (trans->cfg->trans.base_params->max_tfd_queue_size - 1);
}
/**
@@ -263,7 +264,7 @@ static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ if (trans->cfg->trans.device_family >= IWL_DEVICE_FAMILY_22560) {
__le16 *rb_stts = rxq->rb_stts;
return READ_ONCE(*rb_stts);
@@ -280,7 +281,8 @@ static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
*/
static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
- return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
+ return --index &
+ (trans->cfg->trans.base_params->max_tfd_queue_size - 1);
}
struct iwl_cmd_meta {
@@ -704,7 +706,7 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
u8 idx)
{
- if (trans->cfg->use_tfh) {
+ if (trans->cfg->trans.use_tfh) {
struct iwl_tfh_tfd *tfd = _tfd;
struct iwl_tfh_tb *tb = &tfd->tbs[idx];
@@ -910,7 +912,7 @@ static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- if (trans->cfg->use_tfh)
+ if (trans->cfg->trans.use_tfh)
idx = iwl_pcie_get_cmd_index(txq, idx);
return txq->tfds + trans_pcie->tfd_size * idx;
@@ -954,7 +956,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
MSIX_HW_INT_CAUSES_REG_RF_KILL);
}
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
+ if (trans->cfg->trans.device_family >= IWL_DEVICE_FAMILY_9000) {
/*
* On 9000-series devices this bit isn't enabled by default, so
* when we power down the device we need set the bit to allow it
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index a2d709642b2a..79b012e881f0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -200,12 +200,12 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
*/
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ if (trans->cfg->trans.device_family >= IWL_DEVICE_FAMILY_22560) {
/* TODO: remove this for 22560 once fw does it */
iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
- } else if (trans->cfg->mq_rx_supported) {
+ } else if (trans->cfg->trans.mq_rx_supported) {
iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
@@ -232,7 +232,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
* 1. shadow registers aren't enabled
* 2. there is a chance that the NIC is asleep
*/
- if (!trans->cfg->base_params->shadow_reg_enable &&
+ if (!trans->cfg->trans.base_params->shadow_reg_enable &&
test_bit(STATUS_TPOWER_PMI, &trans->status)) {
reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
@@ -240,18 +240,18 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
reg);
iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_access_req));
+ BIT(trans->cfg->trans.csr->flag_mac_access_req));
rxq->need_update = true;
return;
}
}
rxq->write_actual = round_down(rxq->write, 8);
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
+ if (trans->cfg->trans.device_family == IWL_DEVICE_FAMILY_22560)
iwl_write32(trans, HBUS_TARG_WRPTR,
(rxq->write_actual |
((FIRST_RX_QUEUE + rxq->id) << 16)));
- else if (trans->cfg->mq_rx_supported)
+ else if (trans->cfg->trans.mq_rx_supported)
iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
rxq->write_actual);
else
@@ -279,7 +279,7 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans,
struct iwl_rxq *rxq,
struct iwl_rx_mem_buffer *rxb)
{
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ if (trans->cfg->trans.device_family >= IWL_DEVICE_FAMILY_22560) {
struct iwl_rx_transfer_desc *bd = rxq->bd;
BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));
@@ -405,7 +405,7 @@ static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
static
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
- if (trans->cfg->mq_rx_supported)
+ if (trans->cfg->trans.mq_rx_supported)
iwl_pcie_rxmq_restock(trans, rxq);
else
iwl_pcie_rxsq_restock(trans, rxq);
@@ -682,7 +682,7 @@ static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
if (use_rx_td)
return sizeof(*rx_td);
else
- return trans->cfg->mq_rx_supported ? sizeof(__le64) :
+ return trans->cfg->trans.mq_rx_supported ? sizeof(__le64) :
sizeof(__le32);
}
@@ -690,7 +690,7 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
struct device *dev = trans->dev;
- bool use_rx_td = (trans->cfg->device_family >=
+ bool use_rx_td = (trans->cfg->trans.device_family >=
IWL_DEVICE_FAMILY_22560);
int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
@@ -712,7 +712,7 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
rxq->used_bd_dma = 0;
rxq->used_bd = NULL;
- if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+ if (trans->cfg->trans.device_family < IWL_DEVICE_FAMILY_22560)
return;
if (rxq->tr_tail)
@@ -735,13 +735,13 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
struct device *dev = trans->dev;
int i;
int free_size;
- bool use_rx_td = (trans->cfg->device_family >=
+ bool use_rx_td = (trans->cfg->trans.device_family >=
IWL_DEVICE_FAMILY_22560);
size_t rb_stts_size = use_rx_td ? sizeof(__le16) :
sizeof(struct iwl_rb_status);
spin_lock_init(&rxq->lock);
- if (trans->cfg->mq_rx_supported)
+ if (trans->cfg->trans.mq_rx_supported)
rxq->queue_size = MQ_RX_TABLE_SIZE;
else
rxq->queue_size = RX_QUEUE_SIZE;
@@ -757,7 +757,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
if (!rxq->bd)
goto err;
- if (trans->cfg->mq_rx_supported) {
+ if (trans->cfg->trans.mq_rx_supported) {
rxq->used_bd = dma_alloc_coherent(dev,
(use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
&rxq->used_bd_dma,
@@ -807,7 +807,7 @@ int iwl_pcie_rx_alloc(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i, ret;
- size_t rb_stts_size = trans->cfg->device_family >=
+ size_t rb_stts_size = trans->cfg->trans.device_family >=
IWL_DEVICE_FAMILY_22560 ?
sizeof(__le16) : sizeof(struct iwl_rb_status);
@@ -1074,8 +1074,8 @@ int _iwl_pcie_rx_init(struct iwl_trans *trans)
rxq->read = 0;
rxq->write = 0;
rxq->write_actual = 0;
- memset(rxq->rb_stts, 0,
- (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+ memset(rxq->rb_stts, 0, (trans->cfg->trans.device_family >=
+ IWL_DEVICE_FAMILY_22560) ?
sizeof(__le16) : sizeof(struct iwl_rb_status));
iwl_pcie_rx_init_rxb_lists(rxq);
@@ -1088,7 +1088,7 @@ int _iwl_pcie_rx_init(struct iwl_trans *trans)
}
/* move the pool to the default queue and allocator ownerships */
- queue_size = trans->cfg->mq_rx_supported ?
+ queue_size = trans->cfg->trans.mq_rx_supported ?
MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
allocator_pool_size = trans->num_rx_queues *
(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
@@ -1120,7 +1120,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
if (ret)
return ret;
- if (trans->cfg->mq_rx_supported)
+ if (trans->cfg->trans.mq_rx_supported)
iwl_pcie_rx_mq_hw_init(trans);
else
iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
@@ -1151,7 +1151,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i;
- size_t rb_stts_size = trans->cfg->device_family >=
+ size_t rb_stts_size = trans->cfg->trans.device_family >=
IWL_DEVICE_FAMILY_22560 ?
sizeof(__le16) : sizeof(struct iwl_rb_status);
@@ -1347,7 +1347,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
}
page_stolen |= rxcb._page_stolen;
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->cfg->trans.device_family >= IWL_DEVICE_FAMILY_22560)
break;
offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
}
@@ -1392,14 +1392,14 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
- if (!trans->cfg->mq_rx_supported) {
+ if (!trans->cfg->trans.mq_rx_supported) {
rxb = rxq->queue[i];
rxq->queue[i] = NULL;
return rxb;
}
/* used_bd is a 32/16 bit but only 12 are used to retrieve the vid */
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->cfg->trans.device_family >= IWL_DEVICE_FAMILY_22560)
vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
else
vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
@@ -1515,7 +1515,7 @@ out:
/* Backtrack one entry */
rxq->read = i;
/* update cr tail with the rxq read pointer */
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->cfg->trans.device_family >= IWL_DEVICE_FAMILY_22560)
*rxq->cr_tail = cpu_to_le16(r);
spin_unlock(&rxq->lock);
@@ -1597,7 +1597,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
return;
}
- for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+ for (i = 0; i < trans->cfg->trans.base_params->num_of_queues; i++) {
if (!trans_pcie->txq[i])
continue;
del_timer(&trans_pcie->txq[i]->stuck_timer);
@@ -1838,7 +1838,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
if (inta & CSR_INT_BIT_ALIVE) {
IWL_DEBUG_ISR(trans, "Alive interrupt\n");
isr_stats->alive++;
- if (trans->cfg->gen2) {
+ if (trans->cfg->trans.gen2) {
/*
* We can restock, since firmware configured
* the RFH
@@ -2179,13 +2179,13 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
IWL_DEBUG_ISR(trans, "Alive interrupt\n");
isr_stats->alive++;
- if (trans->cfg->gen2) {
+ if (trans->cfg->trans.gen2) {
/* We can restock, since firmware configured the RFH */
iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
}
}
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560 &&
+ if (trans->cfg->trans.device_family == IWL_DEVICE_FAMILY_22560 &&
inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
/* Reflect IML transfer status */
int res = iwl_read32(trans, CSR_IML_RESP_ADDR);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index c099ad8b003f..919b69ab836d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -92,7 +92,7 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
iwl_pcie_apm_config(trans);
- ret = iwl_finish_nic_init(trans);
+ ret = iwl_finish_nic_init(trans, &trans->cfg->trans);
if (ret)
return ret;
@@ -133,7 +133,7 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_init_done));
+ BIT(trans->cfg->trans.csr->flag_init_done));
}
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
@@ -168,14 +168,14 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
}
iwl_pcie_ctxt_info_free_paging(trans);
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->cfg->trans.device_family >= IWL_DEVICE_FAMILY_22560)
iwl_pcie_ctxt_info_gen3_free(trans);
else
iwl_pcie_ctxt_info_free(trans);
/* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_access_req));
+ BIT(trans->cfg->trans.csr->flag_mac_access_req));
/* Stop the device, and put it in low power state */
iwl_pcie_gen2_apm_stop(trans, false);
@@ -340,7 +340,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
goto out;
}
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->cfg->trans.device_family >= IWL_DEVICE_FAMILY_22560)
ret = iwl_pcie_ctxt_info_gen3_init(trans, fw);
else
ret = iwl_pcie_ctxt_info_init(trans, fw);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 8639e60b4f40..db75a79aa8a4 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -184,8 +184,8 @@ out:
static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
{
/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
- iwl_set_bit(trans, trans->cfg->csr->addr_sw_reset,
- BIT(trans->cfg->csr->flag_sw_reset));
+ iwl_set_bit(trans, trans->cfg->trans.csr->addr_sw_reset,
+ BIT(trans->cfg->trans.csr->flag_sw_reset));
usleep_range(5000, 6000);
}
@@ -341,7 +341,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
*/
/* Disable L0S exit timer (platform NMI Work/Around) */
- if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
+ if (trans->cfg->trans.device_family < IWL_DEVICE_FAMILY_8000)
iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
@@ -365,10 +365,10 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
iwl_pcie_apm_config(trans);
/* Configure analog phase-lock-loop before activating to D0A */
- if (trans->cfg->base_params->pll_cfg)
+ if (trans->cfg->trans.base_params->pll_cfg)
iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
- ret = iwl_finish_nic_init(trans);
+ ret = iwl_finish_nic_init(trans, &trans->cfg->trans);
if (ret)
return ret;
@@ -440,7 +440,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
iwl_trans_pcie_sw_reset(trans);
- ret = iwl_finish_nic_init(trans);
+ ret = iwl_finish_nic_init(trans, &trans->cfg->trans);
if (WARN_ON(ret)) {
/* Release XTAL ON request */
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
@@ -490,7 +490,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_init_done));
+ BIT(trans->cfg->trans.csr->flag_init_done));
/* Activates XTAL resources monitor */
__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
@@ -512,12 +512,12 @@ void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
int ret;
/* stop device's busmaster DMA activity */
- iwl_set_bit(trans, trans->cfg->csr->addr_sw_reset,
- BIT(trans->cfg->csr->flag_stop_master));
+ iwl_set_bit(trans, trans->cfg->trans.csr->addr_sw_reset,
+ BIT(trans->cfg->trans.csr->flag_stop_master));
- ret = iwl_poll_bit(trans, trans->cfg->csr->addr_sw_reset,
- BIT(trans->cfg->csr->flag_master_dis),
- BIT(trans->cfg->csr->flag_master_dis), 100);
+ ret = iwl_poll_bit(trans, trans->cfg->trans.csr->addr_sw_reset,
+ BIT(trans->cfg->trans.csr->flag_master_dis),
+ BIT(trans->cfg->trans.csr->flag_master_dis), 100);
if (ret < 0)
IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
@@ -533,10 +533,11 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
iwl_pcie_apm_init(trans);
/* inform ME that we are leaving */
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
+ if (trans->cfg->trans.device_family == IWL_DEVICE_FAMILY_7000)
iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_WAKE_ME);
- else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) {
+ else if (trans->cfg->trans.device_family >=
+ IWL_DEVICE_FAMILY_8000) {
iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
@@ -566,7 +567,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_init_done));
+ BIT(trans->cfg->trans.csr->flag_init_done));
}
static int iwl_pcie_nic_init(struct iwl_trans *trans)
@@ -593,7 +594,7 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
if (iwl_pcie_tx_init(trans))
return -ENOMEM;
- if (trans->cfg->base_params->shadow_reg_enable) {
+ if (trans->cfg->trans.base_params->shadow_reg_enable) {
/* enable shadow regs in HW */
iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
@@ -831,7 +832,7 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
iwl_enable_interrupts(trans);
- if (trans->cfg->use_tfh) {
+ if (trans->cfg->trans.use_tfh) {
if (cpu == 1)
iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
0xFFFF);
@@ -963,7 +964,7 @@ monitor:
iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
trans->dbg.fw_mon[0].physical >>
dest->base_shift);
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
+ if (trans->cfg->trans.device_family >= IWL_DEVICE_FAMILY_8000)
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
(trans->dbg.fw_mon[0].physical +
trans->dbg.fw_mon[0].size - 256) >>
@@ -1005,7 +1006,7 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
/* supported for 7000 only for the moment */
if (iwlwifi_mod_params.fw_monitor &&
- trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ trans->cfg->trans.device_family == IWL_DEVICE_FAMILY_7000) {
iwl_pcie_alloc_fw_monitor(trans, 0);
if (trans->dbg.fw_mon[0].size) {
@@ -1134,7 +1135,7 @@ static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
int i, arr_size =
- (trans->cfg->device_family != IWL_DEVICE_FAMILY_22560) ?
+ (trans->cfg->trans.device_family != IWL_DEVICE_FAMILY_22560) ?
ARRAY_SIZE(causes_list) : ARRAY_SIZE(causes_list_v2);
/*
@@ -1144,7 +1145,8 @@ static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
*/
for (i = 0; i < arr_size; i++) {
struct iwl_causes_list *causes =
- (trans->cfg->device_family != IWL_DEVICE_FAMILY_22560) ?
+ (trans->cfg->trans.device_family !=
+ IWL_DEVICE_FAMILY_22560) ?
causes_list : causes_list_v2;
iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
@@ -1188,7 +1190,7 @@ void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
struct iwl_trans *trans = trans_pcie->trans;
if (!trans_pcie->msix_enabled) {
- if (trans->cfg->mq_rx_supported &&
+ if (trans->cfg->trans.mq_rx_supported &&
test_bit(STATUS_DEVICE_ENABLED, &trans->status))
iwl_write_umac_prph(trans, UREG_CHICK,
UREG_CHICK_MSI_ENABLE);
@@ -1269,7 +1271,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
/* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_access_req));
+ BIT(trans->cfg->trans.csr->flag_mac_access_req));
/* Stop the device, and put it in low power state */
iwl_pcie_apm_stop(trans, false);
@@ -1396,7 +1398,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
/* Load the given image to the HW */
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
+ if (trans->cfg->trans.device_family >= IWL_DEVICE_FAMILY_8000)
ret = iwl_pcie_load_given_ucode_8000(trans, fw);
else
ret = iwl_pcie_load_given_ucode(trans, fw);
@@ -1469,7 +1471,7 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
state ? "disabled" : "enabled");
if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
- if (trans->cfg->gen2)
+ if (trans->cfg->trans.gen2)
_iwl_trans_pcie_gen2_stop_device(trans);
else
_iwl_trans_pcie_stop_device(trans);
@@ -1499,9 +1501,9 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
iwl_pcie_synchronize_irqs(trans);
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_access_req));
+ BIT(trans->cfg->trans.csr->flag_mac_access_req));
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_init_done));
+ BIT(trans->cfg->trans.csr->flag_init_done));
if (reset) {
/*
@@ -1530,9 +1532,9 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
}
iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_access_req));
+ BIT(trans->cfg->trans.csr->flag_mac_access_req));
- ret = iwl_finish_nic_init(trans);
+ ret = iwl_finish_nic_init(trans, &trans->cfg->trans);
if (ret)
return ret;
@@ -1552,7 +1554,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
if (!reset) {
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_access_req));
+ BIT(trans->cfg->trans.csr->flag_mac_access_req));
} else {
iwl_trans_pcie_tx_reset(trans);
@@ -1583,7 +1585,7 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
int max_irqs, num_irqs, i, ret;
u16 pci_cmd;
- if (!trans->cfg->mq_rx_supported)
+ if (!trans->cfg->trans.mq_rx_supported)
goto enable_msi;
max_irqs = min_t(u32, num_online_cpus() + 2, IWL_MAX_RX_HW_QUEUES);
@@ -1704,7 +1706,7 @@ static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
{
u32 hpm, wprot;
- switch (trans->cfg->device_family) {
+ switch (trans->cfg->trans.device_family) {
case IWL_DEVICE_FAMILY_9000:
wprot = PREG_PRPH_WPROT_9000;
break;
@@ -1819,7 +1821,7 @@ static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
{
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->cfg->trans.device_family >= IWL_DEVICE_FAMILY_22560)
return 0x00FFFFFF;
else
return 0x000FFFFF;
@@ -1890,7 +1892,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iwl_pcie_synchronize_irqs(trans);
- if (trans->cfg->gen2)
+ if (trans->cfg->trans.gen2)
iwl_pcie_gen2_tx_free(trans);
else
iwl_pcie_tx_free(trans);
@@ -1972,8 +1974,8 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
/* this bit wakes up the NIC */
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_access_req));
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
+ BIT(trans->cfg->trans.csr->flag_mac_access_req));
+ if (trans->cfg->trans.device_family >= IWL_DEVICE_FAMILY_8000)
udelay(2);
/*
@@ -1997,8 +1999,8 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
* and do not save/restore SRAM when power cycling.
*/
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_val_mac_access_en),
- (BIT(trans->cfg->csr->flag_mac_clock_ready) |
+ BIT(trans->cfg->trans.csr->flag_val_mac_access_en),
+ (BIT(trans->cfg->trans.csr->flag_mac_clock_ready) |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
if (unlikely(ret < 0)) {
u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);
@@ -2080,7 +2082,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
goto out;
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_access_req));
+ BIT(trans->cfg->trans.csr->flag_mac_access_req));
/*
* Above we read the CSR_GP_CNTRL register, which will flush
* any previous writes, but we need the write that clears the
@@ -2187,7 +2189,7 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
- for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+ for (i = 0; i < trans->cfg->trans.base_params->num_of_queues; i++) {
struct iwl_txq *txq = trans_pcie->txq[i];
if (i == trans_pcie->cmd_queue)
@@ -2218,7 +2220,7 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
bool active;
u8 fifo;
- if (trans->cfg->use_tfh) {
+ if (trans->cfg->trans.use_tfh) {
IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
txq->read_ptr, txq->write_ptr);
/* TODO: access new SCD registers and dump them */
@@ -2235,9 +2237,9 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
jiffies_to_msecs(txq->wd_timeout),
txq->read_ptr, txq->write_ptr,
iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
- (trans->cfg->base_params->max_tfd_queue_size - 1),
+ (trans->cfg->trans.base_params->max_tfd_queue_size - 1),
iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
- (trans->cfg->base_params->max_tfd_queue_size - 1),
+ (trans->cfg->trans.base_params->max_tfd_queue_size - 1),
iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
}
@@ -2326,7 +2328,9 @@ static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
int ret = 0;
/* waiting for all the tx frames complete might take a while */
- for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+ for (cnt = 0;
+ cnt < trans->cfg->trans.base_params->num_of_queues;
+ cnt++) {
if (cnt == trans_pcie->cmd_queue)
continue;
@@ -2470,7 +2474,8 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
int ret;
size_t bufsz;
- bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;
+ bufsz = sizeof(char) * 75 *
+ trans->cfg->trans.base_params->num_of_queues;
if (!trans_pcie->txq_memory)
return -EAGAIN;
@@ -2479,7 +2484,9 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
if (!buf)
return -ENOMEM;
- for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+ for (cnt = 0;
+ cnt < trans->cfg->trans.base_params->num_of_queues;
+ cnt++) {
txq = trans_pcie->txq[cnt];
pos += scnprintf(buf + pos, bufsz - pos,
"hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
@@ -2949,7 +2956,7 @@ static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
(*data)->len = cpu_to_le32(fh_regs_len);
val = (void *)(*data)->data;
- if (!trans->cfg->gen2)
+ if (!trans->cfg->trans.gen2)
for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;
i += sizeof(u32))
*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
@@ -2997,7 +3004,7 @@ iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
{
u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (trans->cfg->trans.device_family >= IWL_DEVICE_FAMILY_AX210) {
base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;
base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
write_ptr = DBGC_CUR_DBGBUF_STATUS;
@@ -3017,7 +3024,7 @@ iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
fw_mon_data->fw_mon_base_ptr =
cpu_to_le32(iwl_read_prph(trans, base));
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (trans->cfg->trans.device_family >= IWL_DEVICE_FAMILY_AX210) {
fw_mon_data->fw_mon_base_high_ptr =
cpu_to_le32(iwl_read_prph(trans, base_high));
write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK;
@@ -3034,8 +3041,8 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
if (trans->dbg.dest_tlv ||
(trans->dbg.num_blocks &&
- (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
- trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
+ (trans->cfg->trans.device_family == IWL_DEVICE_FAMILY_7000 ||
+ trans->cfg->trans.device_family >= IWL_DEVICE_FAMILY_AX210))) {
struct iwl_fw_error_dump_fw_mon *fw_mon_data;
(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
@@ -3118,7 +3125,7 @@ static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
trans->dbg.dest_tlv->end_shift;
/* Make "end" point to the actual end */
- if (trans->cfg->device_family >=
+ if (trans->cfg->trans.device_family >=
IWL_DEVICE_FAMILY_8000 ||
trans->dbg.dest_tlv->monitor_mode == MARBH_MODE)
end += (1 << trans->dbg.dest_tlv->end_shift);
@@ -3144,7 +3151,7 @@ static struct iwl_trans_dump_data
u32 len, num_rbs = 0, monitor_len = 0;
int i, ptr;
bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
- !trans->cfg->mq_rx_supported &&
+ !trans->cfg->trans.mq_rx_supported &&
dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
if (!dump_mask)
@@ -3169,7 +3176,7 @@ static struct iwl_trans_dump_data
/* FH registers */
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
- if (trans->cfg->gen2)
+ if (trans->cfg->trans.gen2)
len += sizeof(*data) +
(iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) -
iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2));
@@ -3193,7 +3200,7 @@ static struct iwl_trans_dump_data
}
/* Paged memory for gen2 HW */
- if (trans->cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
+ if (trans->cfg->trans.gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
for (i = 0; i < trans->init_dram.paging_cnt; i++)
len += sizeof(*data) +
sizeof(struct iwl_fw_error_dump_paging) +
@@ -3248,7 +3255,8 @@ static struct iwl_trans_dump_data
len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
/* Paged memory for gen2 HW */
- if (trans->cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
+ if (trans->cfg->trans.gen2 &&
+ dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
for (i = 0; i < trans->init_dram.paging_cnt; i++) {
struct iwl_fw_error_dump_paging *paging;
u32 page_len = trans->init_dram.paging[i].size;
@@ -3374,7 +3382,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
if (ret)
return ERR_PTR(ret);
- if (cfg->gen2)
+ if (cfg->trans.gen2)
trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
&pdev->dev, cfg, &trans_ops_pcie_gen2);
else
@@ -3398,7 +3406,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
trans_pcie->debug_rfkill = -1;
- if (!cfg->base_params->pcie_l1_allowed) {
+ if (!cfg->trans.base_params->pcie_l1_allowed) {
/*
* W/A - seems to solve weird behavior. We need to remove this
* if we don't want to stay in L1 all the time. This wastes a
@@ -3411,7 +3419,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->def_rx_queue = 0;
- if (cfg->use_tfh) {
+ if (cfg->trans.use_tfh) {
addr_size = 64;
trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd);
@@ -3473,7 +3481,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
* "dash" value). To keep hw_rev backwards compatible - we'll store it
* in the old format.
*/
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) {
+ if (trans->cfg->trans.device_family >= IWL_DEVICE_FAMILY_8000) {
unsigned long flags;
trans->hw_rev = (trans->hw_rev & 0xfff0) |
@@ -3489,7 +3497,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
* in-order to recognize C step driver should read chip version
* id located at the AUX bus MISC address space.
*/
- ret = iwl_finish_nic_init(trans);
+ ret = iwl_finish_nic_init(trans, &trans->cfg->trans);
if (ret)
goto out_no_pci;
@@ -3516,7 +3524,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
#if IS_ENABLED(CONFIG_IWLMVM)
trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);
- if (cfg == &iwlax210_2ax_cfg_so_hr_a0) {
+ if (trans->cfg == &iwlax210_2ax_cfg_so_hr_a0) {
if (trans->hw_rev == CSR_HW_REV_TYPE_TY) {
trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 70a15364190c..3fb688826302 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -113,7 +113,7 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
*/
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ if (trans->cfg->trans.device_family >= IWL_DEVICE_FAMILY_22560) {
/* Starting from 22560, the HW expects bytes */
WARN_ON(trans_pcie->bc_table_dword);
WARN_ON(len > 0x3FFF);
@@ -547,7 +547,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
memset(tfd, 0, sizeof(*tfd));
- if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+ if (trans->cfg->trans.device_family < IWL_DEVICE_FAMILY_22560)
len = sizeof(struct iwl_tx_cmd_gen2);
else
len = sizeof(struct iwl_tx_cmd_gen3);
@@ -629,7 +629,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
return -1;
}
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ if (trans->cfg->trans.device_family >= IWL_DEVICE_FAMILY_22560) {
struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
(void *)dev_cmd->payload;
@@ -1129,7 +1129,7 @@ int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
if (!txq)
return -ENOMEM;
ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
- (trans->cfg->device_family >=
+ (trans->cfg->trans.device_family >=
IWL_DEVICE_FAMILY_22560) ?
sizeof(struct iwl_gen3_bc_tbl) :
sizeof(struct iwlagn_scd_bc_tbl));
@@ -1193,7 +1193,7 @@ int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
txq->id = qid;
trans_pcie->txq[qid] = txq;
- wr_ptr &= (trans->cfg->base_params->max_tfd_queue_size - 1);
+ wr_ptr &= (trans->cfg->trans.base_params->max_tfd_queue_size - 1);
/* Place first TFD at index corresponding to start sequence number */
txq->read_ptr = wr_ptr;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 8b7b918e71ad..774c8bfe8450 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -113,17 +113,17 @@ int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q)
* If q->n_window is smaller than max_tfd_queue_size, there is no need
* to reserve any queue entries for this purpose.
*/
- if (q->n_window < trans->cfg->base_params->max_tfd_queue_size)
+ if (q->n_window < trans->cfg->trans.base_params->max_tfd_queue_size)
max = q->n_window;
else
- max = trans->cfg->base_params->max_tfd_queue_size - 1;
+ max = trans->cfg->trans.base_params->max_tfd_queue_size - 1;
/*
* max_tfd_queue_size is a power of 2, so the following is equivalent to
* modulo by max_tfd_queue_size and is well defined.
*/
used = (q->write_ptr - q->read_ptr) &
- (trans->cfg->base_params->max_tfd_queue_size - 1);
+ (trans->cfg->trans.base_params->max_tfd_queue_size - 1);
if (WARN_ON(used > max))
return 0;
@@ -292,7 +292,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
* 2. NIC is woken up for CMD regardless of shadow outside this function
* 3. there is a chance that the NIC is asleep
*/
- if (!trans->cfg->base_params->shadow_reg_enable &&
+ if (!trans->cfg->trans.base_params->shadow_reg_enable &&
txq_id != trans_pcie->cmd_queue &&
test_bit(STATUS_TPOWER_PMI, &trans->status)) {
/*
@@ -306,7 +306,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
txq_id, reg);
iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_access_req));
+ BIT(trans->cfg->trans.csr->flag_mac_access_req));
txq->need_update = true;
return;
}
@@ -327,7 +327,7 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
- for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+ for (i = 0; i < trans->cfg->trans.base_params->num_of_queues; i++) {
struct iwl_txq *txq = trans_pcie->txq[i];
if (!test_bit(i, trans_pcie->queue_used))
@@ -346,7 +346,7 @@ static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
void *_tfd, u8 idx)
{
- if (trans->cfg->use_tfh) {
+ if (trans->cfg->trans.use_tfh) {
struct iwl_tfh_tfd *tfd = _tfd;
struct iwl_tfh_tb *tb = &tfd->tbs[idx];
@@ -389,7 +389,7 @@ static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd)
{
- if (trans->cfg->use_tfh) {
+ if (trans->cfg->trans.use_tfh) {
struct iwl_tfh_tfd *tfd = _tfd;
return le16_to_cpu(tfd->num_tbs) & 0x1f;
@@ -436,7 +436,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
meta->tbs = 0;
- if (trans->cfg->use_tfh) {
+ if (trans->cfg->trans.use_tfh) {
struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
tfd_fh->num_tbs = 0;
@@ -524,14 +524,14 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
size_t tfd_sz = trans_pcie->tfd_size *
- trans->cfg->base_params->max_tfd_queue_size;
+ trans->cfg->trans.base_params->max_tfd_queue_size;
size_t tb0_buf_sz;
int i;
if (WARN_ON(txq->entries || txq->tfds))
return -EINVAL;
- if (trans->cfg->use_tfh)
+ if (trans->cfg->trans.use_tfh)
tfd_sz = trans_pcie->tfd_size * slots_num;
timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0);
@@ -590,7 +590,8 @@ int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue)
{
int ret;
- u32 tfd_queue_max_size = trans->cfg->base_params->max_tfd_queue_size;
+ u32 tfd_queue_max_size =
+ trans->cfg->trans.base_params->max_tfd_queue_size;
txq->need_update = false;
@@ -638,14 +639,14 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
lockdep_assert_held(&trans_pcie->reg_lock);
- if (!trans->cfg->base_params->apmg_wake_up_wa)
+ if (!trans->cfg->trans.base_params->apmg_wake_up_wa)
return;
if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
return;
trans_pcie->cmd_hold_nic_awake = false;
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_access_req));
+ BIT(trans->cfg->trans.csr->flag_mac_access_req));
}
/*
@@ -725,7 +726,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
if (txq->tfds) {
dma_free_coherent(dev,
trans_pcie->tfd_size *
- trans->cfg->base_params->max_tfd_queue_size,
+ trans->cfg->trans.base_params->max_tfd_queue_size,
txq->tfds, txq->dma_addr);
txq->dma_addr = 0;
txq->tfds = NULL;
@@ -747,7 +748,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int nq = trans->cfg->base_params->num_of_queues;
+ int nq = trans->cfg->trans.base_params->num_of_queues;
int chan;
u32 reg_val;
int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
@@ -774,7 +775,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
/* The chain extension of the SCD doesn't work well. This feature is
* enabled by default by the HW, so we need to disable it manually.
*/
- if (trans->cfg->base_params->scd_chain_ext_wa)
+ if (trans->cfg->trans.base_params->scd_chain_ext_wa)
iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
@@ -796,7 +797,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
/* Enable L1-Active */
- if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
+ if (trans->cfg->trans.device_family < IWL_DEVICE_FAMILY_8000)
iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
@@ -810,13 +811,13 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
* we should never get here in gen2 trans mode return early to avoid
* having invalid accesses
*/
- if (WARN_ON_ONCE(trans->cfg->gen2))
+ if (WARN_ON_ONCE(trans->cfg->trans.gen2))
return;
- for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+ for (txq_id = 0; txq_id < trans->cfg->trans.base_params->num_of_queues;
txq_id++) {
struct iwl_txq *txq = trans_pcie->txq[txq_id];
- if (trans->cfg->use_tfh)
+ if (trans->cfg->trans.use_tfh)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
txq->dma_addr);
@@ -899,7 +900,7 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
return 0;
/* Unmap DMA from host system and free skb's */
- for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+ for (txq_id = 0; txq_id < trans->cfg->trans.base_params->num_of_queues;
txq_id++)
iwl_pcie_txq_unmap(trans, txq_id);
@@ -921,7 +922,7 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
/* Tx queues */
if (trans_pcie->txq_memory) {
for (txq_id = 0;
- txq_id < trans->cfg->base_params->num_of_queues;
+ txq_id < trans->cfg->trans.base_params->num_of_queues;
txq_id++) {
iwl_pcie_txq_free(trans, txq_id);
trans_pcie->txq[txq_id] = NULL;
@@ -945,9 +946,10 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
int ret;
int txq_id, slots_num;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u16 bc_tbls_size = trans->cfg->base_params->num_of_queues;
+ u16 bc_tbls_size = trans->cfg->trans.base_params->num_of_queues;
- bc_tbls_size *= (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+ bc_tbls_size *= (trans->cfg->trans.device_family >=
+ IWL_DEVICE_FAMILY_22560) ?
sizeof(struct iwl_gen3_bc_tbl) :
sizeof(struct iwlagn_scd_bc_tbl);
@@ -972,8 +974,9 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
goto error;
}
- trans_pcie->txq_memory = kcalloc(trans->cfg->base_params->num_of_queues,
- sizeof(struct iwl_txq), GFP_KERNEL);
+ trans_pcie->txq_memory =
+ kcalloc(trans->cfg->trans.base_params->num_of_queues,
+ sizeof(struct iwl_txq), GFP_KERNEL);
if (!trans_pcie->txq_memory) {
IWL_ERR(trans, "Not enough memory for txq\n");
ret = -ENOMEM;
@@ -981,7 +984,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
}
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+ for (txq_id = 0; txq_id < trans->cfg->trans.base_params->num_of_queues;
txq_id++) {
bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
@@ -1035,7 +1038,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
spin_unlock(&trans_pcie->irq_lock);
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+ for (txq_id = 0; txq_id < trans->cfg->trans.base_params->num_of_queues;
txq_id++) {
bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
@@ -1063,7 +1066,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
}
iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
- if (trans->cfg->base_params->num_of_queues > 20)
+ if (trans->cfg->trans.base_params->num_of_queues > 20)
iwl_set_bits_prph(trans, SCD_GP_CTRL,
SCD_GP_CTRL_ENABLE_31_QUEUES);
@@ -1135,7 +1138,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
IWL_ERR(trans,
"%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, last_to_free,
- trans->cfg->base_params->max_tfd_queue_size,
+ trans->cfg->trans.base_params->max_tfd_queue_size,
txq->write_ptr, txq->read_ptr);
goto out;
}
@@ -1158,7 +1161,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
txq->entries[read_ptr].skb = NULL;
- if (!trans->cfg->use_tfh)
+ if (!trans->cfg->trans.use_tfh)
iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
iwl_pcie_txq_free_tfd(trans, txq);
@@ -1250,19 +1253,19 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
* returned. This needs to be done only on NICs that have
* apmg_wake_up_wa set.
*/
- if (cfg->base_params->apmg_wake_up_wa &&
+ if (cfg->trans.base_params->apmg_wake_up_wa &&
!trans_pcie->cmd_hold_nic_awake) {
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
- BIT(cfg->csr->flag_mac_access_req));
+ BIT(cfg->trans.csr->flag_mac_access_req));
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- BIT(cfg->csr->flag_val_mac_access_en),
- (BIT(cfg->csr->flag_mac_clock_ready) |
+ BIT(cfg->trans.csr->flag_val_mac_access_en),
+ (BIT(cfg->trans.csr->flag_mac_clock_ready) |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
15000);
if (ret < 0) {
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- BIT(cfg->csr->flag_mac_access_req));
+ BIT(cfg->trans.csr->flag_mac_access_req));
IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
return -EIO;
}
@@ -1292,12 +1295,12 @@ void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
idx = iwl_pcie_get_cmd_index(txq, idx);
r = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
- if (idx >= trans->cfg->base_params->max_tfd_queue_size ||
+ if (idx >= trans->cfg->trans.base_params->max_tfd_queue_size ||
(!iwl_queue_used(txq, idx))) {
WARN_ONCE(test_bit(txq_id, trans_pcie->queue_used),
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, idx,
- trans->cfg->base_params->max_tfd_queue_size,
+ trans->cfg->trans.base_params->max_tfd_queue_size,
txq->write_ptr, txq->read_ptr);
return;
}
@@ -1411,7 +1414,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
* this sad hardware issue.
* This bug has been fixed on devices 9000 and up.
*/
- scd_bug = !trans->cfg->mq_rx_supported &&
+ scd_bug = !trans->cfg->trans.mq_rx_supported &&
!((ssn - txq->write_ptr) & 0x3f) &&
(ssn != txq->write_ptr);
if (scd_bug)