Diffstat (limited to 'drivers/infiniband/hw/hfi1/sdma.c')
| Mode       | File                              | Lines changed |
|------------|-----------------------------------|---------------|
| -rw-r--r-- | drivers/infiniband/hw/hfi1/sdma.c | 65            |
1 file changed, 25 insertions, 40 deletions
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index f07d328689d3..5cfa4f8fbf3d 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
  * Copyright(c) 2015 - 2018 Intel Corporation.
  */
@@ -467,7 +467,8 @@ static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
 static void sdma_err_progress_check(struct timer_list *t)
 {
 	unsigned index;
-	struct sdma_engine *sde = from_timer(sde, t, err_progress_check_timer);
+	struct sdma_engine *sde = timer_container_of(sde, t,
+						     err_progress_check_timer);
 
 	dd_dev_err(sde->dd, "SDE progress check event\n");
 	for (index = 0; index < sde->dd->num_sdma; index++) {
@@ -989,7 +990,7 @@ ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
 	}
 
 	/* Clean up old mappings */
-	for_each_cpu(cpu, cpu_online_mask) {
+	for_each_online_cpu(cpu) {
 		struct sdma_rht_node *rht_node;
 
 		/* Don't cleanup sdes that are set in the new mask */
@@ -1288,11 +1289,13 @@ void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
 		kvfree(sde->tx_ring);
 		sde->tx_ring = NULL;
 	}
-	spin_lock_irq(&dd->sde_map_lock);
-	sdma_map_free(rcu_access_pointer(dd->sdma_map));
-	RCU_INIT_POINTER(dd->sdma_map, NULL);
-	spin_unlock_irq(&dd->sde_map_lock);
-	synchronize_rcu();
+	if (rcu_access_pointer(dd->sdma_map)) {
+		spin_lock_irq(&dd->sde_map_lock);
+		sdma_map_free(rcu_access_pointer(dd->sdma_map));
+		RCU_INIT_POINTER(dd->sdma_map, NULL);
+		spin_unlock_irq(&dd->sde_map_lock);
+		synchronize_rcu();
+	}
 	kfree(dd->per_sdma);
 	dd->per_sdma = NULL;
 
@@ -1519,24 +1522,6 @@ void sdma_all_running(struct hfi1_devdata *dd)
 }
 
 /**
- * sdma_all_idle() - called when the link goes down
- * @dd: hfi1_devdata
- *
- * This routine moves all engines to the idle state.
- */
-void sdma_all_idle(struct hfi1_devdata *dd)
-{
-	struct sdma_engine *sde;
-	unsigned int i;
-
-	/* idle all engines */
-	for (i = 0; i < dd->num_sdma; ++i) {
-		sde = &dd->per_sdma[i];
-		sdma_process_event(sde, sdma_event_e70_go_idle);
-	}
-}
-
-/**
  * sdma_start() - called to kick off state processing for all engines
  * @dd: hfi1_devdata
  *
@@ -1573,7 +1558,7 @@ void sdma_exit(struct hfi1_devdata *dd)
 			sde->this_idx);
 		sdma_process_event(sde, sdma_event_e00_go_hw_down);
 
-		del_timer_sync(&sde->err_progress_check_timer);
+		timer_delete_sync(&sde->err_progress_check_timer);
 
 		/*
 		 * This waits for the state machine to exit so it is not
@@ -1593,20 +1578,18 @@ static inline void sdma_unmap_desc(
 {
 	switch (sdma_mapping_type(descp)) {
 	case SDMA_MAP_SINGLE:
-		dma_unmap_single(
-			&dd->pcidev->dev,
-			sdma_mapping_addr(descp),
-			sdma_mapping_len(descp),
-			DMA_TO_DEVICE);
+		dma_unmap_single(&dd->pcidev->dev, sdma_mapping_addr(descp),
+				 sdma_mapping_len(descp), DMA_TO_DEVICE);
 		break;
 	case SDMA_MAP_PAGE:
-		dma_unmap_page(
-			&dd->pcidev->dev,
-			sdma_mapping_addr(descp),
-			sdma_mapping_len(descp),
-			DMA_TO_DEVICE);
+		dma_unmap_page(&dd->pcidev->dev, sdma_mapping_addr(descp),
+			       sdma_mapping_len(descp), DMA_TO_DEVICE);
 		break;
 	}
+
+	if (descp->pinning_ctx && descp->ctx_put)
+		descp->ctx_put(descp->pinning_ctx);
+	descp->pinning_ctx = NULL;
 }
 
 /*
@@ -3127,7 +3110,7 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
 		/* Add descriptor for coalesce buffer */
 		tx->desc_limit = MAX_DESC;
 		return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
-					 addr, tx->tlen);
+					 addr, tx->tlen, NULL, NULL, NULL);
 	}
 
 	return 1;
@@ -3158,7 +3141,6 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
 {
 	int rval = 0;
 
-	tx->num_desc++;
 	if ((unlikely(tx->num_desc == tx->desc_limit))) {
 		rval = _extend_sdma_tx_descs(dd, tx);
 		if (rval) {
@@ -3166,12 +3148,15 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
 			return rval;
 		}
 	}
+
 	/* finish the one just added */
 	make_tx_sdma_desc(
 		tx,
 		SDMA_MAP_NONE,
 		dd->sdma_pad_phys,
-		sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
+		sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)),
+		NULL, NULL, NULL);
+	tx->num_desc++;
 	_sdma_close_tx(dd, tx);
 	return rval;
 }
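
For context, the two timer-related hunks above follow the kernel-wide renames of from_timer() to timer_container_of() and del_timer_sync() to timer_delete_sync(). A minimal sketch of the pattern, using a hypothetical foo_device structure rather than anything from hfi1:

#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/timer.h>

/* Hypothetical example structure embedding a timer; not part of hfi1. */
struct foo_device {
	struct timer_list watchdog;
	int id;
};

static void foo_watchdog_expired(struct timer_list *t)
{
	/* Recover the enclosing structure from the timer pointer;
	 * timer_container_of() is the replacement for from_timer(). */
	struct foo_device *fdev = timer_container_of(fdev, t, watchdog);

	pr_info("foo%d: watchdog expired\n", fdev->id);
}

static void foo_watchdog_start(struct foo_device *fdev)
{
	timer_setup(&fdev->watchdog, foo_watchdog_expired, 0);
	mod_timer(&fdev->watchdog, jiffies + msecs_to_jiffies(500));
}

static void foo_watchdog_stop(struct foo_device *fdev)
{
	/* timer_delete_sync() is the current name for del_timer_sync();
	 * it waits for a concurrently running callback to finish. */
	timer_delete_sync(&fdev->watchdog);
}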
