Diffstat (limited to 'drivers/dma/mv_xor.c')
-rw-r--r--   drivers/dma/mv_xor.c   32
1 file changed, 24 insertions(+), 8 deletions(-)
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index bcd3b623ac6c..5e8386296046 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -414,7 +414,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
 	if (!mv_chan_is_busy(mv_chan)) {
 		u32 current_desc = mv_chan_get_current_desc(mv_chan);
 		/*
-		 * and the curren desc is the end of the chain before
+		 * and the current desc is the end of the chain before
 		 * the append, then we need to start the channel
 		 */
 		if (current_desc == old_chain_tail->async_tx.phys)
@@ -1013,7 +1013,7 @@ static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
 
 	dma_async_device_unregister(&mv_chan->dmadev);
 
-	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
+	dma_free_wc(dev, MV_XOR_POOL_SIZE,
 		    mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
 	dma_unmap_single(dev, mv_chan->dummy_src_addr,
 			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
@@ -1061,8 +1061,16 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 	 */
 	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
 		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dma_dev->dev, mv_chan->dummy_src_addr))
+		return ERR_PTR(-ENOMEM);
+
 	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
 		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
+	if (dma_mapping_error(dma_dev->dev, mv_chan->dummy_dst_addr)) {
+		ret = -ENOMEM;
+		goto err_unmap_src;
+	}
+
 
 	/* allocate coherent memory for hardware descriptors
 	 * note: writecombine gives slightly better performance, but
@@ -1071,10 +1079,12 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 	mv_chan->dma_desc_pool_virt =
 	  dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
 		       GFP_KERNEL);
-	if (!mv_chan->dma_desc_pool_virt)
-		return ERR_PTR(-ENOMEM);
+	if (!mv_chan->dma_desc_pool_virt) {
+		ret = -ENOMEM;
+		goto err_unmap_dst;
+	}
 
-	/* discover transaction capabilites from the platform data */
+	/* discover transaction capabilities from the platform data */
 	dma_dev->cap_mask = cap_mask;
 
 	INIT_LIST_HEAD(&dma_dev->channels);
@@ -1153,8 +1163,15 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 err_free_irq:
 	free_irq(mv_chan->irq, mv_chan);
 err_free_dma:
-	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
+	dma_free_wc(&pdev->dev, MV_XOR_POOL_SIZE,
 			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
+err_unmap_dst:
+	dma_unmap_single(dma_dev->dev, mv_chan->dummy_dst_addr,
+			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
+err_unmap_src:
+	dma_unmap_single(dma_dev->dev, mv_chan->dummy_src_addr,
+			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
+
 	return ERR_PTR(ret);
 }
 
@@ -1369,10 +1386,9 @@ static int mv_xor_probe(struct platform_device *pdev)
 		return 0;
 
 	if (pdev->dev.of_node) {
-		struct device_node *np;
 		int i = 0;
 
-		for_each_child_of_node(pdev->dev.of_node, np) {
+		for_each_child_of_node_scoped(pdev->dev.of_node, np) {
 			struct mv_xor_chan *chan;
 			dma_cap_mask_t cap_mask;
 			int irq;
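
A note on the mv_xor_channel_add() hunks: dma_map_single() can fail, so each mapping is now checked with dma_mapping_error() and unwound in reverse order of acquisition on error, and the descriptor pool allocated with dma_alloc_wc() is now released with dma_free_wc() instead of the mismatched dma_free_coherent(). Below is a minimal, self-contained sketch of that setup/unwind pattern; my_chan_setup(), MY_BUF_SIZE and MY_POOL_SIZE are made-up names used only for illustration, not code from this driver.

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>

#define MY_BUF_SIZE     32
#define MY_POOL_SIZE    4096

/*
 * Hypothetical setup helper mirroring the pattern above: every
 * dma_map_single() result is checked with dma_mapping_error(), the
 * descriptor pool comes from dma_alloc_wc() (so it must later be freed
 * with dma_free_wc(), not dma_free_coherent()), and the error path
 * unwinds in reverse order of acquisition.
 */
static void *my_chan_setup(struct device *dev, void *src_buf, void *dst_buf,
                           dma_addr_t *src, dma_addr_t *dst, dma_addr_t *pool)
{
        void *pool_virt;
        int ret;

        *src = dma_map_single(dev, src_buf, MY_BUF_SIZE, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *src))
                return ERR_PTR(-ENOMEM);

        *dst = dma_map_single(dev, dst_buf, MY_BUF_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *dst)) {
                ret = -ENOMEM;
                goto err_unmap_src;
        }

        pool_virt = dma_alloc_wc(dev, MY_POOL_SIZE, pool, GFP_KERNEL);
        if (!pool_virt) {
                ret = -ENOMEM;
                goto err_unmap_dst;
        }

        return pool_virt;

err_unmap_dst:
        dma_unmap_single(dev, *dst, MY_BUF_SIZE, DMA_TO_DEVICE);
err_unmap_src:
        dma_unmap_single(dev, *src, MY_BUF_SIZE, DMA_FROM_DEVICE);
        return ERR_PTR(ret);
}

The ordering of the labels is the whole point: each goto target releases exactly the resources that were acquired before the failure, and nothing more.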
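
The last hunk switches the probe loop to for_each_child_of_node_scoped(). The scoped variant declares the iterator variable itself and drops the reference on the child node automatically when it goes out of scope, which is why the separate struct device_node *np declaration disappears and why early exits from the loop no longer need an explicit of_node_put(). A rough sketch of the idiom, using a hypothetical my_count_children() helper rather than the mv_xor probe code:

#include <linux/errno.h>
#include <linux/of.h>

/* Hypothetical helper: count the children of a DT node that carry a
 * "reg" property, bailing out on the first malformed child.
 */
static int my_count_children(struct device_node *parent)
{
        int n = 0;

        for_each_child_of_node_scoped(parent, child) {
                u32 reg;

                /* Returning from inside the loop is safe here: the scoped
                 * iterator drops its reference on "child" automatically,
                 * where for_each_child_of_node() would have required an
                 * explicit of_node_put() before the return.
                 */
                if (of_property_read_u32(child, "reg", &reg))
                        return -EINVAL;

                n++;
        }

        return n;
}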
