summaryrefslogtreecommitdiff
path: root/drivers/dma
diff options
context:
space:
mode:
authorAnup Patel <anup.patel@broadcom.com>2017-08-22 15:26:51 +0530
committerVinod Koul <vinod.koul@intel.com>2017-08-28 16:44:24 +0530
commite4274cfa422b50c5b74434f4f23d9163626a01f4 (patch)
treead1c4c74f1e71acd17e3d9b00c95f4a89e85a425 /drivers/dma
parente897091ab9e022a1adb98ba56dfc5a8d9600f6c4 (diff)
dmaengine: bcm-sba-raid: Reduce locking context in sba_alloc_request()
We don't need to hold "sba->reqs_lock" for a long time in sba_alloc_request(), because lock protection is not required when initializing the members of "struct sba_request".

Signed-off-by: Anup Patel <anup.patel@broadcom.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r--drivers/dma/bcm-sba-raid.c22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index d1d76641709b..fb13ec552900 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -207,24 +207,24 @@ static struct sba_request *sba_alloc_request(struct sba_device *sba)
struct sba_request *req = NULL;
spin_lock_irqsave(&sba->reqs_lock, flags);
-
req = list_first_entry_or_null(&sba->reqs_free_list,
struct sba_request, node);
if (req) {
list_move_tail(&req->node, &sba->reqs_alloc_list);
- req->state = SBA_REQUEST_STATE_ALLOCED;
- req->fence = false;
- req->first = req;
- INIT_LIST_HEAD(&req->next);
- req->next_count = 1;
- atomic_set(&req->next_pending_count, 1);
-
sba->reqs_free_count--;
-
- dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
}
-
spin_unlock_irqrestore(&sba->reqs_lock, flags);
+ if (!req)
+ return NULL;
+
+ req->state = SBA_REQUEST_STATE_ALLOCED;
+ req->fence = false;
+ req->first = req;
+ INIT_LIST_HEAD(&req->next);
+ req->next_count = 1;
+ atomic_set(&req->next_pending_count, 1);
+
+ dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
return req;
}