author     Thomas Gleixner <tglx@linutronix.de>              2017-07-24 12:52:56 +0200
committer  Martin K. Petersen <martin.petersen@oracle.com>   2017-07-26 21:51:24 -0400
commit     8addebc14a322fa8ca67cd57c6038069acde8ddc
tree       3fb5ee2628099fb5a7159e61d1acbcff180002ed /drivers/scsi/bnx2fc
parent     2c67521821acd900d7508f37e49d0b494011106d
scsi: bnx2fc: Plug CPU hotplug race
bnx2fc_process_new_cqes() has protection against CPU hotplug, which relies
on the per-CPU thread pointer. This protection is racy because it happens
only partially with the per-CPU fp_work_lock held. If the CPU is unplugged
after the lock is dropped, the wakeup code can dereference a NULL pointer
or access freed and potentially reused memory.

Restructure the code so the thread check and wakeup happen with
fp_work_lock held.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Chad Dupuis <chad.dupuis@cavium.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
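For illustration, the racy window in the code being removed looks like this
(condensed from the second hunk below; the comment marking the window is
added here and is not part of the driver):

	spin_lock_bh(&fps->fp_work_lock);
	if (unlikely(!fps->iothread))
		goto unlock;
	work = bnx2fc_alloc_work(tgt, wqe);
	if (work)
		list_add_tail(&work->list, &fps->work_list);
unlock:
	spin_unlock_bh(&fps->fp_work_lock);

	/*
	 * CPU hotplug can clear and free fps->iothread here, after the lock
	 * is dropped; the check and wakeup below then use a stale pointer.
	 */
	if (fps->iothread && work)
		wake_up_process(fps->iothread);
	else
		bnx2fc_process_cq_compl(tgt, wqe);

The new bnx2fc_pending_work() closes that window by performing the iothread
check, the list insertion and the wake_up_process() call without releasing
fp_work_lock in between.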
Diffstat (limited to 'drivers/scsi/bnx2fc')
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_hwi.c | 45
1 file changed, 23 insertions(+), 22 deletions(-)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 913c750205ce..26de61d65a4d 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1008,6 +1008,28 @@ static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
 	return work;
 }
 
+/* Pending work request completion */
+static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
+{
+	unsigned int cpu = wqe % num_possible_cpus();
+	struct bnx2fc_percpu_s *fps;
+	struct bnx2fc_work *work;
+
+	fps = &per_cpu(bnx2fc_percpu, cpu);
+	spin_lock_bh(&fps->fp_work_lock);
+	if (fps->iothread) {
+		work = bnx2fc_alloc_work(tgt, wqe);
+		if (work) {
+			list_add_tail(&work->list, &fps->work_list);
+			wake_up_process(fps->iothread);
+			spin_unlock_bh(&fps->fp_work_lock);
+			return;
+		}
+	}
+	spin_unlock_bh(&fps->fp_work_lock);
+	bnx2fc_process_cq_compl(tgt, wqe);
+}
+
 int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
 {
 	struct fcoe_cqe *cq;
@@ -1042,28 +1064,7 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
 			/* Unsolicited event notification */
 			bnx2fc_process_unsol_compl(tgt, wqe);
 		} else {
-			/* Pending work request completion */
-			struct bnx2fc_work *work = NULL;
-			struct bnx2fc_percpu_s *fps = NULL;
-			unsigned int cpu = wqe % num_possible_cpus();
-
-			fps = &per_cpu(bnx2fc_percpu, cpu);
-			spin_lock_bh(&fps->fp_work_lock);
-			if (unlikely(!fps->iothread))
-				goto unlock;
-
-			work = bnx2fc_alloc_work(tgt, wqe);
-			if (work)
-				list_add_tail(&work->list,
-					      &fps->work_list);
-unlock:
-			spin_unlock_bh(&fps->fp_work_lock);
-
-			/* Pending work request completion */
-			if (fps->iothread && work)
-				wake_up_process(fps->iothread);
-			else
-				bnx2fc_process_cq_compl(tgt, wqe);
+			bnx2fc_pending_work(tgt, wqe);
 			num_free_sqes++;
 		}
 		cqe++;