@@ -319,16 +319,17 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *sel;
struct fcoe_fcf *fcf;
+ unsigned long flags;

mutex_lock(&fip->ctlr_mutex);
- spin_lock_bh(&fip->ctlr_lock);
+ spin_lock_irqsave(&fip->ctlr_lock, flags);

kfree_skb(fip->flogi_req);
fip->flogi_req = NULL;
list_for_each_entry(fcf, &fip->fcfs, list)
fcf->flogi_sent = 0;

- spin_unlock_bh(&fip->ctlr_lock);
+ spin_unlock_irqrestore(&fip->ctlr_lock, flags);
sel = fip->sel_fcf;

if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr))
@@ -699,6 +700,7 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
{
struct fc_frame *fp;
struct fc_frame_header *fh;
+ unsigned long flags;
u16 old_xid;
u8 op;
u8 mac[ETH_ALEN];
@@ -732,11 +734,11 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
op = FIP_DT_FLOGI;
if (fip->mode == FIP_MODE_VN2VN)
break;
- spin_lock_bh(&fip->ctlr_lock);
+ spin_lock_irqsave(&fip->ctlr_lock, flags);
kfree_skb(fip->flogi_req);
fip->flogi_req = skb;
fip->flogi_req_send = 1;
- spin_unlock_bh(&fip->ctlr_lock);
+ spin_unlock_irqrestore(&fip->ctlr_lock, flags);
schedule_work(&fip->timer_work);
return -EINPROGRESS;
case ELS_FDISC:
@@ -1705,10 +1707,11 @@ static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip)
static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *fcf;
+ unsigned long flags;
int error;

mutex_lock(&fip->ctlr_mutex);
- spin_lock_bh(&fip->ctlr_lock);
+ spin_lock_irqsave(&fip->ctlr_lock, flags);
LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n");
fcf = fcoe_ctlr_select(fip);
if (!fcf || fcf->flogi_sent) {
@@ -1719,7 +1722,7 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
fcoe_ctlr_solicit(fip, NULL);
error = fcoe_ctlr_flogi_send_locked(fip);
}
- spin_unlock_bh(&fip->ctlr_lock);
+ spin_unlock_irqrestore(&fip->ctlr_lock, flags);
mutex_unlock(&fip->ctlr_mutex);
return error;
}
@@ -1736,8 +1739,9 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *fcf;
+ unsigned long flags;

- spin_lock_bh(&fip->ctlr_lock);
+ spin_lock_irqsave(&fip->ctlr_lock, flags);
fcf = fip->sel_fcf;
if (!fcf || !fip->flogi_req_send)
goto unlock;
@@ -1764,7 +1768,7 @@ static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
} else /* XXX */
LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n");
unlock:
- spin_unlock_bh(&fip->ctlr_lock);
+ spin_unlock_irqrestore(&fip->ctlr_lock, flags);
}

/**
There is a long call chain through which &fip->ctlr_lock is acquired by
the ISR fnic_isr_msix_wq_copy() in hard IRQ context. Any process-context
code acquiring the same lock must therefore disable hard IRQs; otherwise
a deadlock can occur if the IRQ preempts execution while the lock is
held in process context on the same CPU.

[ISR]
fnic_isr_msix_wq_copy()
 -> fnic_wq_copy_cmpl_handler()
 -> fnic_fcpio_cmpl_handler()
 -> fnic_fcpio_flogi_reg_cmpl_handler()
 -> fnic_flush_tx()
 -> fnic_send_frame()
 -> fcoe_ctlr_els_send()
 -> spin_lock_bh(&fip->ctlr_lock)

[Process Context]
1. fcoe_ctlr_timer_work()
 -> fcoe_ctlr_flogi_send()
 -> spin_lock_bh(&fip->ctlr_lock)

2. fcoe_ctlr_recv_work()
 -> fcoe_ctlr_recv_handler()
 -> fcoe_ctlr_recv_els()
 -> fcoe_ctlr_announce()
 -> spin_lock_bh(&fip->ctlr_lock)

3. fcoe_ctlr_recv_work()
 -> fcoe_ctlr_recv_handler()
 -> fcoe_ctlr_recv_els()
 -> fcoe_ctlr_flogi_retry()
 -> spin_lock_bh(&fip->ctlr_lock)

4. -> fcoe_xmit()
 -> fcoe_ctlr_els_send()
 -> spin_lock_bh(&fip->ctlr_lock)

spin_lock_bh() is not sufficient here because fnic_isr_msix_wq_copy()
runs in hard IRQ context; it only disables soft IRQs. These flaws were
found by an experimental static analysis tool I am developing for
IRQ-related deadlocks.

This patch fixes the potential deadlocks by using spin_lock_irqsave()
to disable hard IRQs while the lock is held.

Signed-off-by: Chengfeng Ye <dg573847474@gmail.com>
---
 drivers/scsi/fcoe/fcoe_ctlr.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)
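For reviewers, the locking rule this patch applies can be shown with a
minimal, self-contained sketch (the demo_lock/demo_isr/demo_work_fn
names are hypothetical illustrations, not code from this patch): a lock
that is also taken from hard-IRQ context must be taken with
spin_lock_irqsave() in process context, since spin_lock_bh() only
disables soft IRQs and leaves the critical section preemptible by the
hard IRQ on the same CPU.

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>

/* Hypothetical lock standing in for fip->ctlr_lock. */
static DEFINE_SPINLOCK(demo_lock);

/* Hard-IRQ context, analogous to fnic_isr_msix_wq_copy(). */
static irqreturn_t demo_isr(int irq, void *dev_id)
{
	/* Hard IRQs are already disabled here, so plain spin_lock() suffices. */
	spin_lock(&demo_lock);
	/* ... touch state shared with process context ... */
	spin_unlock(&demo_lock);
	return IRQ_HANDLED;
}

/* Process context, analogous to fcoe_ctlr_flogi_send() via timer_work. */
static void demo_work_fn(struct work_struct *work)
{
	unsigned long flags;

	/*
	 * spin_lock_bh() here would only mask soft IRQs: demo_isr() could
	 * still fire on this CPU while the lock is held and spin forever.
	 * spin_lock_irqsave() masks hard IRQs across the critical section.
	 */
	spin_lock_irqsave(&demo_lock, flags);
	/* ... touch the shared state safely ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}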