diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -35,6 +35,8 @@
#include <uapi/scsi/fc/fc_els.h>
+extern struct workqueue_struct *qla2xxx_wq;
+
/* Big endian Fibre Channel S_ID (source ID) or D_ID (destination ID). */
typedef struct {
uint8_t domain;
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -3947,7 +3947,7 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
ql_dbg(ql_dbg_disc, vha, 0xffff,
"%s: schedule\n", __func__);
vha->scan.scan_flags |= SF_QUEUED;
- schedule_delayed_work(&vha->scan.scan_work, 5);
+ queue_delayed_work(qla2xxx_wq, &vha->scan.scan_work, 5);
}
spin_unlock_irqrestore(&vha->work_lock, flags);
@@ -4113,7 +4113,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
"%s: Scan scheduled.\n", __func__);
vha->scan.scan_flags |= SF_QUEUED;
- schedule_delayed_work(&vha->scan.scan_work, 5);
+ queue_delayed_work(qla2xxx_wq, &vha->scan.scan_work, 5);
}
spin_unlock_irqrestore(&vha->work_lock, flags);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1886,7 +1886,7 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
if (vha->scan.scan_flags == 0) {
ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__);
vha->scan.scan_flags |= SF_QUEUED;
- schedule_delayed_work(&vha->scan.scan_work, 5);
+ queue_delayed_work(qla2xxx_wq, &vha->scan.scan_work, 5);
}
spin_unlock_irqrestore(&vha->work_lock, flags);
}
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -230,7 +230,7 @@ static void qla_nvme_sp_ls_done(srb_t *sp, int res)
priv->comp_status = res;
INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
- schedule_work(&priv->ls_work);
+ queue_work(qla2xxx_wq, &priv->ls_work);
}
/* it is assumed that QPair lock is held. */
@@ -324,7 +324,7 @@ static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
spin_unlock_irqrestore(&priv->cmd_lock, flags);
INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
- schedule_work(&priv->abort_work);
+ queue_work(qla2xxx_wq, &priv->abort_work);
}
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
@@ -411,7 +411,7 @@ static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
spin_unlock_irqrestore(&priv->cmd_lock, flags);
INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
- schedule_work(&priv->abort_work);
+ queue_work(qla2xxx_wq, &priv->abort_work);
}
static inline int qla2x00_start_nvme_mq(srb_t *sp)
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -45,6 +45,8 @@ module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xenforce_iocb_limit,
"Enforce IOCB throttling, to avoid FW congestion. (default: 1)");
+struct workqueue_struct *qla2xxx_wq;
+
/*
* CT6 CTX allocation cache
*/
@@ -5341,7 +5343,7 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
}
fcport->fw_login_state = 0;
- schedule_delayed_work(&vha->scan.scan_work, 5);
+ queue_delayed_work(qla2xxx_wq, &vha->scan.scan_work, 5);
} else {
qla24xx_fcport_handle_login(vha, fcport);
}
@@ -8123,10 +8125,16 @@ qla2x00_module_init(void)
return -ENOMEM;
}
+ qla2xxx_wq = alloc_workqueue("qla2xxx_wq", 0, 0);
+ if (!qla2xxx_wq) {
+ ret = -ENOMEM;
+ goto destroy_cache;
+ }
+
/* Initialize target kmem_cache and mem_pools */
ret = qlt_init();
if (ret < 0) {
- goto destroy_cache;
+ goto destroy_wq;
} else if (ret > 0) {
/*
* If initiator mode is explicitly disabled by qlt_init(),
@@ -8190,6 +8198,9 @@ qla2x00_module_init(void)
qlt_exit:
qlt_exit();
+destroy_wq:
+ destroy_workqueue(qla2xxx_wq);
+
destroy_cache:
kmem_cache_destroy(srb_cachep);
return ret;
@@ -8209,6 +8220,7 @@ qla2x00_module_exit(void)
unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
fc_release_transport(qla2xxx_transport_template);
qlt_exit();
+ destroy_workqueue(qla2xxx_wq);
kmem_cache_destroy(srb_cachep);
}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -262,7 +262,7 @@ static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
- schedule_delayed_work(&vha->unknown_atio_work, 1);
+ queue_delayed_work(qla2xxx_wq, &vha->unknown_atio_work, 1);
out:
return;
@@ -307,8 +307,7 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
"Reschedule u %p, vha %p, host %p\n", u, vha, host);
if (!queued) {
queued = 1;
- schedule_delayed_work(&vha->unknown_atio_work,
- 1);
+ queue_delayed_work(qla2xxx_wq, &vha->unknown_atio_work, 1);
}
continue;
}
@@ -1556,7 +1555,7 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
spin_lock_irqsave(&tgt->sess_work_lock, flags);
while (!list_empty(&tgt->sess_works_list)) {
spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
- flush_scheduled_work();
+ flush_workqueue(qla2xxx_wq);
spin_lock_irqsave(&tgt->sess_work_lock, flags);
}
spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
@@ -1696,7 +1695,7 @@ static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
- schedule_work(&tgt->sess_work);
+ queue_work(qla2xxx_wq, &tgt->sess_work);
return 0;
}
Replace system_wq with qla2xxx_wq in the files which are compiled into
qla2xxx.o, in order to avoid flush_scheduled_work() usage.

Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
---
Please see commit c4f135d643823a86 ("workqueue: Wrap flush_workqueue()
using a macro") for background.

This is a blind conversion, for I don't know whether qlt_stop_phase1()
needs to wait for all works. If qlt_stop_phase1() needs to wait only for
works scheduled via qlt_sched_sess_work(), can we use flush_work()
instead of introducing a dedicated qla2xxx_wq?

Well, it seems to me that qlt_sess_work_fn() is currently racy with
regard to flush_scheduled_work() from qlt_stop_phase1():
flush_scheduled_work() will not be called when list_empty() == true, yet
the list can already be empty while a work is still running, because
qlt_sess_work_fn() calls list_del() before processing the entry. That
won't be fixed by replacing flush_scheduled_work() with
flush_work(&tgt->sess_work)...

What do you want to do? Just call drain_workqueue(qla2xxx_wq)
unconditionally?

 drivers/scsi/qla2xxx/qla_def.h    |  2 ++
 drivers/scsi/qla2xxx/qla_gs.c     |  4 ++--
 drivers/scsi/qla2xxx/qla_init.c   |  2 +-
 drivers/scsi/qla2xxx/qla_nvme.c   |  6 +++---
 drivers/scsi/qla2xxx/qla_os.c     | 16 ++++++++++++++--
 drivers/scsi/qla2xxx/qla_target.c |  9 ++++-----
 6 files changed, 26 insertions(+), 13 deletions(-)
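To make the drain_workqueue() alternative concrete, here is a sketch
(not part of this patch) of what qlt_stop_phase1() could do instead of
polling sess_works_list and calling flush_workqueue(). It assumes that
nothing queues new work on qla2xxx_wq after tgt->tgt_stop has been set,
since drain_workqueue() warns if new work arrives from outside the
queue while draining:

	/*
	 * Sketch only: wait until qla2xxx_wq has neither pending nor
	 * running work.  Unlike the list_empty() check on
	 * sess_works_list, this also covers the window where
	 * qlt_sess_work_fn() has already done list_del() but is still
	 * processing the entry it removed.
	 */
	drain_workqueue(qla2xxx_wq);

Note that this waits for every work item on the shared queue (the scan,
NVMe abort and unknown-ATIO works of all hosts as well), which may be
more than qlt_stop_phase1() strictly needs.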