@@ -1361,93 +1361,94 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
return wq_work_done;
}
-static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
+static bool fnic_cleanup_cmd(struct request *req, void *data, bool reserved)
{
- int i;
struct fnic_io_req *io_req;
unsigned long flags = 0;
- struct scsi_cmnd *sc;
spinlock_t *io_lock;
unsigned long start_time = 0;
+ struct scsi_cmnd *sc = blk_mq_rq_to_pdu(req);
+ struct fc_lport *lp = shost_priv(sc->device->host);
+ struct fnic *fnic = lport_priv(lp);
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ int *exclude_id = data;
- for (i = 0; i < fnic->fnic_max_tag_id; i++) {
- if (i == exclude_id)
- continue;
+ if (*exclude_id == req->tag)
+ return true;
- io_lock = fnic_io_lock_tag(fnic, i);
- spin_lock_irqsave(io_lock, flags);
- sc = scsi_host_find_tag(fnic->lport->host, i);
- if (!sc) {
- spin_unlock_irqrestore(io_lock, flags);
- continue;
- }
+ io_lock = fnic_io_lock_tag(fnic, req->tag);
+ spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
- if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
- !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
- /*
- * We will be here only when FW completes reset
- * without sending completions for outstanding ios.
- */
- CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
- if (io_req && io_req->dr_done)
- complete(io_req->dr_done);
- else if (io_req && io_req->abts_done)
- complete(io_req->abts_done);
- spin_unlock_irqrestore(io_lock, flags);
- continue;
- } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
- spin_unlock_irqrestore(io_lock, flags);
- continue;
- }
- if (!io_req) {
- spin_unlock_irqrestore(io_lock, flags);
- continue;
- }
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+ !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+ /*
+ * We will be here only when FW completes reset
+ * without sending completions for outstanding ios.
+ */
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+ if (io_req && io_req->dr_done)
+ complete(io_req->dr_done);
+ else if (io_req && io_req->abts_done)
+ complete(io_req->abts_done);
+ goto unlock;
+ } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ goto unlock;
+ }
+ if (!io_req)
+ goto unlock;
- CMD_SP(sc) = NULL;
+ CMD_SP(sc) = NULL;
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(io_lock, flags);
- /*
- * If there is a scsi_cmnd associated with this io_req, then
- * free the corresponding state
- */
- start_time = io_req->start_time;
- fnic_release_ioreq_buf(fnic, io_req, sc);
- mempool_free(io_req, fnic->io_req_pool);
+ /*
+ * If there is a scsi_cmnd associated with this io_req, then
+ * free the corresponding state
+ */
+ start_time = io_req->start_time;
+ fnic_release_ioreq_buf(fnic, io_req, sc);
+ mempool_free(io_req, fnic->io_req_pool);
- sc->result = DID_TRANSPORT_DISRUPTED << 16;
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "%s: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
- __func__, sc->request->tag, sc,
- (jiffies - start_time));
+ sc->result = DID_TRANSPORT_DISRUPTED << 16;
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "%s: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
+ __func__, sc->request->tag, sc,
+ (jiffies - start_time));
- if (atomic64_read(&fnic->io_cmpl_skip))
- atomic64_dec(&fnic->io_cmpl_skip);
- else
- atomic64_inc(&fnic_stats->io_stats.io_completions);
+ if (atomic64_read(&fnic->io_cmpl_skip))
+ atomic64_dec(&fnic->io_cmpl_skip);
+ else
+ atomic64_inc(&fnic_stats->io_stats.io_completions);
- /* Complete the command to SCSI */
- if (sc->scsi_done) {
- if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED))
- shost_printk(KERN_ERR, fnic->lport->host,
- "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
- sc->request->tag, sc);
+ /* Complete the command to SCSI */
+ if (sc->scsi_done) {
+ if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED))
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
+ sc->request->tag, sc);
- FNIC_TRACE(fnic_cleanup_io,
- sc->device->host->host_no, i, sc,
- jiffies_to_msecs(jiffies - start_time),
- 0, ((u64)sc->cmnd[0] << 32 |
- (u64)sc->cmnd[2] << 24 |
- (u64)sc->cmnd[3] << 16 |
- (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ FNIC_TRACE(fnic_cleanup_io,
+ sc->device->host->host_no, req->tag, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 |
+ (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
- sc->scsi_done(sc);
- }
+ sc->scsi_done(sc);
}
+ return true;
+ unlock:
+ spin_unlock_irqrestore(io_lock, flags);
+ return true;
+}
+
+static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
+{
+ blk_mq_tagset_busy_iter(&fnic->lport->host->tag_set,
+ fnic_cleanup_cmd, &exclude_id);
}
void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
So far, scsi_host_find_tag() is supposed to be used in the fast path, and the passed tag should be active. Convert the SCSI command walking into blk_mq_tagset_busy_iter(), which is a common pattern for failure handling.

Cc: Satish Kharat <satishkh@cisco.com>
Cc: Karan Tilak Kumar <kartilak@cisco.com>
Cc: David Jeffery <djeffery@redhat.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 drivers/scsi/fnic/fnic_scsi.c | 139 +++++++++++++++++-----------------
 1 file changed, 70 insertions(+), 69 deletions(-)
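
For reference, blk_mq_tagset_busy_iter() invokes a driver-supplied callback once for each request that the block layer currently tracks as in flight, so the driver no longer probes every possible tag with scsi_host_find_tag(). Below is a minimal sketch of that pattern, using the three-argument callback signature this patch targets; the names my_iter_fn and count_busy, and the busy-counting purpose, are hypothetical and not part of the fnic change itself:

#include <linux/blk-mq.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/*
 * Hypothetical callback: invoked once per in-flight request in the tag set.
 * Returning true continues the iteration; returning false stops it early.
 */
static bool my_iter_fn(struct request *req, void *data, bool reserved)
{
	struct scsi_cmnd *sc = blk_mq_rq_to_pdu(req);	/* SCSI command lives in the request PDU */
	int *busy = data;				/* caller-provided cookie */

	if (sc)
		(*busy)++;

	return true;	/* keep walking the remaining busy requests */
}

/* Hypothetical helper: count in-flight commands on a Scsi_Host. */
static int count_busy(struct Scsi_Host *shost)
{
	int busy = 0;

	blk_mq_tagset_busy_iter(&shost->tag_set, my_iter_fn, &busy);
	return busy;
}

The fnic_cleanup_cmd() callback above follows the same shape: the per-tag body that used to live inside the for loop becomes the callback, and the exclude_id that the loop compared against is passed through the opaque data pointer.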