@@ -44,6 +44,9 @@
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
+/* UIC LINERESET: maximum time to wait for outstanding I/O to complete */
+#define LINERESET_IO_TIMEOUT_MS 30000 /* 30 seconds */
+
/* Task management command timeout */
#define TM_CMD_TIMEOUT 100 /* msecs */
@@ -5899,6 +5902,8 @@ static void ufshcd_err_handler(struct work_struct *work)
* check if power mode restore is needed.
*/
if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
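+ /* Note the start time; the wait for outstanding I/O below is bounded */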
+ ktime_t start = ktime_get();
+
hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
if (!hba->saved_uic_err)
hba->saved_err &= ~UIC_ERROR;
@@ -5906,6 +5911,20 @@ static void ufshcd_err_handler(struct work_struct *work)
if (ufshcd_is_pwr_mode_restore_needed(hba))
needs_restore = true;
spin_lock_irqsave(hba->host->host_lock, flags);
+ /* Wait for IO completion to avoid aborting IOs */
+ while (hba->outstanding_reqs) {
+ ufshcd_complete_requests(hba);
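+ /* Briefly release the lock so the completion path can make progress */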
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ schedule();
+ spin_lock_irqsave(hba->host->host_lock, flags);
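+ /* Do not let a stuck request stall the error handler forever */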
+ if (ktime_to_ms(ktime_sub(ktime_get(), start)) >
+ LINERESET_IO_TIMEOUT_MS) {
+ dev_err(hba->dev, "%s: timeout, outstanding=%x\n",
+ __func__, hba->outstanding_reqs);
+ break;
+ }
+ }
+
if (!hba->saved_err && !needs_restore)
goto skip_err_handling;
}
@@ -6302,9 +6321,13 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
}
- if (enabled_intr_status && retval == IRQ_NONE) {
- dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
- __func__, intr_status);
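+ /* Do not warn about unhandled interrupts while error recovery is in progress */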
+ if (enabled_intr_status && retval == IRQ_NONE &&
+ !ufshcd_eh_in_progress(hba)) {
+ dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
+ __func__,
+ intr_status,
+ hba->ufs_stats.last_intr_status,
+ enabled_intr_status);
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
}
@@ -6348,7 +6371,11 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
* Even though we use wait_event() which sleeps indefinitely,
* the maximum wait time is bounded by %TM_CMD_TIMEOUT.
*/
- req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
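+ /* Allocate a reserved tag without sleeping; on failure return the error to the caller */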
+ req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED |
+ BLK_MQ_REQ_NOWAIT);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
req->end_io_data = &wait;
free_slot = req->tag;
WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
@@ -9355,6 +9382,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->tmf_tag_set = (struct blk_mq_tag_set) {
.nr_hw_queues = 1,
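+ /* At least one reserved tag is needed for BLK_MQ_REQ_RESERVED allocations */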
+ .reserved_tags = 1,
.queue_depth = hba->nutmrs,
.ops = &ufshcd_tmf_ops,
.flags = BLK_MQ_F_NO_SCHED,