@@ -100,6 +100,10 @@ static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
bool recovery_needed = false;
+ host->err_stats[MMC_ERR_CMDQ_REQ_TIMEOUT]++;
+ mmc_log_string(host,
+ "Request timed out! Active reqs: %d Req: %p Tag: %d\n",
+ mmc_cqe_qcnt(mq), req, req->tag);
switch (issue_type) {
case MMC_ISSUE_ASYNC:
case MMC_ISSUE_DCMD:
@@ -825,6 +825,13 @@ irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
if ((status & (CQHCI_IS_RED | CQHCI_IS_GCE | CQHCI_IS_ICCE)) ||
cmd_error || data_error || ice_err){
mmc->need_hw_reset = true;
+ if (status & CQHCI_IS_RED)
+ mmc->err_stats[MMC_ERR_CMDQ_RED]++;
+ if (status & CQHCI_IS_GCE)
+ mmc->err_stats[MMC_ERR_CMDQ_GCE]++;
+ if (status & CQHCI_IS_ICCE)
+ mmc->err_stats[MMC_ERR_CMDQ_ICCE]++;
+
cqhci_error_irq(mmc, status, cmd_error, data_error);
}
@@ -3905,20 +3905,27 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
if (!host->cqe_on)
return false;
- if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
+ if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) {
*cmd_error = -EILSEQ;
- else if (intmask & SDHCI_INT_TIMEOUT)
+ if (intmask & SDHCI_INT_CRC)
+ host->mmc->err_stats[MMC_ERR_CMD_CRC]++;
+ } else if (intmask & SDHCI_INT_TIMEOUT) {
*cmd_error = -ETIMEDOUT;
- else
+ host->mmc->err_stats[MMC_ERR_CMD_TIMEOUT]++;
+ } else
*cmd_error = 0;
- if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
+ if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) {
*data_error = -EILSEQ;
- else if (intmask & SDHCI_INT_DATA_TIMEOUT)
+ if (intmask & SDHCI_INT_DATA_CRC)
+ host->mmc->err_stats[MMC_ERR_DAT_CRC]++;
+ } else if (intmask & SDHCI_INT_DATA_TIMEOUT) {
*data_error = -ETIMEDOUT;
- else if (intmask & SDHCI_INT_ADMA_ERROR)
+ host->mmc->err_stats[MMC_ERR_DAT_TIMEOUT]++;
+ } else if (intmask & SDHCI_INT_ADMA_ERROR) {
*data_error = -EIO;
- else
+ host->mmc->err_stats[MMC_ERR_ADMA]++;
+ } else
*data_error = 0;
/* Clear selected interrupts. */
@@ -80,6 +80,9 @@ struct mmc_ios {
bool enhanced_strobe; /* hs400es selection */
};
+#define NUM_LOG_PAGES 10
+#define mmc_log_string(mmc_host, fmt, ...) do { } while (0)
+
struct mmc_clk_phase {
bool valid;
u16 in_deg;
@@ -93,6 +96,24 @@ struct mmc_clk_phase_map {
struct mmc_host;
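+/* Host error statistics; each value is an index into mmc_host->err_stats[] */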
+enum {
+ MMC_ERR_CMD_TIMEOUT,
+ MMC_ERR_CMD_CRC,
+ MMC_ERR_DAT_TIMEOUT,
+ MMC_ERR_DAT_CRC,
+ MMC_ERR_AUTO_CMD,
+ MMC_ERR_ADMA,
+ MMC_ERR_TUNING,
+ MMC_ERR_CMDQ_RED,
+ MMC_ERR_CMDQ_GCE,
+ MMC_ERR_CMDQ_ICCE,
+ MMC_ERR_REQ_TIMEOUT,
+ MMC_ERR_CMDQ_REQ_TIMEOUT,
+ MMC_ERR_ICE_CFG,
+ MMC_ERR_MAX,
+};
+
struct mmc_host_ops {
/*
* It is optional for the host to implement pre_req and post_req in
@@ -471,6 +492,7 @@ struct mmc_host {
struct mmc_supply supply;
struct dentry *debugfs_root;
+	u32 err_stats[MMC_ERR_MAX];	/* one counter per MMC_ERR_* type */
/* Ongoing data transfer that allows commands during transfer */
struct mmc_request *ongoing_mrq;
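
Not part of the hunks above: the new err_stats[] counters are only useful if something reads them back, e.g. through a debugfs node. Below is a minimal sketch of one way to do that; the function name mmc_err_stats_show, the mmc_err_stat_names table, the generated mmc_err_stats_fops, and the "err_stats" node name are illustrative assumptions, not taken from this patch.

/*
 * Sketch only: dump mmc_host->err_stats[] through debugfs. Assumes it is
 * registered from the MMC core's existing debugfs setup, e.g. with
 * debugfs_create_file("err_stats", 0444, host->debugfs_root, host,
 * &mmc_err_stats_fops). Names here are illustrative.
 */
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/mmc/host.h>

static const char * const mmc_err_stat_names[MMC_ERR_MAX] = {
	[MMC_ERR_CMD_TIMEOUT]		= "cmd_timeout",
	[MMC_ERR_CMD_CRC]		= "cmd_crc",
	[MMC_ERR_DAT_TIMEOUT]		= "dat_timeout",
	[MMC_ERR_DAT_CRC]		= "dat_crc",
	[MMC_ERR_AUTO_CMD]		= "auto_cmd",
	[MMC_ERR_ADMA]			= "adma",
	[MMC_ERR_TUNING]		= "tuning",
	[MMC_ERR_CMDQ_RED]		= "cmdq_red",
	[MMC_ERR_CMDQ_GCE]		= "cmdq_gce",
	[MMC_ERR_CMDQ_ICCE]		= "cmdq_icce",
	[MMC_ERR_REQ_TIMEOUT]		= "req_timeout",
	[MMC_ERR_CMDQ_REQ_TIMEOUT]	= "cmdq_req_timeout",
	[MMC_ERR_ICE_CFG]		= "ice_cfg",
};

static int mmc_err_stats_show(struct seq_file *sf, void *data)
{
	/* private is the host pointer handed to debugfs_create_file() */
	struct mmc_host *host = sf->private;
	int i;

	for (i = 0; i < MMC_ERR_MAX; i++)
		seq_printf(sf, "%s: %u\n", mmc_err_stat_names[i],
			   host->err_stats[i]);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(mmc_err_stats);

Reading the resulting file would then give one "name: count" line per error class, which is enough to tell CRC noise on the data lines apart from, say, CMDQ controller faults when triaging a field failure.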