@@ -98,6 +98,19 @@ void sdhci_uhs2_reset(struct sdhci_host *host, u16 mask)
}
EXPORT_SYMBOL_GPL(sdhci_uhs2_reset);
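+/*
+ * Reset the CMD and DATA state machines.  If a UHS-II card is in the
+ * SD-TRAN state, also issue a UHS-II SW reset and re-program the
+ * interrupt enables afterwards.
+ */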
+static void sdhci_uhs2_reset_cmd_data(struct sdhci_host *host)
+{
+ sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+
+ if (host->mmc->uhs2_sd_tran) {
+ sdhci_uhs2_reset(host, SDHCI_UHS2_SW_RESET_SD);
+
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ sdhci_uhs2_clear_set_irqs(host, SDHCI_INT_ALL_MASK, SDHCI_UHS2_INT_ERROR_MASK);
+ }
+}
+
void sdhci_uhs2_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd)
{
struct mmc_host *mmc = host->mmc;
@@ -529,6 +542,217 @@ static int sdhci_uhs2_control(struct mmc_host *mmc, enum sd_uhs2_operation op)
return err;
}
+/*****************************************************************************\
+ * *
+ * Request done *
+ * *
+\*****************************************************************************/
+
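+/* As sdhci_needs_reset(), but also request a reset on any data error. */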
+static bool sdhci_uhs2_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
+{
+ return sdhci_needs_reset(host, mrq) ||
+ (!(host->flags & SDHCI_DEVICE_DEAD) && mrq->data && mrq->data->error);
+}
+
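+/*
+ * Complete one entry of mrqs_done[].  Returns true when there is nothing
+ * (more) to do, or when the reset has to wait for the CMD/DATA lines to
+ * become idle; returns false after handing a request back so that the
+ * caller loops again.
+ */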
+static bool sdhci_uhs2_request_done(struct sdhci_host *host)
+{
+ unsigned long flags;
+ struct mmc_request *mrq;
+ int i;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ for (i = 0; i < SDHCI_MAX_MRQS; i++) {
+ mrq = host->mrqs_done[i];
+ if (mrq)
+ break;
+ }
+
+ if (!mrq) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return true;
+ }
+
+ /*
+ * Always unmap the data buffers if they were mapped by
+ * sdhci_prepare_data() whenever we finish with a request.
+ * This avoids leaking DMA mappings on error.
+ */
+ if (host->flags & SDHCI_REQ_USE_DMA)
+ sdhci_request_done_dma(host, mrq);
+
+ /*
+ * The controller needs a reset of internal state machines
+ * upon error conditions.
+ */
+ if (sdhci_uhs2_needs_reset(host, mrq)) {
+ /*
+ * Do not finish until command and data lines are available for
+ * reset. Note there can only be one other mrq, so it cannot
+ * also be in mrqs_done, otherwise host->cmd and host->data_cmd
+ * would both be null.
+ */
+ if (host->cmd || host->data_cmd) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return true;
+ }
+
+ if (mrq->cmd->error || (mrq->data && mrq->data->error))
+ sdhci_uhs2_reset_cmd_data(host);
+ else
+ sdhci_uhs2_reset(host, SDHCI_UHS2_SW_RESET_SD);
+ host->pending_reset = false;
+ }
+
+ host->mrqs_done[i] = NULL;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ if (host->ops->request_done)
+ host->ops->request_done(host, mrq);
+ else
+ mmc_request_done(host->mmc, mrq);
+
+ return false;
+}
+
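+/* Completion work for UHS-II hosts; legacy cards fall back to sdhci_complete_work(). */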
+static void sdhci_uhs2_complete_work(struct work_struct *work)
+{
+ struct sdhci_host *host = container_of(work, struct sdhci_host,
+ complete_work);
+
+ if (!mmc_card_uhs2(host->mmc)) {
+ sdhci_complete_work(work);
+ return;
+ }
+
+ while (!sdhci_uhs2_request_done(host))
+ ;
+}
+
+/*****************************************************************************\
+ * *
+ * Interrupt handling *
+ * *
+\*****************************************************************************/
+
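+/* Translate UHS-II error interrupt bits into cmd/data error codes. */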
+static void __sdhci_uhs2_irq(struct sdhci_host *host, u32 uhs2mask)
+{
+ DBG("*** %s got UHS2 error interrupt: 0x%08x\n",
+ mmc_hostname(host->mmc), uhs2mask);
+
+ if (uhs2mask & SDHCI_UHS2_INT_CMD_ERR_MASK) {
+ if (!host->cmd) {
+ pr_err("%s: Got cmd interrupt 0x%08x but no cmd.\n",
+ mmc_hostname(host->mmc),
+ (unsigned int)uhs2mask);
+ sdhci_dumpregs(host);
+ return;
+ }
+ host->cmd->error = -EILSEQ;
+ if (uhs2mask & SDHCI_UHS2_INT_CMD_TIMEOUT)
+ host->cmd->error = -ETIMEDOUT;
+ }
+
+ if (uhs2mask & SDHCI_UHS2_INT_DATA_ERR_MASK) {
+ if (!host->data) {
+ pr_err("%s: Got data interrupt 0x%08x but no data.\n",
+ mmc_hostname(host->mmc),
+ (unsigned int)uhs2mask);
+ sdhci_dumpregs(host);
+ return;
+ }
+
+ if (uhs2mask & SDHCI_UHS2_INT_DEADLOCK_TIMEOUT) {
+ pr_err("%s: Got deadlock timeout interrupt 0x%08x\n",
+ mmc_hostname(host->mmc),
+ (unsigned int)uhs2mask);
+ host->data->error = -ETIMEDOUT;
+ } else if (uhs2mask & SDHCI_UHS2_INT_ADMA_ERROR) {
+ pr_err("%s: ADMA error = 0x %x\n",
+ mmc_hostname(host->mmc),
+ sdhci_readb(host, SDHCI_ADMA_ERROR));
+ host->data->error = -EIO;
+ } else {
+ host->data->error = -EILSEQ;
+ }
+ }
+}
+
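+/*
+ * Handle and clear the UHS-II specific error and command interrupts, and
+ * return the remaining bits for sdhci_irq() to process.
+ */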
+u32 sdhci_uhs2_irq(struct sdhci_host *host, u32 intmask)
+{
+ u32 mask = intmask, uhs2mask;
+
+ if (!mmc_card_uhs2(host->mmc))
+ goto out;
+
+ if (intmask & SDHCI_INT_ERROR) {
+ uhs2mask = sdhci_readl(host, SDHCI_UHS2_INT_STATUS);
+ if (!(uhs2mask & SDHCI_UHS2_INT_ERROR_MASK))
+ goto cmd_irq;
+
+ /* Clear error interrupts */
+ sdhci_writel(host, uhs2mask & SDHCI_UHS2_INT_ERROR_MASK,
+ SDHCI_UHS2_INT_STATUS);
+
+ /* Handle error interrupts */
+ __sdhci_uhs2_irq(host, uhs2mask);
+
+ /* Caller, sdhci_irq(), doesn't have to care about UHS-2 errors */
+ intmask &= ~SDHCI_INT_ERROR;
+ mask &= SDHCI_INT_ERROR;
+ }
+
+cmd_irq:
+ if (intmask & SDHCI_INT_CMD_MASK) {
+ /* Clear command interrupt */
+ sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK, SDHCI_INT_STATUS);
+
+ /* Caller, sdhci_irq(), doesn't have to care about UHS-2 commands */
+ intmask &= ~SDHCI_INT_CMD_MASK;
+ mask &= SDHCI_INT_CMD_MASK;
+ }
+
+ /* Clear already-handled interrupts. */
+ sdhci_writel(host, mask, SDHCI_INT_STATUS);
+
+out:
+ return intmask;
+}
+EXPORT_SYMBOL_GPL(sdhci_uhs2_irq);
+
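+/*
+ * Threaded interrupt handler for UHS-II hosts: drain the finished requests
+ * and handle deferred card-detect events.  Legacy cards are handled by
+ * sdhci_thread_irq().
+ */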
+static irqreturn_t sdhci_uhs2_thread_irq(int irq, void *dev_id)
+{
+ struct sdhci_host *host = dev_id;
+ struct mmc_command *cmd;
+ unsigned long flags;
+ u32 isr;
+
+ if (!mmc_card_uhs2(host->mmc))
+ return sdhci_thread_irq(irq, dev_id);
+
+ while (!sdhci_uhs2_request_done(host))
+ ;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ isr = host->thread_isr;
+ host->thread_isr = 0;
+
+ cmd = host->deferred_cmd;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
+ struct mmc_host *mmc = host->mmc;
+
+ mmc->ops->card_event(mmc);
+ mmc_detect_change(mmc, msecs_to_jiffies(200));
+ }
+
+ return IRQ_HANDLED;
+}
+
/*****************************************************************************\
* *
* Driver init/exit *
@@ -620,6 +844,9 @@ int sdhci_uhs2_add_host(struct sdhci_host *host)
if (mmc->caps2 & MMC_CAP2_SD_UHS2)
sdhci_uhs2_host_ops_init(host);
+ host->complete_work_fn = sdhci_uhs2_complete_work;
+ host->thread_irq_fn = sdhci_uhs2_thread_irq;
+
/* LED support not implemented for UHS2 */
host->quirks |= SDHCI_QUIRK_NO_LED;
@@ -176,6 +176,7 @@
struct sdhci_host;
struct mmc_command;
+struct mmc_request;
void sdhci_uhs2_dump_regs(struct sdhci_host *host);
void sdhci_uhs2_reset(struct sdhci_host *host, u16 mask);
@@ -184,5 +185,6 @@ void sdhci_uhs2_set_timeout(struct sdhci_host *host, struct mmc_command *cmd);
int sdhci_uhs2_add_host(struct sdhci_host *host);
void sdhci_uhs2_remove_host(struct sdhci_host *host, int dead);
void sdhci_uhs2_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set);
+u32 sdhci_uhs2_irq(struct sdhci_host *host, u32 intmask);
#endif /* __SDHCI_UHS2_H */
@@ -234,7 +234,7 @@ void sdhci_reset(struct sdhci_host *host, u8 mask)
}
EXPORT_SYMBOL_GPL(sdhci_reset);
-static bool sdhci_do_reset(struct sdhci_host *host, u8 mask)
+bool sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
struct mmc_host *mmc = host->mmc;
@@ -247,6 +247,7 @@ static bool sdhci_do_reset(struct sdhci_host *host, u8 mask)
return true;
}
+EXPORT_SYMBOL_GPL(sdhci_do_reset);
static void sdhci_reset_for_all(struct sdhci_host *host)
{
@@ -1489,7 +1490,7 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
-static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
+bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
return (!(host->flags & SDHCI_DEVICE_DEAD) &&
((mrq->cmd && mrq->cmd->error) ||
@@ -1497,6 +1498,7 @@ static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
(mrq->data && mrq->data->stop && mrq->data->stop->error) ||
(host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}
+EXPORT_SYMBOL_GPL(sdhci_needs_reset);
static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
{
@@ -3076,6 +3078,53 @@ static const struct mmc_host_ops sdhci_ops = {
* *
\*****************************************************************************/
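+/* DMA teardown after a finished request, shared with the UHS-II completion path. */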
+void sdhci_request_done_dma(struct sdhci_host *host, struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+
+ if (data && data->host_cookie == COOKIE_MAPPED) {
+ if (host->bounce_buffer) {
+ /*
+ * On reads, copy the bounced data into the
+ * sglist
+ */
+ if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
+ unsigned int length = data->bytes_xfered;
+
+ if (length > host->bounce_buffer_size) {
+ pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
+ mmc_hostname(host->mmc),
+ host->bounce_buffer_size,
+ data->bytes_xfered);
+ /* Cap it down and continue */
+ length = host->bounce_buffer_size;
+ }
+ dma_sync_single_for_cpu(mmc_dev(host->mmc),
+ host->bounce_addr,
+ host->bounce_buffer_size,
+ DMA_FROM_DEVICE);
+ sg_copy_from_buffer(data->sg,
+ data->sg_len,
+ host->bounce_buffer,
+ length);
+ } else {
+ /* No copying, just switch ownership */
+ dma_sync_single_for_cpu(mmc_dev(host->mmc),
+ host->bounce_addr,
+ host->bounce_buffer_size,
+ mmc_get_dma_dir(data));
+ }
+ } else {
+ /* Unmap the raw data */
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len,
+ mmc_get_dma_dir(data));
+ }
+ data->host_cookie = COOKIE_UNMAPPED;
+ }
+}
+EXPORT_SYMBOL_GPL(sdhci_request_done_dma);
+
static bool sdhci_request_done(struct sdhci_host *host)
{
unsigned long flags;
@@ -3140,48 +3189,7 @@ static bool sdhci_request_done(struct sdhci_host *host)
sdhci_set_mrq_done(host, mrq);
}
- if (data && data->host_cookie == COOKIE_MAPPED) {
- if (host->bounce_buffer) {
- /*
- * On reads, copy the bounced data into the
- * sglist
- */
- if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
- unsigned int length = data->bytes_xfered;
-
- if (length > host->bounce_buffer_size) {
- pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
- mmc_hostname(host->mmc),
- host->bounce_buffer_size,
- data->bytes_xfered);
- /* Cap it down and continue */
- length = host->bounce_buffer_size;
- }
- dma_sync_single_for_cpu(
- mmc_dev(host->mmc),
- host->bounce_addr,
- host->bounce_buffer_size,
- DMA_FROM_DEVICE);
- sg_copy_from_buffer(data->sg,
- data->sg_len,
- host->bounce_buffer,
- length);
- } else {
- /* No copying, just switch ownership */
- dma_sync_single_for_cpu(
- mmc_dev(host->mmc),
- host->bounce_addr,
- host->bounce_buffer_size,
- mmc_get_dma_dir(data));
- }
- } else {
- /* Unmap the raw data */
- dma_unmap_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len,
- mmc_get_dma_dir(data));
- }
- data->host_cookie = COOKIE_UNMAPPED;
- }
+ sdhci_request_done_dma(host, mrq);
}
host->mrqs_done[i] = NULL;
@@ -3196,7 +3204,7 @@ static bool sdhci_request_done(struct sdhci_host *host)
return false;
}
-static void sdhci_complete_work(struct work_struct *work)
+void sdhci_complete_work(struct work_struct *work)
{
struct sdhci_host *host = container_of(work, struct sdhci_host,
complete_work);
@@ -3204,6 +3212,7 @@ static void sdhci_complete_work(struct work_struct *work)
while (!sdhci_request_done(host))
;
}
+EXPORT_SYMBOL_GPL(sdhci_complete_work);
static void sdhci_timeout_timer(struct timer_list *t)
{
@@ -3665,7 +3674,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
return result;
}
-static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
+irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
struct sdhci_host *host = dev_id;
struct mmc_command *cmd;
@@ -3695,6 +3704,7 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+EXPORT_SYMBOL_GPL(sdhci_thread_irq);
/*****************************************************************************\
* *
@@ -4067,6 +4077,9 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
host->max_timeout_count = 0xE;
+ host->complete_work_fn = sdhci_complete_work;
+ host->thread_irq_fn = sdhci_thread_irq;
+
return host;
}
@@ -4831,7 +4844,7 @@ int __sdhci_add_host(struct sdhci_host *host)
if (!host->complete_wq)
return -ENOMEM;
- INIT_WORK(&host->complete_work, sdhci_complete_work);
+ INIT_WORK(&host->complete_work, host->complete_work_fn);
timer_setup(&host->timer, sdhci_timeout_timer, 0);
timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
@@ -4840,7 +4853,7 @@ int __sdhci_add_host(struct sdhci_host *host)
sdhci_init(host, 0);
- ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
+ ret = request_threaded_irq(host->irq, sdhci_irq, host->thread_irq_fn,
IRQF_SHARED, mmc_hostname(mmc), host);
if (ret) {
pr_err("%s: Failed to request IRQ %d: %d\n",
@@ -625,6 +625,9 @@ struct sdhci_host {
struct timer_list timer; /* Timer for timeouts */
struct timer_list data_timer; /* Timer for data timeouts */
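+ /* Completion work and threaded-IRQ hooks; UHS-II hosts override these */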
+ void (*complete_work_fn)(struct work_struct *work);
+ irqreturn_t (*thread_irq_fn)(int irq, void *dev_id);
+
#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)
struct dma_chan *rx_chan;
struct dma_chan *tx_chan;
@@ -827,6 +830,7 @@ static inline void sdhci_read_caps(struct sdhci_host *host)
__sdhci_read_caps(host, NULL, NULL, NULL);
}
+bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq);
u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
unsigned int *actual_clock);
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
@@ -845,6 +849,7 @@ void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq);
int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq);
void sdhci_set_bus_width(struct sdhci_host *host, int width);
void sdhci_reset(struct sdhci_host *host, u8 mask);
+bool sdhci_do_reset(struct sdhci_host *host, u8 mask);
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode);
@@ -854,6 +859,9 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
struct mmc_ios *ios);
void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable);
+void sdhci_request_done_dma(struct sdhci_host *host, struct mmc_request *mrq);
+void sdhci_complete_work(struct work_struct *work);
+irqreturn_t sdhci_thread_irq(int irq, void *dev_id);
void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
dma_addr_t addr, int len, unsigned int cmd);