--- a/drivers/bus/mhi/ep/internal.h
+++ b/drivers/bus/mhi/ep/internal.h
@@ -231,5 +231,7 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl);
int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl);
int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl);
void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl);
#endif
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -1176,6 +1176,64 @@ void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
}
EXPORT_SYMBOL_GPL(mhi_ep_power_down);

+void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_chan *mhi_chan;
+ u32 tmp;
+ int i;
+
+ for (i = 0; i < mhi_cntrl->max_chan; i++) {
+ mhi_chan = &mhi_cntrl->mhi_chan[i];
+
+ if (!mhi_chan->mhi_dev)
+ continue;
+
+ mutex_lock(&mhi_chan->lock);
+ /* Skip if the channel is not currently running */
+ tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
+ if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) {
+ mutex_unlock(&mhi_chan->lock);
+ continue;
+ }
+
+ dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n");
+ /* Set channel state to SUSPENDED */
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED);
+ mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
+ mutex_unlock(&mhi_chan->lock);
+ }
+}
+
+void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_chan *mhi_chan;
+ u32 tmp;
+ int i;
+
+ for (i = 0; i < mhi_cntrl->max_chan; i++) {
+ mhi_chan = &mhi_cntrl->mhi_chan[i];
+
+ if (!mhi_chan->mhi_dev)
+ continue;
+
+ mutex_lock(&mhi_chan->lock);
+ /* Skip if the channel is not currently suspended */
+ tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
+ if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) {
+ mutex_unlock(&mhi_chan->lock);
+ continue;
+ }
+
+ dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n");
+ /* Set channel state to RUNNING */
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
+ mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
+ mutex_unlock(&mhi_chan->lock);
+ }
+}
+
static void mhi_ep_release_device(struct device *dev)
{
struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
--- a/drivers/bus/mhi/ep/sm.c
+++ b/drivers/bus/mhi/ep/sm.c
@@ -93,8 +93,11 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
enum mhi_state old_state;
int ret;

+ /* If MHI is in M3, resume suspended channels */
spin_lock_bh(&mhi_cntrl->state_lock);
old_state = mhi_cntrl->mhi_state;
+ if (old_state == MHI_STATE_M3)
+ mhi_ep_resume_channels(mhi_cntrl);

ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
if (ret) {
@@ -140,6 +143,7 @@ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
}
spin_unlock_bh(&mhi_cntrl->state_lock);
+ mhi_ep_suspend_channels(mhi_cntrl);
/* Signal host that the device moved to M3 */
ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3);
Add support for suspending and resuming the channels in the MHI endpoint stack. The channels will be moved to the suspended state during the M3 state transition and will be resumed during the M0 transition.

Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
---
 drivers/bus/mhi/ep/internal.h |  2 ++
 drivers/bus/mhi/ep/main.c     | 58 +++++++++++++++++++++++++++++++++++
 drivers/bus/mhi/ep/sm.c       |  4 +++
 3 files changed, 64 insertions(+)
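
For readers less familiar with the bitfield helpers used above, the sketch below shows, in isolation, the read-modify-write pattern that the new suspend/resume helpers apply to the channel-state field of the cached channel context. It is illustrative only: CHAN_CTX_STATE_MASK, the DEMO_* state values, and demo_suspend_chcfg() are stand-ins invented for this note, not the driver's actual CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_* values, or helper names. In the patch itself the same pattern runs per channel under mhi_chan->lock and converts the little-endian chcfg word with le32_to_cpu()/cpu_to_le32().

/*
 * Illustrative sketch only -- the mask value, state values, and function
 * name below are stand-ins, not the MHI endpoint driver's definitions.
 */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Assumed layout: the channel state lives in the low bits of the chcfg word */
#define CHAN_CTX_STATE_MASK	GENMASK(7, 0)

enum demo_ch_state {
	DEMO_CH_STATE_RUNNING = 2,	/* example values for this sketch */
	DEMO_CH_STATE_SUSPENDED = 3,
};

/* Move a RUNNING channel context word to SUSPENDED, leave any other state untouched */
static u32 demo_suspend_chcfg(u32 chcfg)
{
	if (FIELD_GET(CHAN_CTX_STATE_MASK, chcfg) != DEMO_CH_STATE_RUNNING)
		return chcfg;

	chcfg &= ~CHAN_CTX_STATE_MASK;
	chcfg |= FIELD_PREP(CHAN_CTX_STATE_MASK, DEMO_CH_STATE_SUSPENDED);

	return chcfg;
}

Resuming is the mirror image: check for SUSPENDED, clear the field, and FIELD_PREP() the RUNNING value back in, which is what mhi_ep_resume_channels() does for each channel before releasing the channel mutex.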