@@ -1082,7 +1082,6 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
* ufshcd_suspend() re-enabling regulators while vreg is still
* in low-power mode.
*/
- ufs_mtk_vreg_set_lpm(hba, true);
err = ufs_mtk_mphy_power_on(hba, false);
if (err)
goto fail;
@@ -1106,12 +1105,13 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
 	int err;
 
+	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+		ufs_mtk_vreg_set_lpm(hba, false);
+
err = ufs_mtk_mphy_power_on(hba, true);
if (err)
 		goto fail;
 
-	ufs_mtk_vreg_set_lpm(hba, false);
-
if (ufshcd_is_link_hibern8(hba)) {
err = ufs_mtk_link_set_hpm(hba);
if (err)
@@ -1276,9 +1276,59 @@ static int ufs_mtk_remove(struct platform_device *pdev)
return 0;
 }
 
+static int ufs_mtk_system_suspend(struct device *dev)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	int ret;
+
+	ret = ufshcd_system_suspend(dev);
+
+	if (!ret)
+		ufs_mtk_vreg_set_lpm(hba, true);
+
+	return ret;
+}
+
+static int ufs_mtk_system_resume(struct device *dev)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	int ret;
+
+	ufs_mtk_vreg_set_lpm(hba, false);
+
+	ret = ufshcd_system_resume(dev);
+
+	return ret;
+}
+
+static int ufs_mtk_runtime_suspend(struct device *dev)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	int ret;
+
+	ret = ufshcd_runtime_suspend(dev);
+
+	if (!ret)
+		ufs_mtk_vreg_set_lpm(hba, true);
+
+	return ret;
+}
+
+static int ufs_mtk_runtime_resume(struct device *dev)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	int ret;
+
+	ufs_mtk_vreg_set_lpm(hba, false);
+
+	ret = ufshcd_runtime_resume(dev);
+
+	return ret;
+}
+
static const struct dev_pm_ops ufs_mtk_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
- SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend, ufs_mtk_system_resume)
+ SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend, ufs_mtk_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
};