@@ -639,13 +639,16 @@ static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg)
return rc;
}
-static int __maybe_unused amd_pmc_suspend(struct device *dev)
+static int amd_pmc_suspend(void *context, bool constraints)
{
- struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+ struct amd_pmc_dev *pdev = dev_get_drvdata((struct device *)context);
int rc;
u8 msg;
u32 arg = 1;
+ /* for enabling constraints checking in the future */
+ dev_dbg(pdev->dev, "LPI constraints were%smet.\n", constraints ? " " : " not ");
+
/* Reset and Start SMU logging - to monitor the s0i3 stats */
amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_RESET, 0);
amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_START, 0);
@@ -658,7 +661,7 @@ static int __maybe_unused amd_pmc_suspend(struct device *dev)
}
/* Dump the IdleMask before we send hint to SMU */
- amd_pmc_idlemask_read(pdev, dev, NULL);
+ amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
msg = amd_pmc_get_os_hint(pdev);
rc = amd_pmc_send_cmd(pdev, arg, NULL, msg, 0);
if (rc) {
@@ -681,28 +684,28 @@ static int __maybe_unused amd_pmc_suspend(struct device *dev)
return rc;
}
-static int __maybe_unused amd_pmc_resume(struct device *dev)
+static void amd_pmc_resume(void *context)
{
- struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+ struct amd_pmc_dev *pdev = dev_get_drvdata((struct device *)context);
int rc;
u8 msg;
msg = amd_pmc_get_os_hint(pdev);
rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, 0);
if (rc)
- dev_err(pdev->dev, "resume failed\n");
+ dev_err(pdev->dev, "resume failed: %d\n", rc);
/* Let SMU know that we are looking for stats */
amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
/* Dump the IdleMask to see the blockers */
- amd_pmc_idlemask_read(pdev, dev, NULL);
+ amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
/* Write data incremented by 1 to distinguish in stb_read */
if (enable_stb)
rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF + 1);
if (rc)
- dev_err(pdev->dev, "error writing to STB\n");
+ dev_err(pdev->dev, "error writing to STB: %d\n", rc);
/* Restore the QoS request back to defaults if it was set */
if (pdev->cpu_id == AMD_CPU_ID_CZN)
@@ -711,15 +714,8 @@ static int __maybe_unused amd_pmc_resume(struct device *dev)
/* Notify on failed entry */
amd_pmc_validate_deepest(pdev);
-
- return rc;
}
-static const struct dev_pm_ops amd_pmc_pm_ops = {
- .suspend_noirq = amd_pmc_suspend,
- .resume_noirq = amd_pmc_resume,
-};
-
static const struct pci_device_id pmc_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) },
@@ -805,6 +801,11 @@ static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf)
static int amd_pmc_probe(struct platform_device *pdev)
{
struct amd_pmc_dev *dev = &pmc;
+ struct lps0_callback_handler lps0_handler = {
+ .prepare_late_callback = amd_pmc_suspend,
+ .restore_early_callback = amd_pmc_resume,
+ .context = &pdev->dev,
+ };
struct pci_dev *rdev;
u32 base_addr_lo, base_addr_hi;
u64 base_addr, fch_phys_addr;
@@ -884,6 +885,10 @@ static int amd_pmc_probe(struct platform_device *pdev)
amd_pmc_get_smu_version(dev);
platform_set_drvdata(pdev, dev);
+ err = acpi_register_lps0_callbacks(&lps0_handler);
+ if (err)
+ goto err_pci_dev_put;
+
amd_pmc_dbgfs_register(dev);
cpu_latency_qos_add_request(&dev->amd_pmc_pm_qos_req, PM_QOS_DEFAULT_VALUE);
return 0;
@@ -896,7 +901,13 @@ static int amd_pmc_probe(struct platform_device *pdev)
static int amd_pmc_remove(struct platform_device *pdev)
{
struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
+ struct lps0_callback_handler lps0_handler = {
+ .prepare_late_callback = amd_pmc_suspend,
+ .restore_early_callback = amd_pmc_resume,
+ .context = &pdev->dev,
+ };
+ acpi_unregister_lps0_callbacks(&lps0_handler);
amd_pmc_dbgfs_unregister(dev);
pci_dev_put(dev->rdev);
mutex_destroy(&dev->lock);
@@ -917,7 +928,6 @@ static struct platform_driver amd_pmc_driver = {
.driver = {
.name = "amd_pmc",
.acpi_match_table = amd_pmc_acpi_ids,
- .pm = &amd_pmc_pm_ops,
},
.probe = amd_pmc_probe,
.remove = amd_pmc_remove,
The `OS_HINT` message is supposed to indicate that everything else that is
supposed to go into the deepest state has done so. This assumption is
invalid because:

1) The CPUs will still go in and out of the deepest state
2) Other devices may still run their `noirq` suspend routines
3) The LPS0 ACPI device will still run

To more closely mirror how this works on other operating systems, move the
`amd-pmc` suspend handler so that it runs as the very last step before the
s2idle loop, via an LPS0 callback.

Fixes: 8d89835b0467 ("PM: suspend: Do not pause cpuidle in the suspend-to-idle path")
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
changes from v1->v2:
 * adjust for changes in previous patches
 * display a debugging message for constraints

 drivers/platform/x86/amd-pmc.c | 42 +++++++++++++++++++++-------------
 1 file changed, 26 insertions(+), 16 deletions(-)
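The `struct lps0_callback_handler` and the `acpi_register_lps0_callbacks()` /
`acpi_unregister_lps0_callbacks()` helpers used above are introduced by an
earlier patch in this series and are not shown here. As a rough sketch only,
inferred from the call sites in this patch (the real names, signatures, and
header location are defined by that prior patch and may differ), the interface
presumably looks something like this:

```c
/*
 * Sketch of the assumed LPS0 callback interface from the earlier patch
 * in this series, reconstructed from how amd-pmc uses it above.
 * Presumably declared alongside the other LPS0 bits in the ACPI headers;
 * details (including the unregister return type) are not visible here.
 */
struct lps0_callback_handler {
	/*
	 * Runs as the last step before the s2idle loop; @constraints
	 * reports whether the LPI constraints were met.
	 */
	int (*prepare_late_callback)(void *context, bool constraints);
	/* Runs as the first step after leaving the s2idle loop. */
	void (*restore_early_callback)(void *context);
	/* Opaque pointer passed back to both callbacks. */
	void *context;
};

int acpi_register_lps0_callbacks(struct lps0_callback_handler *arg);
void acpi_unregister_lps0_callbacks(struct lps0_callback_handler *arg);
```

Since amd_pmc_remove() builds a fresh handler on the stack rather than reusing
the one registered in probe, unregistration presumably matches on the callback
pointers and context rather than on the handler's address.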