@@ -639,9 +639,9 @@ static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg)
return rc;
}
-static int __maybe_unused amd_pmc_suspend(struct device *dev)
+static int amd_pmc_suspend(void *context, bool constraints_met)
{
- struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+ struct amd_pmc_dev *pdev = dev_get_drvdata((struct device *)context);
int rc;
u8 msg;
u32 arg = 1;
@@ -658,7 +658,7 @@ static int __maybe_unused amd_pmc_suspend(struct device *dev)
}
/* Dump the IdleMask before we send hint to SMU */
- amd_pmc_idlemask_read(pdev, dev, NULL);
+ amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
msg = amd_pmc_get_os_hint(pdev);
rc = amd_pmc_send_cmd(pdev, arg, NULL, msg, 0);
if (rc) {
@@ -681,28 +681,28 @@ static int __maybe_unused amd_pmc_suspend(struct device *dev)
return rc;
}
-static int __maybe_unused amd_pmc_resume(struct device *dev)
+static void amd_pmc_resume(void *context)
{
- struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+ struct amd_pmc_dev *pdev = dev_get_drvdata((struct device *)context);
int rc;
u8 msg;
msg = amd_pmc_get_os_hint(pdev);
rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, 0);
if (rc)
- dev_err(pdev->dev, "resume failed\n");
+ dev_err(pdev->dev, "resume failed: %d\n", rc);
/* Let SMU know that we are looking for stats */
amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
/* Dump the IdleMask to see the blockers */
- amd_pmc_idlemask_read(pdev, dev, NULL);
+ amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
/* Write data incremented by 1 to distinguish in stb_read */
if (enable_stb)
rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF + 1);
if (rc)
- dev_err(pdev->dev, "error writing to STB\n");
+ dev_err(pdev->dev, "error writing to STB: %d\n", rc);
/* Restore the QoS request back to defaults if it was set */
if (pdev->cpu_id == AMD_CPU_ID_CZN)
@@ -711,15 +711,8 @@ static int __maybe_unused amd_pmc_resume(struct device *dev)
/* Notify on failed entry */
amd_pmc_validate_deepest(pdev);
-
- return rc;
}
-static const struct dev_pm_ops amd_pmc_pm_ops = {
- .suspend_noirq = amd_pmc_suspend,
- .resume_noirq = amd_pmc_resume,
-};
-
static const struct pci_device_id pmc_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) },
@@ -884,6 +877,12 @@ static int amd_pmc_probe(struct platform_device *pdev)
amd_pmc_get_smu_version(dev);
platform_set_drvdata(pdev, dev);
+ err = acpi_register_lps0_callbacks(amd_pmc_suspend,
+ amd_pmc_resume,
+ &pdev->dev);
+ if (err)
+ goto err_pci_dev_put;
+
amd_pmc_dbgfs_register(dev);
cpu_latency_qos_add_request(&dev->amd_pmc_pm_qos_req, PM_QOS_DEFAULT_VALUE);
return 0;
@@ -897,6 +896,9 @@ static int amd_pmc_remove(struct platform_device *pdev)
{
struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
+ acpi_unregister_lps0_callbacks(amd_pmc_suspend,
+ amd_pmc_resume,
+ &pdev->dev);
amd_pmc_dbgfs_unregister(dev);
pci_dev_put(dev->rdev);
mutex_destroy(&dev->lock);
@@ -917,7 +919,6 @@ static struct platform_driver amd_pmc_driver = {
.driver = {
.name = "amd_pmc",
.acpi_match_table = amd_pmc_acpi_ids,
- .pm = &amd_pmc_pm_ops,
},
.probe = amd_pmc_probe,
.remove = amd_pmc_remove,
The `OS_HINT` message is supposed to indicate that everything else that is
supposed to go into the deepest state has done so. This assumption is
invalid as:

1) The CPUs will still go in and out of the deepest state
2) Other devices may still run their `noirq` suspend routines
3) The LPS0 ACPI device will still run

To more closely mirror how this works on other operating systems, move the
`amd-pmc` suspend to the very last thing before the s2idle loop via an lps0
callback.

Fixes: 8d89835b0467 ("PM: suspend: Do not pause cpuidle in the suspend-to-idle path")
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
 drivers/platform/x86/amd-pmc.c | 33 +++++++++++++++++----------------
 1 file changed, 17 insertions(+), 16 deletions(-)
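
Reviewer note: below is a minimal sketch of how an acpi_register_lps0_callbacks()-style
hook could be driven from the s2idle core so that the registered suspend hook runs as the
very last step before the s2idle loop, and the resume hook runs first on the way out. The
callback signatures match what this patch uses for amd_pmc_suspend()/amd_pmc_resume(); the
lps0_callback_entry structure, list, and helper names are illustrative assumptions for this
note, not the existing drivers/acpi/x86/s2idle.c code.

#include <linux/list.h>

struct lps0_callback_entry {
	struct list_head list;
	int (*prepare)(void *context, bool constraints_met);
	void (*restore)(void *context);
	void *context;
};

static LIST_HEAD(lps0_callback_list);

/* Run as the very last step before entering the s2idle loop */
static void lps0_run_prepare_callbacks(bool constraints_met)
{
	struct lps0_callback_entry *entry;

	list_for_each_entry(entry, &lps0_callback_list, list)
		entry->prepare(entry->context, constraints_met);
}

/* Run first on the way out of s2idle, before other resume steps */
static void lps0_run_restore_callbacks(void)
{
	struct lps0_callback_entry *entry;

	list_for_each_entry(entry, &lps0_callback_list, list)
		entry->restore(entry->context);
}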