@@ -86,6 +86,8 @@ struct lpi_device_constraint_amd {
 	int min_dstate;
 };
 
+static LIST_HEAD(lps0_s2idle_devops_head);
+
 static struct lpi_constraints *lpi_constraints_table;
 static int lpi_constraints_table_size;
 static int rev_id;
@@ -444,6 +446,8 @@ static struct acpi_scan_handler lps0_handler = {
 
 int acpi_s2idle_prepare_late(void)
 {
+	struct acpi_s2idle_dev_ops *handler;
+
 	if (!lps0_device_handle || sleep_no_lps0)
 		return 0;
 
@@ -474,14 +478,26 @@ int acpi_s2idle_prepare_late(void)
 		acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
 				lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
 	}
+
+	list_for_each_entry(handler, &lps0_s2idle_devops_head, list_node) {
+		if (handler->prepare)
+			handler->prepare();
+	}
+
 	return 0;
 }
 
 void acpi_s2idle_restore_early(void)
 {
+	struct acpi_s2idle_dev_ops *handler;
+
 	if (!lps0_device_handle || sleep_no_lps0)
 		return;
 
+	list_for_each_entry(handler, &lps0_s2idle_devops_head, list_node)
+		if (handler->restore)
+			handler->restore();
+
 	/* Modern standby exit */
 	if (lps0_dsm_func_mask_microsoft > 0)
 		acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
 				lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
@@ -524,4 +540,28 @@ void acpi_s2idle_setup(void)
 	s2idle_set_ops(&acpi_s2idle_ops_lps0);
 }
 
+int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg)
+{
+	if (!lps0_device_handle || sleep_no_lps0)
+		return -ENODEV;
+
+	lock_system_sleep();
+	list_add(&arg->list_node, &lps0_s2idle_devops_head);
+	unlock_system_sleep();
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_register_lps0_dev);
+
+void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg)
+{
+	if (!lps0_device_handle || sleep_no_lps0)
+		return;
+
+	lock_system_sleep();
+	list_del(&arg->list_node);
+	unlock_system_sleep();
+}
+EXPORT_SYMBOL_GPL(acpi_unregister_lps0_dev);
+
 #endif /* CONFIG_SUSPEND */
@@ -1023,7 +1023,15 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
 
 acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
 					   u32 val_a, u32 val_b);
-
+#ifdef CONFIG_X86
+struct acpi_s2idle_dev_ops {
+	struct list_head list_node;
+	void (*prepare)(void);
+	void (*restore)(void);
+};
+int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg);
+void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg);
+#endif /* CONFIG_X86 */
 #ifndef CONFIG_IA64
 void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
 #else
Currently the latest thing run during a suspend to idle attempt is the
LPS0 `prepare_late` callback and the earliest thing is the
`restore_early` callback.  There is a desire for the `amd-pmc` driver
to suspend later in the suspend process (ideally the very last thing),
so create a callback that it or any other driver can hook into to do
this.

Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
changes from v4->v5:
 * fix a check for handler->prepare
   Reported by Dan Carpenter <dan.carpenter@oracle.com>
changes from v3->v4:
 * drop use of mutex, use lock_system_sleep instead
 * don't pass argument of context, driver will maintain this internally
 * don't allow failing prepare stage
 * don't allow unregistering if sleep_no_lps0 is set
changes from v2->v3:
 * Check that callbacks exist before calling
changes from v1->v2:
 * Change register/unregister arguments to be struct

 drivers/acpi/x86/s2idle.c | 40 +++++++++++++++++++++++++++++++++++++++
 include/linux/acpi.h      | 10 +++++++++-
 2 files changed, 49 insertions(+), 1 deletion(-)
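For reference, below is a minimal sketch of how a consumer driver (for
example `amd-pmc`) could hook into the new callbacks.  The `my_drv_*`
names and the platform-driver probe/remove wrapping are illustrative
assumptions only, not code from this patch:

```c
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static void my_drv_s2idle_prepare(void)
{
	/* Called from acpi_s2idle_prepare_late(), after the LPS0 _DSM
	 * entry calls, i.e. as late as possible before s2idle entry. */
}

static void my_drv_s2idle_restore(void)
{
	/* Called from acpi_s2idle_restore_early(), before the LPS0 _DSM
	 * exit calls, i.e. as early as possible on resume. */
}

static struct acpi_s2idle_dev_ops my_drv_s2idle_dev_ops = {
	.prepare = my_drv_s2idle_prepare,
	.restore = my_drv_s2idle_restore,
};

static int my_drv_probe(struct platform_device *pdev)
{
	/* Returns -ENODEV when there is no LPS0 device or
	 * sleep_no_lps0 is set. */
	return acpi_register_lps0_dev(&my_drv_s2idle_dev_ops);
}

static int my_drv_remove(struct platform_device *pdev)
{
	acpi_unregister_lps0_dev(&my_drv_s2idle_dev_ops);
	return 0;
}
```

Wiring this into a `struct platform_driver` and module init/exit is
omitted for brevity.  Note that `prepare()` cannot fail (it returns
void), and registration/unregistration are serialized against a
suspend in progress via lock_system_sleep().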