@@ -86,6 +86,9 @@ struct lpi_device_constraint_amd {
int min_dstate;
};

+static LIST_HEAD(lps0_callback_handler_head);
+static DEFINE_MUTEX(lps0_callback_handler_mutex);
+
static struct lpi_constraints *lpi_constraints_table;
static int lpi_constraints_table_size;
static int rev_id;
@@ -444,6 +447,9 @@ static struct acpi_scan_handler lps0_handler = {

int acpi_s2idle_prepare_late(void)
{
+ struct lps0_callback_handler *handler;
+ int rc = 0;
+
if (!lps0_device_handle || sleep_no_lps0)
return 0;

@@ -474,14 +480,30 @@ int acpi_s2idle_prepare_late(void)
acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
}
- return 0;
+
+ mutex_lock(&lps0_callback_handler_mutex);
+ list_for_each_entry(handler, &lps0_callback_handler_head, list_node) {
+ rc = handler->prepare_late_callback(handler->context);
+ if (rc)
+ break;
+ }
+ mutex_unlock(&lps0_callback_handler_mutex);
+
+ return rc;
}

void acpi_s2idle_restore_early(void)
{
+ struct lps0_callback_handler *handler;
+
if (!lps0_device_handle || sleep_no_lps0)
return;

+ mutex_lock(&lps0_callback_handler_mutex);
+ list_for_each_entry(handler, &lps0_callback_handler_head, list_node)
+ handler->restore_early_callback(handler->context);
+ mutex_unlock(&lps0_callback_handler_mutex);
+
/* Modern standby exit */
if (lps0_dsm_func_mask_microsoft > 0)
acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
@@ -524,4 +546,44 @@ void acpi_s2idle_setup(void)
s2idle_set_ops(&acpi_s2idle_ops_lps0);
}

+int acpi_register_lps0_callbacks(struct lps0_callback_handler *arg)
+{
+ struct lps0_callback_handler *handler;
+
+ if (!lps0_device_handle || sleep_no_lps0)
+ return -ENODEV;
+
+ handler = kmalloc(sizeof(*handler), GFP_KERNEL);
+ if (!handler)
+ return -ENOMEM;
+ handler->prepare_late_callback = arg->prepare_late_callback;
+ handler->restore_early_callback = arg->restore_early_callback;
+ handler->context = arg->context;
+
+ mutex_lock(&lps0_callback_handler_mutex);
+ list_add(&handler->list_node, &lps0_callback_handler_head);
+ mutex_unlock(&lps0_callback_handler_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_register_lps0_callbacks);
+
+void acpi_unregister_lps0_callbacks(struct lps0_callback_handler *arg)
+{
+ struct lps0_callback_handler *handler;
+
+ mutex_lock(&lps0_callback_handler_mutex);
+ list_for_each_entry(handler, &lps0_callback_handler_head, list_node) {
+ if (handler->prepare_late_callback == arg->prepare_late_callback &&
+ handler->restore_early_callback == arg->restore_early_callback &&
+ handler->context == arg->context) {
+ list_del(&handler->list_node);
+ kfree(handler);
+ break;
+ }
+ }
+ mutex_unlock(&lps0_callback_handler_mutex);
+}
+EXPORT_SYMBOL_GPL(acpi_unregister_lps0_callbacks);
+
#endif /* CONFIG_SUSPEND */
@@ -1023,7 +1023,16 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,

acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
u32 val_a, u32 val_b);
-
+#ifdef CONFIG_X86
+struct lps0_callback_handler {
+ struct list_head list_node;
+ int (*prepare_late_callback)(void *context);
+ void (*restore_early_callback)(void *context);
+ void *context;
+};
+int acpi_register_lps0_callbacks(struct lps0_callback_handler *arg);
+void acpi_unregister_lps0_callbacks(struct lps0_callback_handler *arg);
+#endif /* CONFIG_X86 */
#ifndef CONFIG_IA64
void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
#else
Currently the latest thing run during a suspend-to-idle attempt is the
LPS0 `prepare_late` callback, and the earliest thing run during resume
is the `restore_early` callback.

There is a desire for the `amd-pmc` driver to suspend later in the
suspend process (ideally the very last thing), so create a callback
that it or any other driver can hook into to do this.

Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
changes from v1->v2:
 * Change register/unregister arguments to be a struct

 drivers/acpi/x86/s2idle.c | 64 +++++++++++++++++++++++++++++++++++++-
 include/linux/acpi.h      | 11 +++++++-
 2 files changed, 73 insertions(+), 2 deletions(-)
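For illustration, a minimal sketch of a driver-side consumer of the new
interface follows; the my_drv_* names and the dev member are
hypothetical and not part of this patch:

#include <linux/acpi.h>

struct my_drv_data {
	struct device *dev;
};

/* Runs as the final step of acpi_s2idle_prepare_late(); a non-zero
 * return value is propagated and aborts the suspend attempt. */
static int my_drv_prepare_late(void *context)
{
	struct my_drv_data *data = context;

	dev_dbg(data->dev, "last device activity before s2idle entry\n");
	return 0;
}

/* Runs as the first step of acpi_s2idle_restore_early(). */
static void my_drv_restore_early(void *context)
{
	struct my_drv_data *data = context;

	dev_dbg(data->dev, "first device activity on s2idle exit\n");
}

static int my_drv_register_lps0(struct my_drv_data *data)
{
	struct lps0_callback_handler handler = {
		.prepare_late_callback = my_drv_prepare_late,
		.restore_early_callback = my_drv_restore_early,
		.context = data,
	};

	/* Fails with -ENODEV when there is no usable LPS0 device. */
	return acpi_register_lps0_callbacks(&handler);
}

static void my_drv_unregister_lps0(struct my_drv_data *data)
{
	struct lps0_callback_handler handler = {
		.prepare_late_callback = my_drv_prepare_late,
		.restore_early_callback = my_drv_restore_early,
		.context = data,
	};

	acpi_unregister_lps0_callbacks(&handler);
}

Passing a stack-local struct works here because
acpi_register_lps0_callbacks() copies the two callbacks and the context
into its own allocation, and acpi_unregister_lps0_callbacks() matches
entries on those three fields rather than on the pointer it is given,
so the caller's struct does not need to outlive either call.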