@@ -54,8 +54,13 @@
/*
* Initialize the stackprotector canary value.
*
- * NOTE: this must only be called from functions that never return,
+ * NOTE: this must only be called from functions that never return
* and it must always be inlined.
+ *
+ * In addition, it should be called from a compilation unit for which
+ * stack protector is disabled. Alternatively, the caller should not end
+ * with a function call which gets tail-call optimized as that would
+ * lead to checking a modified canary value.
*/
static __always_inline void boot_init_stack_canary(void)
{
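
The constraint spelled out in the comment above is easiest to see in a sketch. The function below is hypothetical and not part of this patch (only boot_init_stack_canary() and prevent_tail_call_optimization() are names the patch actually uses); assume it is compiled with stack protector enabled and, like the real callers patched further down, never returns.

extern void next_stage(void);		/* made-up callee that never returns */

static void bring_up(void)
{
	boot_init_stack_canary();	/* the reference canary is reseeded here;
					 * the copy saved by this function's
					 * prologue still holds the old value */

	next_stage();			/* never returns, so the now-mismatching
					 * epilogue canary check is normally never
					 * reached -- unless the compiler turns
					 * this into a tail call (a jmp), in which
					 * case the epilogue, check included, runs
					 * first and trips __stack_chk_fail() on a
					 * perfectly healthy stack */

	prevent_tail_call_optimization();	/* keeps the call above out of
						 * tail position */
}
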
@@ -243,6 +243,14 @@ static void notrace start_secondary(void
wmb();
cpu_startup_entry(CPUHP_ONLINE);
+
+ /*
+ * Prevent tail call to cpu_startup_entry() because the stack protector
+ * guard has been changed a couple of function calls up, in
+ * boot_init_stack_canary() and must not be checked before tail calling
+ * another function.
+ */
+ prevent_tail_call_optimization();
}
void __init smp_store_boot_cpu_info(void)
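
What "checked" means concretely: with -fstack-protector the compiler saves a copy of the reference canary in each protected function's prologue and compares it back in the epilogue. The snippet below is a rough, non-authoritative model of that instrumentation, not compiler output; instrumented_example() is made up, and __stack_chk_guard stands in for the reference canary (x86-64 actually keeps it in per-CPU storage rather than in this global symbol).

extern unsigned long __stack_chk_guard;		/* reference canary */
extern void __stack_chk_fail(void);		/* canary-mismatch handler */

void instrumented_example(void)
{
	unsigned long canary = __stack_chk_guard;	/* prologue: save a copy */

	/*
	 * ... function body.  If boot_init_stack_canary() runs in here,
	 * the reference value changes while the saved copy does not.
	 */

	if (canary != __stack_chk_guard)		/* epilogue: compare */
		__stack_chk_fail();
}
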
@@ -116,6 +116,7 @@ asmlinkage __visible void cpu_bringup_an
#endif
cpu_bringup();
cpu_startup_entry(CPUHP_ONLINE);
+ prevent_tail_call_optimization();
}
static void xen_smp_intr_free(unsigned int cpu)
@@ -556,4 +556,11 @@ static __always_inline void __write_once
# define __kprobes
# define nokprobe_inline inline
#endif
+
+/*
+ * This is needed in functions which generate the stack canary, see
+ * arch/x86/kernel/smpboot.c::start_secondary() for an example.
+ */
+#define prevent_tail_call_optimization() mb()
+
#endif /* __LINUX_COMPILER_H */
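
A note on why a memory barrier does the job: the compiler may only turn a call into a tail call when nothing in the caller has to execute after the call returns. mb() expands to a volatile asm with a "memory" clobber (on x86 it also emits a real ordering instruction), which the compiler can neither delete nor move above the call, so the preceding call stays a genuine call and the epilogue, canary check included, is never reached. The stand-in below is hypothetical and for illustration only; the kernel's actual definitions live in the per-architecture barrier.h headers, and never_returns()/caller_sketch() are made-up names.

/* Illustrative stand-in, not the kernel's definition: any volatile asm
 * with a "memory" clobber placed after the final call is enough to keep
 * that call out of tail position. */
#define BARRIER_SKETCH()	__asm__ __volatile__("" ::: "memory")

extern void never_returns(void);	/* made-up callee */

void caller_sketch(void)
{
	never_returns();	/* remains a "call", not a "jmp" */
	BARRIER_SKETCH();	/* must (notionally) execute after the call */
}
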
@@ -683,6 +683,8 @@ asmlinkage __visible void __init start_k
/* Do the rest non-__init'ed, we're now alive */
rest_init();
+
+ prevent_tail_call_optimization();
}
/* Call all constructor functions linked into the kernel. */