@@ -26,10 +26,11 @@
#define KFPU_MXCSR _BITUL(1) /* MXCSR will be initialized */

extern void kernel_fpu_begin_mask(unsigned int kfpu_mask);
extern void kernel_fpu_end(void);
extern bool irq_fpu_usable(void);
+extern void fpu_save_state(void);
extern void fpregs_mark_activate(void);

/* Code that is unaware of kernel_fpu_begin_mask() can use this */
static inline void kernel_fpu_begin(void)
{
@@ -118,11 +118,11 @@ static void update_avx_timestamp(struct fpu *fpu)
/*
* Save the FPU register state in fpu->fpstate->regs. The register state is
* preserved.
*
- * Must be called with fpregs_lock() held.
+ * Must be called with fpregs_lock() held or hardirqs disabled.
*
* The legacy FNSAVE instruction clears all FPU state unconditionally, so
* register state has to be reloaded. That might be a pointless exercise
* when the FPU is going to be used by another task right after that. But
* this only affects 20+ years old 32bit systems and avoids conditionals all
@@ -431,26 +431,31 @@ int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf,
return copy_uabi_from_kernel_to_xstate(kstate, ustate, vpkru);
}
EXPORT_SYMBOL_GPL(fpu_copy_uabi_to_guest_fpstate);
#endif /* CONFIG_KVM */

+static __always_inline void __fpu_save_state(void)
+{
+ if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER)) &&
+ !test_thread_flag(TIF_NEED_FPU_LOAD)) {
+ set_thread_flag(TIF_NEED_FPU_LOAD);
+ save_fpregs_to_fpstate(x86_task_fpu(current));
+ }
+ __cpu_invalidate_fpregs_state();
+}
+
void kernel_fpu_begin_mask(unsigned int kfpu_mask)
{
if (!irqs_disabled())
fpregs_lock();
WARN_ON_FPU(!irq_fpu_usable());
WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
this_cpu_write(in_kernel_fpu, true);
- if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER)) &&
- !test_thread_flag(TIF_NEED_FPU_LOAD)) {
- set_thread_flag(TIF_NEED_FPU_LOAD);
- save_fpregs_to_fpstate(x86_task_fpu(current));
- }
- __cpu_invalidate_fpregs_state();
+ __fpu_save_state();
/* Put sane initial values into the control registers. */
if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
ldmxcsr(MXCSR_DEFAULT);
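For reference, kernel-mode FPU users of the API reworked above typically follow the pattern sketched below; this sketch is not part of the patch, and my_simd_copy() is an invented example helper.

#include <linux/string.h>
#include <asm/fpu/api.h>

static void my_simd_copy(void *dst, const void *src, size_t len)
{
        if (!irq_fpu_usable()) {
                memcpy(dst, src, len);  /* plain scalar fallback */
                return;
        }

        kernel_fpu_begin();     /* saves current's FPU regs via __fpu_save_state() */
        memcpy(dst, src, len);  /* stand-in for the actual SSE/AVX code */
        kernel_fpu_end();       /* user regs are reloaded lazily via TIF_NEED_FPU_LOAD */
}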
@@ -467,10 +472,34 @@ void kernel_fpu_end(void)
if (!irqs_disabled())
fpregs_unlock();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);

+#ifdef CONFIG_PM_SLEEP
+/*
+ * If the FPU registers are live for the current task, save them to current's
+ * memory register state and set TIF_NEED_FPU_LOAD. This is used by the suspend
+ * and kexec code to prepare for the FPU registers being clobbered. It may be
+ * called with hardirqs already disabled, and unlike kernel_fpu_begin() it does
+ * not initialize the FPU control registers for kernel-mode FPU use.
+ */
+void fpu_save_state(void)
+{
+ unsigned long flags;
+
+ WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
+
+ /*
+ * This may be called with hardirqs already disabled, where fpregs_lock()
+ * (local_bh_disable() on !PREEMPT_RT) must not be used. Disabling hardirqs
+ * protects the fpregs just as well, so use local_irq_save/restore().
+ */
+ local_irq_save(flags);
+ __fpu_save_state();
+ local_irq_restore(flags);
+}
+#endif /* CONFIG_PM_SLEEP */
+
/*
* Sync the FPU register state to current's memory register state when the
* current task owns the FPU. The hardware register state is preserved.
*/
void fpu_sync_fpstate(struct fpu *fpu)
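To illustrate how the new fpu_save_state() helper is meant to be used, here is a sketch of a hypothetical suspend-path caller; the real call sites are not part of this hunk, and the function name is invented.

#include <asm/fpu/api.h>

static void example_prepare_for_fpu_clobber(void)
{
        /*
         * May run with hardirqs already disabled; fpu_save_state() copes with
         * that by using local_irq_save() internally instead of fpregs_lock().
         */
        fpu_save_state();

        /*
         * From here on, firmware or a kexec'd kernel may clobber the FPU
         * registers. current's state is safe in its memory fpstate and will
         * be reloaded before returning to user mode because TIF_NEED_FPU_LOAD
         * is set.
         */
}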