// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/api.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/sched.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/types.h>
#include <asm/traps.h>
#include <asm/irq_regs.h>

#include <linux/hardirq.h>
#include <linux/pkeys.h>
#include <linux/vmalloc.h>

#include "context.h"
#include "internal.h"
#include "legacy.h"
#include "xstate.h"

#define CREATE_TRACE_POINTS
#include <asm/trace/fpu.h>

#ifdef CONFIG_X86_64
DEFINE_STATIC_KEY_FALSE(__fpu_state_size_dynamic);
DEFINE_PER_CPU(u64, xfd_state);
#endif

/* The FPU state configuration data for kernel and user space */
struct fpu_state_config fpu_kernel_cfg __ro_after_init;
struct fpu_state_config fpu_user_cfg __ro_after_init;

/*
 * Represents the initial FPU state. It's mostly (but not completely) zeroes,
 * depending on the FPU hardware format:
 */
struct fpstate init_fpstate __ro_after_init;

/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}

static bool interrupted_kernel_fpu_idle(void)
{
	return !kernel_fpu_disabled();
}

/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();
	return regs && user_mode(regs);
}

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);

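/*
 * Illustrative usage sketch (not part of this file): opportunistic FPU
 * use from a context that may be an IRQ. kernel_fpu_begin() and
 * kernel_fpu_end() are the real API; do_simd_copy() is a made-up
 * placeholder for some SIMD-accelerated routine.
 *
 *	static void do_simd_copy(void *dst, const void *src, size_t len)
 *	{
 *		if (!irq_fpu_usable()) {
 *			memcpy(dst, src, len);	// scalar fallback
 *			return;
 *		}
 *		kernel_fpu_begin();
 *		// ... SSE/AVX accelerated copy ...
 *		kernel_fpu_end();
 *	}
 */
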
/*
 * Save the FPU register state in fpu->fpstate->regs. The register state is
 * preserved.
 *
 * Must be called with fpregs_lock() held.
 *
 * The legacy FNSAVE instruction clears all FPU state unconditionally, so
 * register state has to be reloaded. That might be a pointless exercise
 * when the FPU is going to be used by another task right after that. But
 * this only affects 20+ years old 32bit systems and avoids conditionals all
 * over the place.
 *
 * FXSAVE and all XSAVE variants preserve the FPU register state.
 */
void save_fpregs_to_fpstate(struct fpu *fpu)
{
	if (likely(use_xsave())) {
		os_xsave(fpu->fpstate);

		/*
		 * AVX512 state is tracked here because its use is
		 * known to slow the max clock speed of the core.
		 */
		if (fpu->fpstate->regs.xsave.header.xfeatures & XFEATURE_MASK_AVX512)
			fpu->avx512_timestamp = jiffies;
		return;
	}

	if (likely(use_fxsr())) {
		fxsave(&fpu->fpstate->regs.fxsave);
		return;
	}

	/*
	 * Legacy FPU register saving, FNSAVE always clears FPU registers,
	 * so we have to reload them from the memory state.
	 */
	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->fpstate->regs.fsave));
	frstor(&fpu->fpstate->regs.fsave);
}

void restore_fpregs_from_fpstate(struct fpstate *fpstate, u64 mask)
{
	/*
	 * AMD K7/K8 and later CPUs up to Zen don't save/restore
	 * FDP/FIP/FOP unless an exception is pending. Clear the x87 state
	 * here by setting it to fixed values. "m" is a random variable
	 * that should be in L1.
	 */
	if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (fpstate));
	}

	if (use_xsave()) {
		/*
		 * Restoring state always needs to modify all features
		 * which are in @mask even if the current task cannot use
		 * extended features.
		 *
		 * So fpstate->xfeatures cannot be used here, because then
		 * a feature for which the task has no permission but was
		 * used by the previous task would not go into init state.
		 */
		mask = fpu_kernel_cfg.max_features & mask;

		os_xrstor(&fpstate->regs.xsave, mask);
	} else {
		if (use_fxsr())
			fxrstor(&fpstate->regs.fxsave);
		else
			frstor(&fpstate->regs.fsave);
	}
}

void fpu_reset_from_exception_fixup(void)
{
	restore_fpregs_from_fpstate(&init_fpstate, XFEATURE_MASK_FPSTATE);
}

#if IS_ENABLED(CONFIG_KVM)
static void __fpstate_reset(struct fpstate *fpstate);

bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu)
{
	struct fpstate *fpstate;
	unsigned int size;

	size = fpu_user_cfg.default_size + ALIGN(offsetof(struct fpstate, regs), 64);
	fpstate = vzalloc(size);
	if (!fpstate)
		return false;

	__fpstate_reset(fpstate);
	fpstate_init_user(fpstate);
	fpstate->is_valloc	= true;
	fpstate->is_guest	= true;

	gfpu->fpstate = fpstate;
	return true;
}
EXPORT_SYMBOL_GPL(fpu_alloc_guest_fpstate);

void fpu_free_guest_fpstate(struct fpu_guest *gfpu)
{
	struct fpstate *fps = gfpu->fpstate;

	if (!fps)
		return;

	if (WARN_ON_ONCE(!fps->is_valloc || !fps->is_guest || fps->in_use))
		return;

	gfpu->fpstate = NULL;
	vfree(fps);
}
EXPORT_SYMBOL_GPL(fpu_free_guest_fpstate);

int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest)
{
	struct fpstate *guest_fps = guest_fpu->fpstate;
	struct fpu *fpu = &current->thread.fpu;
	struct fpstate *cur_fps = fpu->fpstate;

	fpregs_lock();
	if (!cur_fps->is_confidential && !test_thread_flag(TIF_NEED_FPU_LOAD))
		save_fpregs_to_fpstate(fpu);

	/* Swap fpstate */
	if (enter_guest) {
		fpu->__task_fpstate = cur_fps;
		fpu->fpstate = guest_fps;
		guest_fps->in_use = true;
	} else {
		guest_fps->in_use = false;
		fpu->fpstate = fpu->__task_fpstate;
		fpu->__task_fpstate = NULL;
	}

	cur_fps = fpu->fpstate;

	if (!cur_fps->is_confidential)
		restore_fpregs_from_fpstate(cur_fps, XFEATURE_MASK_FPSTATE);

	fpregs_mark_activate();
	fpregs_unlock();
	return 0;
}
EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpstate);

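/*
 * Illustrative caller sketch (not part of this file): a hypervisor swaps
 * the guest fpstate in before entering the vCPU and swaps the task
 * fpstate back afterwards. vcpu_run_inner() is a made-up placeholder
 * for the actual VM-entry path.
 *
 *	fpu_swap_kvm_fpstate(&vcpu->guest_fpu, true);	// enter guest
 *	vcpu_run_inner(vcpu);
 *	fpu_swap_kvm_fpstate(&vcpu->guest_fpu, false);	// back to host task
 */
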
void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf,
				    unsigned int size, u32 pkru)
{
	struct fpstate *kstate = gfpu->fpstate;
	union fpregs_state *ustate = buf;
	struct membuf mb = { .p = buf, .left = size };

	if (cpu_feature_enabled(X86_FEATURE_XSAVE)) {
		__copy_xstate_to_uabi_buf(mb, kstate, pkru, XSTATE_COPY_XSAVE);
	} else {
		memcpy(&ustate->fxsave, &kstate->regs.fxsave,
		       sizeof(ustate->fxsave));
		/* Make it restorable on a XSAVE enabled host */
		ustate->xsave.header.xfeatures = XFEATURE_MASK_FPSSE;
	}
}
EXPORT_SYMBOL_GPL(fpu_copy_guest_fpstate_to_uabi);

int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf,
				   u64 xcr0, u32 *vpkru)
{
	struct fpstate *kstate = gfpu->fpstate;
	const union fpregs_state *ustate = buf;
	struct pkru_state *xpkru;
	int ret;

	if (!cpu_feature_enabled(X86_FEATURE_XSAVE)) {
		if (ustate->xsave.header.xfeatures & ~XFEATURE_MASK_FPSSE)
			return -EINVAL;
		if (ustate->fxsave.mxcsr & ~mxcsr_feature_mask)
			return -EINVAL;

		memcpy(&kstate->regs.fxsave, &ustate->fxsave, sizeof(ustate->fxsave));
		return 0;
	}

	if (ustate->xsave.header.xfeatures & ~xcr0)
		return -EINVAL;

	ret = copy_uabi_from_kernel_to_xstate(kstate, ustate);
	if (ret)
		return ret;

	/* Retrieve PKRU if not in init state */
	if (kstate->regs.xsave.header.xfeatures & XFEATURE_MASK_PKRU) {
		xpkru = get_xsave_addr(&kstate->regs.xsave, XFEATURE_PKRU);
		*vpkru = xpkru->pkru;
	}

	/* Ensure that XCOMP_BV is set up for XSAVES */
	xstate_init_xcomp_bv(&kstate->regs.xsave, kstate->xfeatures);
	return 0;
}
EXPORT_SYMBOL_GPL(fpu_copy_uabi_to_guest_fpstate);
#endif /* CONFIG_KVM */

void kernel_fpu_begin_mask(unsigned int kfpu_mask)
{
	preempt_disable();

	WARN_ON_FPU(!irq_fpu_usable());
	WARN_ON_FPU(this_cpu_read(in_kernel_fpu));

	this_cpu_write(in_kernel_fpu, true);

	if (!(current->flags & PF_KTHREAD) &&
	    !test_thread_flag(TIF_NEED_FPU_LOAD)) {
		set_thread_flag(TIF_NEED_FPU_LOAD);
		save_fpregs_to_fpstate(&current->thread.fpu);
	}
	__cpu_invalidate_fpregs_state();

	/* Put sane initial values into the control registers. */
	if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
		ldmxcsr(MXCSR_DEFAULT);

	if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
		asm volatile ("fninit");
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);

void kernel_fpu_end(void)
{
	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));

	this_cpu_write(in_kernel_fpu, false);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);

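/*
 * Usage note (sketch, based on the kernel_fpu_begin() wrapper in
 * <asm/fpu/api.h>): on 64-bit kernels plain kernel_fpu_begin() only
 * requests KFPU_MXCSR, so code that executes legacy 387 instructions
 * must request an initialized x87 state explicitly:
 *
 *	kernel_fpu_begin_mask(KFPU_MXCSR | KFPU_387);
 *	// ... mixed x87 and SSE usage ...
 *	kernel_fpu_end();
 */
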
/*
 * Sync the FPU register state to current's memory register state when the
 * current task owns the FPU. The hardware register state is preserved.
 */
void fpu_sync_fpstate(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	fpregs_lock();
	trace_x86_fpu_before_save(fpu);

	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
		save_fpregs_to_fpstate(fpu);

	trace_x86_fpu_after_save(fpu);
	fpregs_unlock();
}

static inline unsigned int init_fpstate_copy_size(void)
{
	if (!use_xsave())
		return fpu_kernel_cfg.default_size;

	/* XSAVE(S) just needs the legacy and the xstate header part */
	return sizeof(init_fpstate.regs.xsave);
}

static inline void fpstate_init_fxstate(struct fpstate *fpstate)
{
	fpstate->regs.fxsave.cwd = 0x37f;
	fpstate->regs.fxsave.mxcsr = MXCSR_DEFAULT;
}

/*
 * Legacy x87 fpstate state init:
 */
static inline void fpstate_init_fstate(struct fpstate *fpstate)
{
	fpstate->regs.fsave.cwd = 0xffff037fu;
	fpstate->regs.fsave.swd = 0xffff0000u;
	fpstate->regs.fsave.twd = 0xffffffffu;
	fpstate->regs.fsave.fos = 0xffff0000u;
}

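/*
 * Note on the legacy init values above (explanatory, per the usual x87
 * reset defaults): a cwd of 0x037f masks all x87 exceptions and selects
 * extended (64-bit) precision with round-to-nearest; swd 0x0000 is an
 * empty status word; twd 0xffff tags all eight registers as empty. The
 * 0xffff upper halves fill the reserved high bits of the 32-bit fields
 * in the legacy fsave image.
 */
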
/*
 * Used in two places:
 * 1) Early boot to setup init_fpstate for non XSAVE systems
 * 2) fpu_init_fpstate_user() which is invoked from KVM
 */
void fpstate_init_user(struct fpstate *fpstate)
{
	if (!cpu_feature_enabled(X86_FEATURE_FPU)) {
		fpstate_init_soft(&fpstate->regs.soft);
		return;
	}

	xstate_init_xcomp_bv(&fpstate->regs.xsave, fpstate->xfeatures);

	if (cpu_feature_enabled(X86_FEATURE_FXSR))
		fpstate_init_fxstate(fpstate);
	else
		fpstate_init_fstate(fpstate);
}

static void __fpstate_reset(struct fpstate *fpstate)
{
	/* Initialize sizes and feature masks */
	fpstate->size		= fpu_kernel_cfg.default_size;
	fpstate->user_size	= fpu_user_cfg.default_size;
	fpstate->xfeatures	= fpu_kernel_cfg.default_features;
	fpstate->user_xfeatures	= fpu_user_cfg.default_features;
	fpstate->xfd		= init_fpstate.xfd;
}

void fpstate_reset(struct fpu *fpu)
{
	/* Set the fpstate pointer to the default fpstate */
	fpu->fpstate = &fpu->__fpstate;
	__fpstate_reset(fpu->fpstate);

	/* Initialize the permission related info in fpu */
	fpu->perm.__state_perm		= fpu_kernel_cfg.default_features;
	fpu->perm.__state_size		= fpu_kernel_cfg.default_size;
	fpu->perm.__user_state_size	= fpu_user_cfg.default_size;
}

static inline void fpu_inherit_perms(struct fpu *dst_fpu)
{
	if (fpu_state_size_dynamic()) {
		struct fpu *src_fpu = &current->group_leader->thread.fpu;

		spin_lock_irq(&current->sighand->siglock);
		/* Fork also inherits the permissions of the parent */
		dst_fpu->perm = src_fpu->perm;
		spin_unlock_irq(&current->sighand->siglock);
	}
}

/* Clone current's FPU state on fork */
int fpu_clone(struct task_struct *dst, unsigned long clone_flags)
{
	struct fpu *src_fpu = &current->thread.fpu;
	struct fpu *dst_fpu = &dst->thread.fpu;

	/* The new task's FPU state cannot be valid in the hardware. */
	dst_fpu->last_cpu = -1;

	fpstate_reset(dst_fpu);

	if (!cpu_feature_enabled(X86_FEATURE_FPU))
		return 0;

	/*
	 * Enforce reload for user space tasks and prevent kernel threads
	 * from trying to save the FPU registers on context switch.
	 */
	set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD);

	/*
	 * No FPU state inheritance for kernel threads and IO
	 * worker threads.
	 */
	if (dst->flags & (PF_KTHREAD | PF_IO_WORKER)) {
		/* Clear out the minimal state */
		memcpy(&dst_fpu->fpstate->regs, &init_fpstate.regs,
		       init_fpstate_copy_size());
		return 0;
	}

	/*
	 * Save the default portion of the current FPU state into the
	 * clone. Assume all dynamic features to be defined as caller-
	 * saved, which enables skipping both the expansion of fpstate
	 * and the copying of any dynamic state.
	 *
	 * Do not use memcpy() when TIF_NEED_FPU_LOAD is set because
	 * copying is not valid when current uses non-default states.
	 */
	fpregs_lock();
	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		fpregs_restore_userregs();
	save_fpregs_to_fpstate(dst_fpu);
	if (!(clone_flags & CLONE_THREAD))
		fpu_inherit_perms(dst_fpu);
	fpregs_unlock();

	trace_x86_fpu_copy_src(src_fpu);
	trace_x86_fpu_copy_dst(dst_fpu);

	return 0;
}

/*
 * Whitelist the FPU register state embedded into task_struct for hardened
 * usercopy.
 */
void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size)
{
	*offset = offsetof(struct thread_struct, fpu.__fpstate.regs);
	*size = fpu_kernel_cfg.default_size;
}

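/*
 * Illustrative context (sketch): this is the backend of the
 * arch_thread_struct_whitelist() hook, which hardened usercopy uses to
 * restrict copies into task_struct to the FPU register area, e.g.
 *
 *	unsigned long offset, size;
 *
 *	arch_thread_struct_whitelist(&offset, &size);
 *	// offset/size now bound the whitelisted fpstate region
 */
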
/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
	preempt_disable();

	if (fpu == &current->thread.fpu) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		fpregs_deactivate(fpu);
	}

	trace_x86_fpu_dropped(fpu);

	preempt_enable();
}

/*
 * Clear FPU registers by setting them up from the init fpstate.
 * Caller must do fpregs_[un]lock() around it.
 */
static inline void restore_fpregs_from_init_fpstate(u64 features_mask)
{
	if (use_xsave())
		os_xrstor(&init_fpstate.regs.xsave, features_mask);
	else if (use_fxsr())
		fxrstor(&init_fpstate.regs.fxsave);
	else
		frstor(&init_fpstate.regs.fsave);

	pkru_write_default();
}

/*
 * Reset current->fpu memory state to the init values.
 */
static void fpu_reset_fpregs(void)
{
	struct fpu *fpu = &current->thread.fpu;

	fpregs_lock();
	fpu__drop(fpu);
	/*
	 * This does not change the actual hardware registers. It just
	 * resets the memory image and sets TIF_NEED_FPU_LOAD so a
	 * subsequent return to usermode will reload the registers from the
	 * task's memory image.
	 *
	 * Do not use fpstate_init() here. Just copy init_fpstate which has
	 * the correct content already except for PKRU.
	 *
	 * PKRU handling does not rely on the xstate when restoring for
	 * user space as PKRU is eagerly written in switch_to() and
	 * flush_thread().
	 */
	memcpy(&fpu->fpstate->regs, &init_fpstate.regs, init_fpstate_copy_size());
	set_thread_flag(TIF_NEED_FPU_LOAD);
	fpregs_mark_activate();
	fpregs_unlock();
}

/*
 * Reset current's user FPU states to the init states. current's
 * supervisor states, if any, are not modified by this function. The
 * caller guarantees that the XSTATE header in memory is intact.
 */
void fpu__clear_user_states(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	fpregs_lock();
	if (!cpu_feature_enabled(X86_FEATURE_FPU)) {
		fpu_reset_fpregs();
		fpregs_unlock();
		return;
	}

	/*
	 * Ensure that current's supervisor states are loaded into their
	 * corresponding registers.
	 */
	if (xfeatures_mask_supervisor() &&
	    !fpregs_state_valid(fpu, smp_processor_id())) {
		os_xrstor(&fpu->fpstate->regs.xsave, xfeatures_mask_supervisor());
	}

	/* Reset user states in registers. */
	restore_fpregs_from_init_fpstate(XFEATURE_MASK_USER_RESTORE);

	/*
	 * Now all FPU registers have their desired values. Inform the FPU
	 * state machine that current's FPU registers are in the hardware
	 * registers. The memory image does not need to be updated because
	 * any operation relying on it has to save the registers first when
	 * current's FPU is marked active.
	 */
	fpregs_mark_activate();
	fpregs_unlock();
}

void fpu_flush_thread(void)
{
	fpstate_reset(&current->thread.fpu);
	fpu_reset_fpregs();
}

/*
 * Load FPU context before returning to userspace.
 */
void switch_fpu_return(void)
{
	if (!static_cpu_has(X86_FEATURE_FPU))
		return;

	fpregs_restore_userregs();
}
EXPORT_SYMBOL_GPL(switch_fpu_return);

#ifdef CONFIG_X86_DEBUG_FPU
/*
 * If current FPU state according to its tracking (loaded FPU context on this
 * CPU) is not valid then we must have TIF_NEED_FPU_LOAD set so the context is
 * loaded on return to userland.
 */
void fpregs_assert_state_consistent(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		return;

	WARN_ON_FPU(!fpregs_state_valid(fpu, smp_processor_id()));
}
EXPORT_SYMBOL_GPL(fpregs_assert_state_consistent);
#endif

void fpregs_mark_activate(void)
{
	struct fpu *fpu = &current->thread.fpu;

	fpregs_activate(fpu);
	fpu->last_cpu = smp_processor_id();
	clear_thread_flag(TIF_NEED_FPU_LOAD);
}

/*
 * x87 math exception handling:
 */

int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
	int err;

	if (trap_nr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status. 0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit. We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
		if (boot_cpu_has(X86_FEATURE_FXSR)) {
			cwd = fpu->fpstate->regs.fxsave.cwd;
			swd = fpu->fpstate->regs.fxsave.swd;
		} else {
			cwd = (unsigned short)fpu->fpstate->regs.fsave.cwd;
			swd = (unsigned short)fpu->fpstate->regs.fsave.swd;
		}

		err = swd & ~cwd;
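
		/*
		 * Worked example (illustrative, not from the original
		 * source): with the init cwd of 0x037f all exceptions
		 * are masked and err is 0. If user code unmasks ZE
		 * (clears cwd bit 2) and a divide by zero sets swd bit
		 * 2, then err = 0x0004 & ~0x037b = 0x0004, which the
		 * checks below map to FPE_FLTDIV.
		 */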
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register. Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = MXCSR_DEFAULT;

		if (boot_cpu_has(X86_FEATURE_XMM))
			mxcsr = fpu->fpstate->regs.fxsave.mxcsr;

		err = ~(mxcsr >> 7) & mxcsr;
	}
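
	/*
	 * Worked example (illustrative, not from the original source):
	 * an MXCSR with every exception masked except ZM (bit 9), plus
	 * the ZE flag (bit 2) set, reads 0x1d84. mxcsr >> 7 aligns the
	 * mask bits with the flag bits (0x3b), and ~0x3b & 0x1d84 leaves
	 * only bit 2 in the low six bits, which the checks below map to
	 * FPE_FLTDIV.
	 */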

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		return FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		return FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		return FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		return FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		return FPE_FLTRES;
	}

	/*
	 * If we're using IRQ 13, or supposedly even some trap
	 * X86_TRAP_MF implementations, it's possible
	 * we get a spurious trap, which is not an error.
	 */
	return 0;
}