x86/fpu: Add XFD state to fpstate
[sfrench/cifs-2.6.git] / arch/x86/kernel/fpu/core.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  Copyright (C) 1994 Linus Torvalds
4  *
5  *  Pentium III FXSR, SSE support
6  *  General FPU state handling cleanups
7  *      Gareth Hughes <gareth@valinux.com>, May 2000
8  */
9 #include <asm/fpu/api.h>
10 #include <asm/fpu/regset.h>
11 #include <asm/fpu/sched.h>
12 #include <asm/fpu/signal.h>
13 #include <asm/fpu/types.h>
14 #include <asm/traps.h>
15 #include <asm/irq_regs.h>
16
17 #include <linux/hardirq.h>
18 #include <linux/pkeys.h>
19
20 #include "context.h"
21 #include "internal.h"
22 #include "legacy.h"
23 #include "xstate.h"
24
25 #define CREATE_TRACE_POINTS
26 #include <asm/trace/fpu.h>
27
28 #ifdef CONFIG_X86_64
29 DEFINE_STATIC_KEY_FALSE(__fpu_state_size_dynamic);
30 DEFINE_PER_CPU(u64, xfd_state);
31 #endif
32
33 /* The FPU state configuration data for kernel and user space */
34 struct fpu_state_config fpu_kernel_cfg __ro_after_init;
35 struct fpu_state_config fpu_user_cfg __ro_after_init;
36
37 /*
38  * Represents the initial FPU state. It's mostly (but not completely) zeroes,
39  * depending on the FPU hardware format:
40  */
41 struct fpstate init_fpstate __ro_after_init;
42
43 /*
44  * Track whether the kernel is currently using the
45  * FPU state.
46  *
47  * This flag is used:
48  *
49  *   - by IRQ context code to potentially use the FPU
50  *     if it's unused.
51  *
52  *   - to debug kernel_fpu_begin()/end() correctness
53  */
54 static DEFINE_PER_CPU(bool, in_kernel_fpu);
55
56 /*
57  * Track which context is using the FPU on the CPU:
58  */
59 DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
60
61 static bool kernel_fpu_disabled(void)
62 {
63         return this_cpu_read(in_kernel_fpu);
64 }
65
66 static bool interrupted_kernel_fpu_idle(void)
67 {
68         return !kernel_fpu_disabled();
69 }
70
71 /*
72  * Were we in user mode (or vm86 mode) when we were
73  * interrupted?
74  *
75  * Doing kernel_fpu_begin/end() is ok if we are running
76  * in an interrupt context from user mode - we'll just
77  * save the FPU state as required.
78  */
79 static bool interrupted_user_mode(void)
80 {
81         struct pt_regs *regs = get_irq_regs();
82         return regs && user_mode(regs);
83 }
84
85 /*
86  * Can we use the FPU in kernel mode with the
87  * whole "kernel_fpu_begin/end()" sequence?
88  *
89  * It's always ok in process context (ie "not interrupt")
90  * but it is sometimes ok even from an irq.
91  */
92 bool irq_fpu_usable(void)
93 {
94         return !in_interrupt() ||
95                 interrupted_user_mode() ||
96                 interrupted_kernel_fpu_idle();
97 }
98 EXPORT_SYMBOL(irq_fpu_usable);
99
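/*
 * A minimal usage sketch (assuming a hypothetical scalar_fallback() path,
 * which is not part of this file): in-kernel SIMD users bracket register
 * usage with kernel_fpu_begin()/kernel_fpu_end() and fall back to integer
 * code when the FPU is not usable in the current context:
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		... use XMM/YMM registers ...
 *		kernel_fpu_end();
 *	} else {
 *		scalar_fallback();
 *	}
 *
 * Preemption is disabled between kernel_fpu_begin() and kernel_fpu_end(),
 * so the bracketed code must not sleep.
 */
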
100 /*
101  * Save the FPU register state in fpu->fpstate->regs. The register state is
102  * preserved.
103  *
104  * Must be called with fpregs_lock() held.
105  *
106  * The legacy FNSAVE instruction clears all FPU state unconditionally, so
107  * register state has to be reloaded. That might be a pointless exercise
108  * when the FPU is going to be used by another task right after that. But
109  * this only affects 20+ year old 32-bit systems and avoids conditionals all
110  * over the place.
111  *
112  * FXSAVE and all XSAVE variants preserve the FPU register state.
113  */
114 void save_fpregs_to_fpstate(struct fpu *fpu)
115 {
116         if (likely(use_xsave())) {
117                 os_xsave(fpu->fpstate);
118
119                 /*
120                  * AVX512 state is tracked here because its use is
121                  * known to slow the max clock speed of the core.
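                 * The timestamp feeds the AVX512_elapsed_ms field shown in
                 * /proc/<pid>/arch_status.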
122                  */
123                 if (fpu->fpstate->regs.xsave.header.xfeatures & XFEATURE_MASK_AVX512)
124                         fpu->avx512_timestamp = jiffies;
125                 return;
126         }
127
128         if (likely(use_fxsr())) {
129                 fxsave(&fpu->fpstate->regs.fxsave);
130                 return;
131         }
132
133         /*
134          * Legacy FPU register saving, FNSAVE always clears FPU registers,
135          * so we have to reload them from the memory state.
136          */
137         asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->fpstate->regs.fsave));
138         frstor(&fpu->fpstate->regs.fsave);
139 }
140
141 void restore_fpregs_from_fpstate(struct fpstate *fpstate, u64 mask)
142 {
143         /*
144          * AMD K7/K8 and later CPUs up to Zen don't save/restore
145          * FDP/FIP/FOP unless an exception is pending. Clear the x87 state
146          * here by setting it to fixed values.  "m" is any memory operand
147          * likely to be resident in the L1 cache; the local fpstate pointer is used.
148          */
149         if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
150                 asm volatile(
151                         "fnclex\n\t"
152                         "emms\n\t"
153                         "fildl %P[addr]"        /* set F?P to defined value */
154                         : : [addr] "m" (fpstate));
155         }
156
157         if (use_xsave()) {
158                 /*
159                  * Restoring state always needs to modify all features
160                  * which are in @mask even if the current task cannot use
161                  * extended features.
162                  *
163                  * So fpstate->xfeatures cannot be used here, because then
164                  * a feature for which the task has no permission but was
165                  * used by the previous task would not go into init state.
166                  */
167                 mask = fpu_kernel_cfg.max_features & mask;
168
169                 os_xrstor(&fpstate->regs.xsave, mask);
170         } else {
171                 if (use_fxsr())
172                         fxrstor(&fpstate->regs.fxsave);
173                 else
174                         frstor(&fpstate->regs.fsave);
175         }
176 }
177
178 void fpu_reset_from_exception_fixup(void)
179 {
180         restore_fpregs_from_fpstate(&init_fpstate, XFEATURE_MASK_FPSTATE);
181 }
182
183 #if IS_ENABLED(CONFIG_KVM)
184 static void __fpstate_reset(struct fpstate *fpstate);
185
186 bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu)
187 {
188         struct fpstate *fpstate;
189         unsigned int size;
190
191         size = fpu_user_cfg.default_size + ALIGN(offsetof(struct fpstate, regs), 64);
192         fpstate = vzalloc(size);
193         if (!fpstate)
194                 return false;
195
196         __fpstate_reset(fpstate);
197         fpstate_init_user(fpstate);
198         fpstate->is_valloc      = true;
199         fpstate->is_guest       = true;
200
201         gfpu->fpstate = fpstate;
202         return true;
203 }
204 EXPORT_SYMBOL_GPL(fpu_alloc_guest_fpstate);
205
206 void fpu_free_guest_fpstate(struct fpu_guest *gfpu)
207 {
208         struct fpstate *fps = gfpu->fpstate;
209
210         if (!fps)
211                 return;
212
213         if (WARN_ON_ONCE(!fps->is_valloc || !fps->is_guest || fps->in_use))
214                 return;
215
216         gfpu->fpstate = NULL;
217         vfree(fps);
218 }
219 EXPORT_SYMBOL_GPL(fpu_free_guest_fpstate);
220
221 int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest)
222 {
223         struct fpstate *guest_fps = guest_fpu->fpstate;
224         struct fpu *fpu = &current->thread.fpu;
225         struct fpstate *cur_fps = fpu->fpstate;
226
227         fpregs_lock();
228         if (!cur_fps->is_confidential && !test_thread_flag(TIF_NEED_FPU_LOAD))
229                 save_fpregs_to_fpstate(fpu);
230
231         /* Swap fpstate */
232         if (enter_guest) {
233                 fpu->__task_fpstate = cur_fps;
234                 fpu->fpstate = guest_fps;
235                 guest_fps->in_use = true;
236         } else {
237                 guest_fps->in_use = false;
238                 fpu->fpstate = fpu->__task_fpstate;
239                 fpu->__task_fpstate = NULL;
240         }
241
242         cur_fps = fpu->fpstate;
243
244         if (!cur_fps->is_confidential)
245                 restore_fpregs_from_fpstate(cur_fps, XFEATURE_MASK_FPSTATE);
246
247         fpregs_mark_activate();
248         fpregs_unlock();
249         return 0;
250 }
251 EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpstate);
252
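/*
 * A minimal sketch of the expected caller (an assumption about the
 * hypervisor side, not mandated by this file): the guest fpstate is swapped
 * in right before running the vCPU and swapped back out afterwards:
 *
 *	fpu_swap_kvm_fpstate(gfpu, true);	// enter guest: load guest state
 *	... run the vCPU ...
 *	fpu_swap_kvm_fpstate(gfpu, false);	// back to the task's own state
 */
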
253 void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf,
254                                     unsigned int size, u32 pkru)
255 {
256         struct fpstate *kstate = gfpu->fpstate;
257         union fpregs_state *ustate = buf;
258         struct membuf mb = { .p = buf, .left = size };
259
260         if (cpu_feature_enabled(X86_FEATURE_XSAVE)) {
261                 __copy_xstate_to_uabi_buf(mb, kstate, pkru, XSTATE_COPY_XSAVE);
262         } else {
263                 memcpy(&ustate->fxsave, &kstate->regs.fxsave,
264                        sizeof(ustate->fxsave));
265                 /* Make it restorable on a XSAVE enabled host */
266                 ustate->xsave.header.xfeatures = XFEATURE_MASK_FPSSE;
267         }
268 }
269 EXPORT_SYMBOL_GPL(fpu_copy_guest_fpstate_to_uabi);
270
271 int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf,
272                                    u64 xcr0, u32 *vpkru)
273 {
274         struct fpstate *kstate = gfpu->fpstate;
275         const union fpregs_state *ustate = buf;
276         struct pkru_state *xpkru;
277         int ret;
278
279         if (!cpu_feature_enabled(X86_FEATURE_XSAVE)) {
280                 if (ustate->xsave.header.xfeatures & ~XFEATURE_MASK_FPSSE)
281                         return -EINVAL;
282                 if (ustate->fxsave.mxcsr & ~mxcsr_feature_mask)
283                         return -EINVAL;
284                 memcpy(&kstate->regs.fxsave, &ustate->fxsave, sizeof(ustate->fxsave));
285                 return 0;
286         }
287
288         if (ustate->xsave.header.xfeatures & ~xcr0)
289                 return -EINVAL;
290
291         ret = copy_uabi_from_kernel_to_xstate(kstate, ustate);
292         if (ret)
293                 return ret;
294
295         /* Retrieve PKRU if not in init state */
296         if (kstate->regs.xsave.header.xfeatures & XFEATURE_MASK_PKRU) {
297                 xpkru = get_xsave_addr(&kstate->regs.xsave, XFEATURE_PKRU);
298                 *vpkru = xpkru->pkru;
299         }
300
301         /* Ensure that XCOMP_BV is set up for XSAVES */
302         xstate_init_xcomp_bv(&kstate->regs.xsave, kstate->xfeatures);
303         return 0;
304 }
305 EXPORT_SYMBOL_GPL(fpu_copy_uabi_to_guest_fpstate);
306 #endif /* CONFIG_KVM */
307
308 void kernel_fpu_begin_mask(unsigned int kfpu_mask)
309 {
310         preempt_disable();
311
312         WARN_ON_FPU(!irq_fpu_usable());
313         WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
314
315         this_cpu_write(in_kernel_fpu, true);
316
317         if (!(current->flags & PF_KTHREAD) &&
318             !test_thread_flag(TIF_NEED_FPU_LOAD)) {
319                 set_thread_flag(TIF_NEED_FPU_LOAD);
320                 save_fpregs_to_fpstate(&current->thread.fpu);
321         }
322         __cpu_invalidate_fpregs_state();
323
324         /* Put sane initial values into the control registers. */
325         if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
326                 ldmxcsr(MXCSR_DEFAULT);
327
328         if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
329                 asm volatile ("fninit");
330 }
331 EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);
332
333 void kernel_fpu_end(void)
334 {
335         WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
336
337         this_cpu_write(in_kernel_fpu, false);
338         preempt_enable();
339 }
340 EXPORT_SYMBOL_GPL(kernel_fpu_end);
341
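/*
 * kernel_fpu_begin(), defined in <asm/fpu/api.h>, is a thin wrapper around
 * kernel_fpu_begin_mask() that passes a default KFPU_* mask, so most callers
 * never need the _mask variant directly.
 */
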
342 /*
343  * Sync the FPU register state to current's memory register state when the
344  * current task owns the FPU. The hardware register state is preserved.
345  */
346 void fpu_sync_fpstate(struct fpu *fpu)
347 {
348         WARN_ON_FPU(fpu != &current->thread.fpu);
349
350         fpregs_lock();
351         trace_x86_fpu_before_save(fpu);
352
353         if (!test_thread_flag(TIF_NEED_FPU_LOAD))
354                 save_fpregs_to_fpstate(fpu);
355
356         trace_x86_fpu_after_save(fpu);
357         fpregs_unlock();
358 }
359
360 static inline unsigned int init_fpstate_copy_size(void)
361 {
362         if (!use_xsave())
363                 return fpu_kernel_cfg.default_size;
364
365         /* XSAVE(S) just needs the legacy and the xstate header part */
366         return sizeof(init_fpstate.regs.xsave);
367 }
368
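/*
 * FXSAVE-format defaults: a control word of 0x37f masks all x87 exceptions,
 * selects extended precision and round-to-nearest; MXCSR_DEFAULT (0x1f80)
 * likewise masks all SSE exceptions and rounds to nearest.
 */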
369 static inline void fpstate_init_fxstate(struct fpstate *fpstate)
370 {
371         fpstate->regs.fxsave.cwd = 0x37f;
372         fpstate->regs.fxsave.mxcsr = MXCSR_DEFAULT;
373 }
374
375 /*
376  * Legacy x87 fpstate state init:
377  */
378 static inline void fpstate_init_fstate(struct fpstate *fpstate)
379 {
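        /* Mask all x87 exceptions, clear the status word, tag all registers empty */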
380         fpstate->regs.fsave.cwd = 0xffff037fu;
381         fpstate->regs.fsave.swd = 0xffff0000u;
382         fpstate->regs.fsave.twd = 0xffffffffu;
383         fpstate->regs.fsave.fos = 0xffff0000u;
384 }
385
386 /*
387  * Used in two places:
388  * 1) Early boot to set up init_fpstate for non-XSAVE systems
389  * 2) fpu_init_fpstate_user() which is invoked from KVM
390  */
391 void fpstate_init_user(struct fpstate *fpstate)
392 {
393         if (!cpu_feature_enabled(X86_FEATURE_FPU)) {
394                 fpstate_init_soft(&fpstate->regs.soft);
395                 return;
396         }
397
398         xstate_init_xcomp_bv(&fpstate->regs.xsave, fpstate->xfeatures);
399
400         if (cpu_feature_enabled(X86_FEATURE_FXSR))
401                 fpstate_init_fxstate(fpstate);
402         else
403                 fpstate_init_fstate(fpstate);
404 }
405
406 static void __fpstate_reset(struct fpstate *fpstate)
407 {
408         /* Initialize sizes and feature masks */
409         fpstate->size           = fpu_kernel_cfg.default_size;
410         fpstate->user_size      = fpu_user_cfg.default_size;
411         fpstate->xfeatures      = fpu_kernel_cfg.default_features;
412         fpstate->user_xfeatures = fpu_user_cfg.default_features;
413         fpstate->xfd            = init_fpstate.xfd;
414 }
415
416 void fpstate_reset(struct fpu *fpu)
417 {
418         /* Set the fpstate pointer to the default fpstate */
419         fpu->fpstate = &fpu->__fpstate;
420         __fpstate_reset(fpu->fpstate);
421
422         /* Initialize the permission related info in fpu */
423         fpu->perm.__state_perm          = fpu_kernel_cfg.default_features;
424         fpu->perm.__state_size          = fpu_kernel_cfg.default_size;
425         fpu->perm.__user_state_size     = fpu_user_cfg.default_size;
426 }
427
428 static inline void fpu_inherit_perms(struct fpu *dst_fpu)
429 {
430         if (fpu_state_size_dynamic()) {
431                 struct fpu *src_fpu = &current->group_leader->thread.fpu;
432
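                /*
                 * sighand->siglock serializes this read against a concurrent
                 * dynamic-feature permission grant (arch_prctl(ARCH_REQ_XCOMP_PERM))
                 * in another thread of the parent's thread group.
                 */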
433                 spin_lock_irq(&current->sighand->siglock);
434                 /* Fork also inherits the permissions of the parent */
435                 dst_fpu->perm = src_fpu->perm;
436                 spin_unlock_irq(&current->sighand->siglock);
437         }
438 }
439
440 /* Clone current's FPU state on fork */
441 int fpu_clone(struct task_struct *dst, unsigned long clone_flags)
442 {
443         struct fpu *src_fpu = &current->thread.fpu;
444         struct fpu *dst_fpu = &dst->thread.fpu;
445
446         /* The new task's FPU state cannot be valid in the hardware. */
447         dst_fpu->last_cpu = -1;
448
449         fpstate_reset(dst_fpu);
450
451         if (!cpu_feature_enabled(X86_FEATURE_FPU))
452                 return 0;
453
454         /*
455          * Enforce reload for user space tasks and prevent kernel threads
456          * from trying to save the FPU registers on context switch.
457          */
458         set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD);
459
460         /*
461          * No FPU state inheritance for kernel threads and IO
462          * worker threads.
463          */
464         if (dst->flags & (PF_KTHREAD | PF_IO_WORKER)) {
465                 /* Clear out the minimal state */
466                 memcpy(&dst_fpu->fpstate->regs, &init_fpstate.regs,
467                        init_fpstate_copy_size());
468                 return 0;
469         }
470
471         /*
472          * Save the default portion of the current FPU state into the
473          * clone. Assume all dynamic features to be defined as caller-
474          * saved, which enables skipping both the expansion of fpstate
475          * and the copying of any dynamic state.
476          *
477          * Do not use memcpy() when TIF_NEED_FPU_LOAD is set because
478          * copying is not valid when current uses non-default states.
479          */
480         fpregs_lock();
481         if (test_thread_flag(TIF_NEED_FPU_LOAD))
482                 fpregs_restore_userregs();
483         save_fpregs_to_fpstate(dst_fpu);
484         if (!(clone_flags & CLONE_THREAD))
485                 fpu_inherit_perms(dst_fpu);
486         fpregs_unlock();
487
488         trace_x86_fpu_copy_src(src_fpu);
489         trace_x86_fpu_copy_dst(dst_fpu);
490
491         return 0;
492 }
493
494 /*
495  * Whitelist the FPU register state embedded into task_struct for hardened
496  * usercopy.
497  */
498 void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size)
499 {
500         *offset = offsetof(struct thread_struct, fpu.__fpstate.regs);
501         *size = fpu_kernel_cfg.default_size;
502 }
503
504 /*
505  * Drops current FPU state: deactivates the fpregs and
506  * the fpstate. NOTE: it still leaves previous contents
507  * in the fpregs in the eager-FPU case.
508  *
509  * This function can be used in cases where we know that
510  * a state-restore is coming: either an explicit one,
511  * or a reschedule.
512  */
513 void fpu__drop(struct fpu *fpu)
514 {
515         preempt_disable();
516
517         if (fpu == &current->thread.fpu) {
518                 /* Ignore delayed exceptions from user space */
519                 asm volatile("1: fwait\n"
520                              "2:\n"
521                              _ASM_EXTABLE(1b, 2b));
522                 fpregs_deactivate(fpu);
523         }
524
525         trace_x86_fpu_dropped(fpu);
526
527         preempt_enable();
528 }
529
530 /*
531  * Clear FPU registers by setting them up from the init fpstate.
532  * Caller must do fpregs_[un]lock() around it.
533  */
534 static inline void restore_fpregs_from_init_fpstate(u64 features_mask)
535 {
536         if (use_xsave())
537                 os_xrstor(&init_fpstate.regs.xsave, features_mask);
538         else if (use_fxsr())
539                 fxrstor(&init_fpstate.regs.fxsave);
540         else
541                 frstor(&init_fpstate.regs.fsave);
542
543         pkru_write_default();
544 }
545
546 /*
547  * Reset current->fpu memory state to the init values.
548  */
549 static void fpu_reset_fpregs(void)
550 {
551         struct fpu *fpu = &current->thread.fpu;
552
553         fpregs_lock();
554         fpu__drop(fpu);
555         /*
556          * This does not change the actual hardware registers. It just
557          * resets the memory image and sets TIF_NEED_FPU_LOAD so a
558          * subsequent return to usermode will reload the registers from the
559          * task's memory image.
560          *
561          * Do not use fpstate_init() here. Just copy init_fpstate which has
562          * the correct content already except for PKRU.
563          *
564          * PKRU handling does not rely on the xstate when restoring for
565          * user space as PKRU is eagerly written in switch_to() and
566          * flush_thread().
567          */
568         memcpy(&fpu->fpstate->regs, &init_fpstate.regs, init_fpstate_copy_size());
569         set_thread_flag(TIF_NEED_FPU_LOAD);
570         fpregs_unlock();
571 }
572
573 /*
574  * Reset current's user FPU states to the init states.  current's
575  * supervisor states, if any, are not modified by this function.  The
576  * caller guarantees that the XSTATE header in memory is intact.
577  */
578 void fpu__clear_user_states(struct fpu *fpu)
579 {
580         WARN_ON_FPU(fpu != &current->thread.fpu);
581
582         fpregs_lock();
583         if (!cpu_feature_enabled(X86_FEATURE_FPU)) {
584                 fpu_reset_fpregs();
585                 fpregs_unlock();
586                 return;
587         }
588
589         /*
590          * Ensure that current's supervisor states are loaded into their
591          * corresponding registers.
592          */
593         if (xfeatures_mask_supervisor() &&
594             !fpregs_state_valid(fpu, smp_processor_id())) {
595                 os_xrstor(&fpu->fpstate->regs.xsave, xfeatures_mask_supervisor());
596         }
597
598         /* Reset user states in registers. */
599         restore_fpregs_from_init_fpstate(XFEATURE_MASK_USER_RESTORE);
600
601         /*
602          * Now all FPU registers have their desired values.  Inform the FPU
603          * state machine that current's FPU registers are in the hardware
604          * registers. The memory image does not need to be updated because
605          * any operation relying on it has to save the registers first when
606          * current's FPU is marked active.
607          */
608         fpregs_mark_activate();
609         fpregs_unlock();
610 }
611
612 void fpu_flush_thread(void)
613 {
614         fpstate_reset(&current->thread.fpu);
615         fpu_reset_fpregs();
616 }
617 /*
618  * Load FPU context before returning to userspace.
619  */
620 void switch_fpu_return(void)
621 {
622         if (!static_cpu_has(X86_FEATURE_FPU))
623                 return;
624
625         fpregs_restore_userregs();
626 }
627 EXPORT_SYMBOL_GPL(switch_fpu_return);
628
629 #ifdef CONFIG_X86_DEBUG_FPU
630 /*
631  * If current FPU state according to its tracking (loaded FPU context on this
632  * CPU) is not valid then we must have TIF_NEED_FPU_LOAD set so the context is
633  * loaded on return to userland.
634  */
635 void fpregs_assert_state_consistent(void)
636 {
637         struct fpu *fpu = &current->thread.fpu;
638
639         if (test_thread_flag(TIF_NEED_FPU_LOAD))
640                 return;
641
642         WARN_ON_FPU(!fpregs_state_valid(fpu, smp_processor_id()));
643 }
644 EXPORT_SYMBOL_GPL(fpregs_assert_state_consistent);
645 #endif
646
647 void fpregs_mark_activate(void)
648 {
649         struct fpu *fpu = &current->thread.fpu;
650
651         fpregs_activate(fpu);
652         fpu->last_cpu = smp_processor_id();
653         clear_thread_flag(TIF_NEED_FPU_LOAD);
654 }
655
656 /*
657  * x87 math exception handling:
658  */
659
660 int fpu__exception_code(struct fpu *fpu, int trap_nr)
661 {
662         int err;
663
664         if (trap_nr == X86_TRAP_MF) {
665                 unsigned short cwd, swd;
666                 /*
667                  * (~cwd & swd) will mask out exceptions that are not set to unmasked
668                  * (~cwd & swd) keeps only the exception bits that are set in the
669                  * status word and unmasked in the control word.  0x3f are the
670                  * exception bits in these registers, 0x200 is the C1 flag needed
671                  * to tell stack overflow from underflow, 0x040 is the stack fault
672                  * bit.  Only one exception should be pending at a time; if this
673                  * combination does not produce a single exception, the program is
674                  * not synchronizing its FPU usage and will suffer the consequences,
675                  * since the exact context of the exception cannot be reproduced.
676                 if (boot_cpu_has(X86_FEATURE_FXSR)) {
677                         cwd = fpu->fpstate->regs.fxsave.cwd;
678                         swd = fpu->fpstate->regs.fxsave.swd;
679                 } else {
680                         cwd = (unsigned short)fpu->fpstate->regs.fsave.cwd;
681                         swd = (unsigned short)fpu->fpstate->regs.fsave.swd;
682                 }
683
684                 err = swd & ~cwd;
685         } else {
686                 /*
687                  * The SIMD FPU exceptions are handled a little differently, as there
688                  * is only a single status/control register (MXCSR).  The mask bits
689                  * sit at 0x1f80; shifting them right by 7 aligns them with the
690                  * exception flag bits at 0x3f, so only unmasked exceptions remain.
691                  */
692                 unsigned short mxcsr = MXCSR_DEFAULT;
693
694                 if (boot_cpu_has(X86_FEATURE_XMM))
695                         mxcsr = fpu->fpstate->regs.fxsave.mxcsr;
696
697                 err = ~(mxcsr >> 7) & mxcsr;
698         }
699
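        /*
         * Worked example: an unmasked SSE divide-by-zero leaves ZE (bit 2) set
         * while ZM (bit 9) is clear, so err & 0x004 is non-zero and FPE_FLTDIV
         * is returned below.
         */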
700         if (err & 0x001) {      /* Invalid op */
701                 /*
702                  * swd & 0x240 == 0x040: Stack Underflow
703                  * swd & 0x240 == 0x240: Stack Overflow
704                  * User must clear the SF bit (0x40) if set
705                  */
706                 return FPE_FLTINV;
707         } else if (err & 0x004) { /* Divide by Zero */
708                 return FPE_FLTDIV;
709         } else if (err & 0x008) { /* Overflow */
710                 return FPE_FLTOVF;
711         } else if (err & 0x012) { /* Denormal, Underflow */
712                 return FPE_FLTUND;
713         } else if (err & 0x020) { /* Precision */
714                 return FPE_FLTRES;
715         }
716
717         /*
718          * If we're using IRQ 13, or supposedly even some X86_TRAP_MF trap
719          * implementations, it is possible that we get a spurious trap, which
720          * is not an error.
721          */
722         return 0;
723 }