path: root/arch/x86/include/asm/fpu
author    Ingo Molnar <mingo@kernel.org>  2015-05-24 09:23:25 +0200
committer Ingo Molnar <mingo@kernel.org>  2015-05-25 12:49:40 +0200
commit    8c05f05edb7795ecd1fa95d5d44bc5b22fd85287 (patch)
tree      69ff3b2e21a9cfb996414a729affc44b4e2e8378 /arch/x86/include/asm/fpu
parent    685c9616248c4f0d57e0d81d3236c80bdce1af46 (diff)
x86/fpu: Micro-optimize the copy_xregs_to_kernel*() and copy_kernel_to_xregs*() functions
The copy_xregs_to_kernel*() and copy_kernel_to_xregs*() functions are used to
copy FPU registers to kernel memory and vice versa.

They are never expected to fail, yet they have a return code, mostly because
that way they can share the assembly macros with the copy*user*() functions.
This error code is then silently ignored by the context switching and other
code - which made the bug in:

  b8c1b8ea7b21 ("x86/fpu: Fix FPU state save area alignment bug")

harder to fix than necessary.

So remove the return values and check for no faults when FPU debugging is
enabled in the .config.

This improves the eagerfpu context switching fast path by a couple of
instructions, when FPU debugging is disabled:

   ffffffff810407fa:  89 c2                 mov    %eax,%edx
   ffffffff810407fc:  48 0f ae 2f           xrstor64 (%rdi)
   ffffffff81040800:  31 c0                 xor    %eax,%eax
  -ffffffff81040802:  eb 0a                 jmp    ffffffff8104080e <__switch_to+0x321>
  +ffffffff81040802:  eb 16                 jmp    ffffffff8104081a <__switch_to+0x32d>
   ffffffff81040804:  31 c0                 xor    %eax,%eax
   ffffffff81040806:  48 0f ae 8b c0 05 00  fxrstor64 0x5c0(%rbx)
   ffffffff8104080d:  00

Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
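[Editor's note] The "check for no faults when FPU debugging is enabled" relies on the
WARN_ON_FPU() debug helper used in the diff below. Its definition is not part of this
patch; the following is only a minimal sketch of how such a CONFIG_X86_DEBUG_FPU-gated
macro can be written, so that the check compiles away entirely in non-debug builds:

   /*
    * Sketch of a debug-only warning helper (assumed, not part of this diff):
    * with CONFIG_X86_DEBUG_FPU enabled it warns once on an unexpected fault,
    * otherwise it only evaluates its argument and adds no branch to the
    * context-switch fast path.
    */
   #ifdef CONFIG_X86_DEBUG_FPU
   # define WARN_ON_FPU(x)   WARN_ON_ONCE(x)
   #else
   # define WARN_ON_FPU(x)   ({ (void)(x); 0; })
   #endif

This is why dropping the return values costs nothing when debugging is disabled, as the
disassembly above shows.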
Diffstat (limited to 'arch/x86/include/asm/fpu')
-rw-r--r--  arch/x86/include/asm/fpu/internal.h  37
1 file changed, 23 insertions, 14 deletions
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 1352d380bd46..99690bed920a 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -232,7 +232,7 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
* This function is called only during boot time when x86 caps are not set
* up and alternative can not be used yet.
*/
-static inline int copy_xregs_to_kernel_booting(struct xregs_state *xstate)
+static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
{
u64 mask = -1;
u32 lmask = mask;
@@ -253,14 +253,16 @@ static inline int copy_xregs_to_kernel_booting(struct xregs_state *xstate)
xstate_fault(err)
: "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
: "memory");
- return err;
+
+ /* We should never fault when copying to a kernel buffer: */
+ WARN_ON_FPU(err);
}
/*
* This function is called only during boot time when x86 caps are not set
* up and alternative can not be used yet.
*/
-static inline int copy_kernel_to_xregs_booting(struct xregs_state *xstate, u64 mask)
+static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate, u64 mask)
{
u32 lmask = mask;
u32 hmask = mask >> 32;
@@ -280,13 +282,15 @@ static inline int copy_kernel_to_xregs_booting(struct xregs_state *xstate, u64 mask)
xstate_fault(err)
: "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
: "memory");
- return err;
+
+ /* We should never fault when copying from a kernel buffer: */
+ WARN_ON_FPU(err);
}
/*
* Save processor xstate to xsave area.
*/
-static inline int copy_xregs_to_kernel(struct xregs_state *xstate)
+static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
{
u64 mask = -1;
u32 lmask = mask;
@@ -319,13 +323,14 @@ static inline int copy_xregs_to_kernel(struct xregs_state *xstate)
: "0" (err)
: "memory");
- return err;
+ /* We should never fault when copying to a kernel buffer: */
+ WARN_ON_FPU(err);
}
/*
* Restore processor xstate from xsave area.
*/
-static inline int copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
+static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
{
u32 lmask = mask;
u32 hmask = mask >> 32;
@@ -347,7 +352,8 @@ static inline int copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
: "0" (err)
: "memory");
- return err;
+ /* We should never fault when copying from a kernel buffer: */
+ WARN_ON_FPU(err);
}
/*
@@ -433,12 +439,15 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
static inline int __copy_fpstate_to_fpregs(struct fpu *fpu)
{
- if (use_xsave())
- return copy_kernel_to_xregs(&fpu->state.xsave, -1);
- else if (use_fxsr())
- return copy_kernel_to_fxregs(&fpu->state.fxsave);
- else
- return copy_kernel_to_fregs(&fpu->state.fsave);
+ if (use_xsave()) {
+ copy_kernel_to_xregs(&fpu->state.xsave, -1);
+ return 0;
+ } else {
+ if (use_fxsr())
+ return copy_kernel_to_fxregs(&fpu->state.fxsave);
+ else
+ return copy_kernel_to_fregs(&fpu->state.fsave);
+ }
}
static inline int copy_fpstate_to_fpregs(struct fpu *fpu)