x86/fpu: Factor out the exception error code handling code

Factor out the FPU error code handling code from traps.c and fpu/internal.h
and move them close to each other.

Also convert the helper functions to 'struct fpu *', which further simplifies
them.

Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2015-04-30 09:29:38 +02:00
parent acd58a3ad0
commit e1cebad49c
3 changed files with 102 additions and 86 deletions
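As an aside (not part of the patch): the changelog's point that passing 'struct fpu *' simplifies the helpers can be illustrated with a standalone user-space analogy. The structure and function names below are invented for the example and are not kernel code; once the caller resolves the FPU context pointer, the helper no longer needs to know anything about the task structure.

#include <stdio.h>

/* Invented stand-ins for the kernel structures, for illustration only. */
struct toy_fpu    { unsigned short cwd; };
struct toy_thread { struct toy_fpu fpu; };
struct toy_task   { struct toy_thread thread; };

/* Old shape: the helper walks task->thread.fpu itself. */
static unsigned short get_cwd_from_task(struct toy_task *tsk)
{
	return tsk->thread.fpu.cwd;
}

/* New shape: the caller passes the FPU context directly. */
static unsigned short get_cwd_from_fpu(struct toy_fpu *fpu)
{
	return fpu->cwd;
}

int main(void)
{
	struct toy_task task = { .thread.fpu.cwd = 0x037f };

	printf("%#x %#x\n",
	       get_cwd_from_task(&task),
	       get_cwd_from_fpu(&task.thread.fpu));
	return 0;
}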

arch/x86/include/asm/fpu/internal.h

@@ -30,7 +30,8 @@ extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__activate_curr(struct fpu *fpu);
extern void fpstate_init(struct fpu *fpu);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
extern int fpu__exception_code(struct fpu *fpu, int trap_nr);

/*
 * High level FPU state handling functions:
@@ -467,34 +468,4 @@ static inline void user_fpu_begin(void)
	preempt_enable();
}

/*
 * i387 state interaction
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state.fxsave.cwd;
	} else {
		return (unsigned short)tsk->thread.fpu.state.fsave.cwd;
	}
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state.fxsave.swd;
	} else {
		return (unsigned short)tsk->thread.fpu.state.fsave.swd;
	}
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
	if (cpu_has_xmm) {
		return tsk->thread.fpu.state.fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}
#endif /* _ASM_X86_FPU_INTERNAL_H */
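An aside on the helpers removed above (and re-added next to their user in fpu/core.c below): the cast on the non-FXSR path exists because the legacy FSAVE image stores the control and status words in 32-bit slots whose upper halves are reserved, whereas the FXSAVE image uses plain 16-bit fields. A self-contained sketch with simplified, made-up layouts (not the kernel's real structure definitions):

#include <stdio.h>
#include <stdint.h>

/* Simplified layouts, for illustration only: the legacy FSAVE image keeps
 * FCW/FSW in 32-bit slots with reserved upper halves, while the FXSAVE
 * image uses 16-bit fields. */
struct fsave_image  { uint32_t cwd, swd, twd; };
struct fxsave_image { uint16_t cwd, swd; uint8_t twd; };

static unsigned short cwd_from_fsave(const struct fsave_image *s)
{
	/* Mirrors the cast in the removed helper: drop the reserved bits. */
	return (unsigned short)s->cwd;
}

static unsigned short cwd_from_fxsave(const struct fxsave_image *s)
{
	return s->cwd;
}

int main(void)
{
	struct fsave_image  fs = { .cwd = 0xffff037f };	/* reserved high half set */
	struct fxsave_image fx = { .cwd = 0x037f };

	printf("fsave cwd: %#x, fxsave cwd: %#x\n",
	       cwd_from_fsave(&fs), cwd_from_fxsave(&fx));
	return 0;
}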

arch/x86/kernel/fpu/core.c

@@ -8,6 +8,7 @@
#include <asm/fpu/internal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/traps.h>
#include <linux/hardirq.h>
@@ -749,3 +750,90 @@ int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
EXPORT_SYMBOL(dump_fpu);
#endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
/*
 * x87 math exception handling:
 */

static inline unsigned short get_fpu_cwd(struct fpu *fpu)
{
	if (cpu_has_fxsr) {
		return fpu->state.fxsave.cwd;
	} else {
		return (unsigned short)fpu->state.fsave.cwd;
	}
}

static inline unsigned short get_fpu_swd(struct fpu *fpu)
{
	if (cpu_has_fxsr) {
		return fpu->state.fxsave.swd;
	} else {
		return (unsigned short)fpu->state.fsave.swd;
	}
}

static inline unsigned short get_fpu_mxcsr(struct fpu *fpu)
{
	if (cpu_has_xmm) {
		return fpu->state.fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}

int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
	int err;

	if (trap_nr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status. 0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit. We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception
		 */
		cwd = get_fpu_cwd(fpu);
		swd = get_fpu_swd(fpu);

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register. Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = get_fpu_mxcsr(fpu);

		err = ~(mxcsr >> 7) & mxcsr;
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		return FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		return FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		return FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		return FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		return FPE_FLTRES;
	}

	/*
	 * If we're using IRQ 13, or supposedly even some trap
	 * X86_TRAP_MF implementations, it's possible
	 * we get a spurious trap, which is not an error.
	 */
	return 0;
}
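The comments above fully determine the mapping, so it can be rehearsed outside the kernel. The sketch below is illustrative only: exception_bits_to_code() is an invented stand-in for the if/else chain above, the FPE_* constants come from <signal.h>, and the two sample values mimic what the kernel would compute from CWD/SWD and from MXCSR respectively.

#include <signal.h>
#include <stdio.h>

/* User-space rehearsal of the mapping implemented above (illustrative only).
 * 'err' holds the raised-and-unmasked exception bits in the low 6 bits. */
static int exception_bits_to_code(unsigned short err)
{
	if (err & 0x001)		/* Invalid op */
		return FPE_FLTINV;
	if (err & 0x004)		/* Divide by zero */
		return FPE_FLTDIV;
	if (err & 0x008)		/* Overflow */
		return FPE_FLTOVF;
	if (err & 0x012)		/* Denormal, underflow */
		return FPE_FLTUND;
	if (err & 0x020)		/* Precision */
		return FPE_FLTRES;
	return 0;			/* spurious / nothing unmasked */
}

int main(void)
{
	/* x87: control word masks everything except divide-by-zero (bit 2),
	 * status word reports a divide-by-zero -> expect FPE_FLTDIV. */
	unsigned short cwd = 0x037f & ~0x0004, swd = 0x0004;
	printf("x87  -> %d (FPE_FLTDIV = %d)\n",
	       exception_bits_to_code(swd & ~cwd), FPE_FLTDIV);

	/* SIMD: MXCSR with all masks set except overflow (bit 10) and the
	 * overflow flag (bit 3) raised -> expect FPE_FLTOVF. */
	unsigned short mxcsr = (0x1f80 & ~(1 << 10)) | 0x0008;
	printf("simd -> %d (FPE_FLTOVF = %d)\n",
	       exception_bits_to_code(~(mxcsr >> 7) & mxcsr & 0x3f), FPE_FLTOVF);

	return 0;
}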

arch/x86/kernel/traps.c

@@ -708,8 +708,8 @@ NOKPROBE_SYMBOL(do_debug);
static void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
	struct task_struct *task = current;
	struct fpu *fpu = &task->thread.fpu;
	siginfo_t info;
	unsigned short err;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
				"simd exception";
@@ -717,8 +717,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
		return;
	conditional_sti(regs);
	if (!user_mode(regs))
	{
	if (!user_mode(regs)) {
		if (!fixup_exception(regs)) {
			task->thread.error_code = error_code;
			task->thread.trap_nr = trapnr;
@@ -730,62 +729,20 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
	/*
	 * Save the info for the exception handler and clear the error.
	 */
	fpu__save(&task->thread.fpu);
	task->thread.trap_nr = trapnr;
	fpu__save(fpu);
	task->thread.trap_nr = trapnr;
	task->thread.error_code = error_code;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *)uprobe_get_trap_addr(regs);
	if (trapnr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status. 0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit. We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception
		 */
		cwd = get_fpu_cwd(task);
		swd = get_fpu_swd(task);
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *)uprobe_get_trap_addr(regs);
		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register. Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = get_fpu_mxcsr(task);
		err = ~(mxcsr >> 7) & mxcsr;
	}
	info.si_code = fpu__exception_code(fpu, trapnr);
	if (err & 0x001) { /* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		info.si_code = FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		info.si_code = FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		info.si_code = FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		info.si_code = FPE_FLTRES;
	} else {
		/*
		 * If we're using IRQ 13, or supposedly even some trap
		 * X86_TRAP_MF implementations, it's possible
		 * we get a spurious trap, which is not an error.
		 */
	/* Retry when we get spurious exceptions: */
	if (!info.si_code)
		return;
	}
	force_sig_info(SIGFPE, &info, task);
}
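For completeness, this is what the path above looks like from user space, as a hedged sketch assuming Linux with glibc: feenableexcept() is a GNU extension (link with -lm), and the handler exits rather than returns because returning would restart the faulting instruction. With the divide-by-zero exception unmasked, the division traps and the handler should observe si_code == FPE_FLTDIV, i.e. the value produced by fpu__exception_code() and delivered via math_error()/force_sig_info().

#define _GNU_SOURCE
#include <fenv.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void fpe_handler(int sig, siginfo_t *si, void *uctx)
{
	(void)sig; (void)uctx;
	/* Returning would re-execute the faulting instruction, so exit here. */
	_exit(si->si_code == FPE_FLTDIV ? 0 : 1);
}

int main(void)
{
	struct sigaction sa = { 0 };
	volatile double zero = 0.0;

	sa.sa_sigaction = fpe_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGFPE, &sa, NULL);

	feenableexcept(FE_DIVBYZERO);	/* unmask the FP divide-by-zero trap */

	printf("%f\n", 1.0 / zero);	/* traps before printf runs */
	return 1;			/* not reached if the trap fired */
}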