alistair23-linux/arch/sh/include/asm/fpu.h
Paul Mundt 0ea820cf9b sh: Move over to dynamically allocated FPU context.
This follows the x86 xstate changes and implements a task_xstate slab
cache that is dynamically sized to match one of hard FP/soft FP/FPU-less.
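
A rough sketch of the idea (illustrative only, not the literal code from this
commit): the cache is created once xstate_size is known, in the style of the
x86 arch_task_cache_init(); task_xstate_cachep, xstate_size and union
thread_xstate are names this series is assumed to use.

    #include <linux/init.h>
    #include <linux/slab.h>
    #include <asm/processor.h>      /* union thread_xstate (per this series) */

    struct kmem_cache *task_xstate_cachep;
    unsigned int xstate_size;       /* sizeof(hard FP), sizeof(soft FP), or 0 */

    void __init arch_task_cache_init(void)
    {
            if (!xstate_size)       /* FPU-less: nothing to allocate, ever */
                    return;

            task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
                                            __alignof__(union thread_xstate),
                                            SLAB_PANIC, NULL);
    }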

This also tidies up and consolidates some of the SH-2A/SH-4 FPU
fragmentation. The FPU state restorers are now defined in common code,
and the init_fpu()/fpu_init() mess has been reworked to follow the x86
convention.
The fpu_init() register initialization has been replaced by xstate setup
followed by writing out to hardware via the standard restore path.
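
Loosely, the trap-side entry point then has the shape sketched below (a sketch
of the reworked path, not a verbatim copy): make sure the task owns an xstate
buffer, then reuse the ordinary restore to get it into the hardware.

    #include <linux/sched.h>
    #include <asm/fpu.h>

    /* Would live alongside the other common SH FPU code. */
    void fpu_state_restore(struct pt_regs *regs)
    {
            struct task_struct *tsk = current;

            if (!tsk_used_math(tsk)) {
                    /*
                     * First FPU use: init_fpu() now kmem_cache_alloc()s
                     * thread.xstate and seeds FPSCR, replacing the old
                     * fpu_init() register initialization.
                     */
                    if (init_fpu(tsk))
                            do_group_exit(SIGKILL); /* out of memory */
            }

            grab_fpu(regs);                 /* clear SR.FD so FPU insns run */
            __fpu_state_restore();          /* push thread.xstate to the FPU */
    }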

As init_fpu() now performs a slab allocation, a secondary, lighter-weight
restorer is also introduced for the context-switch path.
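
Continuing the sketch above, the lighter-weight path is roughly the following:
no allocation and no pt_regs, just a register writeback for a task that
already owns valid xstate, with fpu_counter tracking how FPU-hungry the task
is (again a sketch, not the literal code).

    void __fpu_state_restore(void)
    {
            struct task_struct *tsk = current;

            restore_fpu(tsk);               /* registers only, no slab work */
            task_thread_info(tsk)->status |= TS_USEDFPU;
            tsk->fpu_counter++;
    }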

In the future the DSP state will be rolled in here, too.

More work remains for math emulation and the SH-5 FPU, which presently
uses its own special (UP-only) interfaces.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
2010-01-13 12:51:40 +09:00

72 lines
1.7 KiB
C

#ifndef __ASM_SH_FPU_H
#define __ASM_SH_FPU_H

#ifndef __ASSEMBLY__

struct task_struct;

#ifdef CONFIG_SH_FPU
static inline void release_fpu(struct pt_regs *regs)
{
        regs->sr |= SR_FD;
}

static inline void grab_fpu(struct pt_regs *regs)
{
        regs->sr &= ~SR_FD;
}

extern void save_fpu(struct task_struct *__tsk);
extern void restore_fpu(struct task_struct *__tsk);
extern void fpu_state_restore(struct pt_regs *regs);
extern void __fpu_state_restore(void);
#else
#define save_fpu(tsk)                   do { } while (0)
#define restore_fpu(tsk)                do { } while (0)
#define release_fpu(regs)               do { } while (0)
#define grab_fpu(regs)                  do { } while (0)
#define fpu_state_restore(regs)         do { } while (0)
#define __fpu_state_restore(regs)       do { } while (0)
#endif

struct user_regset;

extern int do_fpu_inst(unsigned short, struct pt_regs *);
extern int init_fpu(struct task_struct *);

extern int fpregs_get(struct task_struct *target,
                      const struct user_regset *regset,
                      unsigned int pos, unsigned int count,
                      void *kbuf, void __user *ubuf);

static inline void __unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
{
        if (task_thread_info(tsk)->status & TS_USEDFPU) {
                task_thread_info(tsk)->status &= ~TS_USEDFPU;
                save_fpu(tsk);
                release_fpu(regs);
        } else
                tsk->fpu_counter = 0;
}

static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
{
        preempt_disable();
        __unlazy_fpu(tsk, regs);
        preempt_enable();
}

static inline void clear_fpu(struct task_struct *tsk, struct pt_regs *regs)
{
        preempt_disable();
        if (task_thread_info(tsk)->status & TS_USEDFPU) {
                task_thread_info(tsk)->status &= ~TS_USEDFPU;
                release_fpu(regs);
        }
        preempt_enable();
}

#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_FPU_H */
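
For context, the unlazy_fpu()/clear_fpu() helpers above are meant to be driven
from the usual places; roughly (illustrative callers, not part of this header):

    /* __switch_to(): flush the outgoing task's live FPU state. */
    unlazy_fpu(prev, task_pt_regs(prev));

    /* flush_thread()/exec: throw away any lazily held FPU state. */
    clear_fpu(current, task_pt_regs(current));
    clear_used_math();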