x86/fpu: Change fpu_owner_task to fpu_fpregs_owner_ctx

Track the FPU owner context instead of the owner task: together with
other changes, this will allow subsequent patches to eliminate
'struct task_struct' usage in various FPU code: we'll be able to use
'struct fpu' only.

There's no change in code size:

      text           data     bss      dec            hex filename
  13066467        2545248 1626112 17237827        1070743 vmlinux.before
  13066467        2545248 1626112 17237827        1070743 vmlinux.after
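
For readers outside the kernel tree, here is a minimal user-space sketch of
the ownership model this patch moves to: each CPU tracks a pointer to the FPU
*context* whose registers are currently live, and a lazy restore can only be
skipped when that per-CPU pointer (and the context's last_cpu field) still
match. The types and helpers below (toy_fpu, toy_task, fpregs_owner[],
set_fpregs_owner(), can_skip_restore()) are simplified stand-ins for
illustration, not the kernel's actual per-CPU API:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Simplified stand-ins for the kernel's structures. */
struct toy_fpu {
	int last_cpu;		/* CPU whose registers last held this context */
	bool has_fpu;		/* context is currently loaded in hardware */
};

struct toy_task {
	struct toy_fpu fpu;
};

/*
 * Per-CPU owner pointer: which FPU context owns this CPU's registers.
 * This models fpu_fpregs_owner_ctx (previously fpu_owner_task).
 */
static struct toy_fpu *fpregs_owner[NR_CPUS];

/* Mark 'tsk's FPU context as loaded on 'cpu'. */
static void set_fpregs_owner(struct toy_task *tsk, int cpu)
{
	tsk->fpu.has_fpu = true;
	tsk->fpu.last_cpu = cpu;
	fpregs_owner[cpu] = &tsk->fpu;
}

/*
 * Lazy-restore check: the registers can be reused only if this CPU still
 * owns exactly this context and the context was last loaded on this CPU.
 */
static bool can_skip_restore(struct toy_task *tsk, int cpu)
{
	return fpregs_owner[cpu] == &tsk->fpu && tsk->fpu.last_cpu == cpu;
}

int main(void)
{
	struct toy_task a = { .fpu = { .last_cpu = -1 } };

	set_fpregs_owner(&a, 0);
	printf("skip restore on cpu0: %d\n", can_skip_restore(&a, 0)); /* 1 */

	fpregs_owner[0] = NULL;	/* models __cpu_disable_lazy_restore() */
	printf("skip restore on cpu0: %d\n", can_skip_restore(&a, 0)); /* 0 */
	return 0;
}

Keying the owner by context pointer rather than task pointer is what lets the
later patches in this series drop 'struct task_struct' from the low-level FPU
helpers entirely.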

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2015-04-23 12:18:28 +02:00
parent b0c050c5ba
commit 36b544dcd3
2 changed files with 11 additions and 12 deletions


@@ -37,7 +37,7 @@ extern unsigned int mxcsr_feature_mask;
 extern void fpu__cpu_init(void);
 extern void eager_fpu_init(void);
-DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);
+DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
 extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
 			      struct task_struct *tsk);
@@ -63,7 +63,7 @@ static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
 #endif
 /*
- * Must be run with preemption disabled: this clears the fpu_owner_task,
+ * Must be run with preemption disabled: this clears the fpu_fpregs_owner_ctx,
  * on this CPU.
  *
  * This will disable any lazy FPU state restore of the current FPU state,
@@ -71,7 +71,7 @@ static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
  */
 static inline void __cpu_disable_lazy_restore(unsigned int cpu)
 {
-	per_cpu(fpu_owner_task, cpu) = NULL;
+	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
 }
 /*
@@ -86,7 +86,7 @@ static inline void task_disable_lazy_fpu_restore(struct task_struct *tsk)
 static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
 {
-	return new == this_cpu_read_stable(fpu_owner_task) &&
+	return &new->thread.fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) &&
 		cpu == new->thread.fpu.last_cpu;
 }
@@ -327,14 +327,14 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
 static inline void __thread_clear_has_fpu(struct fpu *fpu)
 {
 	fpu->has_fpu = 0;
-	this_cpu_write(fpu_owner_task, NULL);
+	this_cpu_write(fpu_fpregs_owner_ctx, NULL);
 }
 /* Must be paired with a 'clts' before! */
 static inline void __thread_set_has_fpu(struct task_struct *tsk)
 {
 	tsk->thread.fpu.has_fpu = 1;
-	this_cpu_write(fpu_owner_task, tsk);
+	this_cpu_write(fpu_fpregs_owner_ctx, &tsk->thread.fpu);
 }
 /*
@@ -431,7 +431,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 	else
 		old->thread.fpu.last_cpu = cpu;
-	/* But leave fpu_owner_task! */
+	/* But leave fpu_fpregs_owner_ctx! */
 	old->thread.fpu.has_fpu = 0;
 	/* Don't change CR0.TS if we just switch! */


@@ -21,9 +21,9 @@
 static DEFINE_PER_CPU(bool, in_kernel_fpu);
 /*
- * Track which task is using the FPU on the CPU:
+ * Track which context is using the FPU on the CPU:
  */
-DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
+DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
 static void kernel_fpu_disable(void)
 {
@@ -96,15 +96,14 @@ EXPORT_SYMBOL(irq_fpu_usable);
 void __kernel_fpu_begin(void)
 {
-	struct task_struct *me = current;
-	struct fpu *fpu = &me->thread.fpu;
+	struct fpu *fpu = &current->thread.fpu;
 	kernel_fpu_disable();
 	if (fpu->has_fpu) {
 		fpu_save_init(fpu);
 	} else {
-		this_cpu_write(fpu_owner_task, NULL);
+		this_cpu_write(fpu_fpregs_owner_ctx, NULL);
 		if (!use_eager_fpu())
 			clts();
 	}