Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fpu fixes and cleanups from Ingo Molnar:
 "This is _way_ more cleanups than fixes, but the bugs were subtle and
  hard to hit, and the primary reason for them existing was the
  unnecessary historical complexity of some of the x86/fpu interfaces.

   The first bunch of commits clean up and simplify the xstate
   user-copy handling functions, in reaction to the collective
   head-scratching about the xstate user-copy handling code that led up
   to the fix for this Skylake xstate handling bug:

     0852b374173b: x86/fpu: Add FPU state copying quirk to handle XRSTOR failure on Intel Skylake CPUs

  The cleanups don't change any functionality, they just (hopefully)
  make it all clearer, more consistent, more debuggable and more robust.

   Note that most of the linecount increase comes from these commits,
   where we split the user/kernel copy logic into more explicit
   variants, instead of repeating fragile patterns like:

               if (kbuf) {
                       memcpy(kbuf + pos, data, copy);
               } else {
                       if (__copy_to_user(ubuf + pos, data, copy))
                               return -EFAULT;
               }
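
   For reference, this is the shape of the dedicated variants that
   replace the pattern, condensed from the __copy_xstate_to_kernel()
   and __copy_xstate_to_user() helpers added further below - the
   kernel-side copy cannot fail, so only the user-side variant
   returns an error:

       static inline void
       __copy_xstate_to_kernel(void *kbuf, const void *data,
                               unsigned int offset, unsigned int size,
                               unsigned int size_total)
       {
               if (offset < size_total) {
                       unsigned int copy = min(size, size_total - offset);

                       memcpy(kbuf + offset, data, copy);
               }
       }

       static inline int
       __copy_xstate_to_user(void __user *ubuf, const void *data,
                             unsigned int offset, unsigned int size,
                             unsigned int size_total)
       {
               if (!size)
                       return 0;

               if (offset < size_total) {
                       unsigned int copy = min(size, size_total - offset);

                       if (__copy_to_user(ubuf + offset, data, copy))
                               return -EFAULT;
               }
               return 0;
       }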

  The next bunch of commits simplify the FPU state-machine to get rid of
  old lazy-FPU idiosyncrasies - a defensive simplification to make all
  the code easier to review and fix. No change in functionality.
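
   In rough sketch form, the state-machine simplification (visible in
   the 'struct fpu' hunk below) collapses the old two-flag scheme into
   a single flag:

       /*
        * Old scheme - two flags, three valid states:
        *
        *   fpstate_active == 0                        # FPU never used
        *   fpstate_active == 1 && fpregs_active == 1  # regs live in the CPU
        *   fpstate_active == 1 && fpregs_active == 0  # regs stale, fpstate valid
        *
        * New scheme - a single flag:
        *
        *   initialized == 0                           # FPU never used
        *   initialized == 1                           # fpstate valid; whether the
        *     registers are live is tracked via fpu_fpregs_owner_ctx and
        *     fpu->last_cpu instead
        */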

   Then there are a couple of additional debugging tweaks: a static
   checker warning fix, and the move of an FPU-related warning under
   WARN_ON_FPU(), followed by another bunch of commits that represent a
   fine-grained split-up of the fixes from Eric Biggers to handle weird
   xstate bits properly (the key new helper is quoted below).

   I did this fine-grained split-up because some of these fixes also
  impact the ABI for weird xstate handling, for which we'd like to have
  good bisection results, should they cause any problems. (We also had
  one regression with the more monolithic fixes, so splitting it all up
  sounded prudent for robustness reasons as well.)
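
   A central piece of those fixes is the new validate_xstate_header()
   helper, quoted here in condensed form from the hunk below; it
   rejects xstate headers from user space that set unknown, supervisor
   or reserved bits, or that claim the compacted format:

       int validate_xstate_header(const struct xstate_header *hdr)
       {
               /* No unknown or supervisor features may be set */
               if (hdr->xfeatures & (~xfeatures_mask | XFEATURE_MASK_SUPERVISOR))
                       return -EINVAL;

               /* Userspace must use the uncompacted format */
               if (hdr->xcomp_bv)
                       return -EINVAL;

               /* No reserved bits may be set */
               if (memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
                       return -EINVAL;

               return 0;
       }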

  About the whole series: the commits up to 03eaec81ac have been in
  -next for months - but I've recently rebased them to remove a state
  machine clean-up commit that was objected to, and to make it more
  bisectable - so technically it's a new, rebased tree.

   Robustness history: this series had some regressions along the way,
   and all reported regressions have been fixed. All but one of the
   regressions manifested themselves as easy-to-report warnings. The
   previous version of this latest series was also in linux-next, with
   one (warning-only) regression reported, which is fixed in the latest
   version.

   Barring last-minute brown-paper-bag bugs (and the commits are now a
   day older, which I'd hope helps paper-bag reduction), I'm reasonably
   confident about its general robustness.

  Famous last words ..."

* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (42 commits)
  x86/fpu: Use using_compacted_format() instead of open coded X86_FEATURE_XSAVES
  x86/fpu: Use validate_xstate_header() to validate the xstate_header in copy_user_to_xstate()
  x86/fpu: Eliminate the 'xfeatures' local variable in copy_user_to_xstate()
  x86/fpu: Copy the full header in copy_user_to_xstate()
  x86/fpu: Use validate_xstate_header() to validate the xstate_header in copy_kernel_to_xstate()
  x86/fpu: Eliminate the 'xfeatures' local variable in copy_kernel_to_xstate()
  x86/fpu: Copy the full state_header in copy_kernel_to_xstate()
  x86/fpu: Use validate_xstate_header() to validate the xstate_header in __fpu__restore_sig()
  x86/fpu: Use validate_xstate_header() to validate the xstate_header in xstateregs_set()
  x86/fpu: Introduce validate_xstate_header()
  x86/fpu: Rename fpu__activate_fpstate_read/write() to fpu__prepare_[read|write]()
  x86/fpu: Rename fpu__activate_curr() to fpu__initialize()
  x86/fpu: Simplify and speed up fpu__copy()
  x86/fpu: Fix stale comments about lazy FPU logic
  x86/fpu: Rename fpu::fpstate_active to fpu::initialized
  x86/fpu: Remove fpu__current_fpstate_write_begin/end()
  x86/fpu: Fix fpu__activate_fpstate_read() and update comments
  x86/fpu: Reinitialize FPU registers if restoring FPU state fails
  x86/fpu: Don't let userspace set bogus xcomp_bv
  x86/fpu: Turn WARN_ON() in context switch into WARN_ON_FPU()
  ...
Linus Torvalds 2017-09-27 08:33:26 -07:00
commit 7031b64125
15 changed files with 385 additions and 325 deletions

@@ -231,7 +231,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
ksig->ka.sa.sa_restorer)
sp = (unsigned long) ksig->ka.sa.sa_restorer;
if (fpu->fpstate_active) {
if (fpu->initialized) {
unsigned long fx_aligned, math_size;
sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size);

@@ -23,11 +23,9 @@
/*
* High level FPU state handling functions:
*/
extern void fpu__activate_curr(struct fpu *fpu);
extern void fpu__activate_fpstate_read(struct fpu *fpu);
extern void fpu__activate_fpstate_write(struct fpu *fpu);
extern void fpu__current_fpstate_write_begin(void);
extern void fpu__current_fpstate_write_end(void);
extern void fpu__initialize(struct fpu *fpu);
extern void fpu__prepare_read(struct fpu *fpu);
extern void fpu__prepare_write(struct fpu *fpu);
extern void fpu__save(struct fpu *fpu);
extern void fpu__restore(struct fpu *fpu);
extern int fpu__restore_sig(void __user *buf, int ia32_frame);
@@ -120,20 +118,11 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
err; \
})
#define check_insn(insn, output, input...) \
({ \
int err; \
#define kernel_insn(insn, output, input...) \
asm volatile("1:" #insn "\n\t" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: movl $-1,%[err]\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
: [err] "=r" (err), output \
: "0"(0), input); \
err; \
})
_ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore) \
: output : input)
static inline int copy_fregs_to_user(struct fregs_state __user *fx)
{
@@ -153,20 +142,16 @@ static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
{
int err;
if (IS_ENABLED(CONFIG_X86_32)) {
err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
} else {
if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) {
err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
} else {
/* See comment in copy_fxregs_to_kernel() below. */
err = check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
kernel_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
}
}
/* Copying from a kernel buffer to FPU registers should never fail: */
WARN_ON_FPU(err);
}
static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
@@ -183,9 +168,7 @@ static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
static inline void copy_kernel_to_fregs(struct fregs_state *fx)
{
int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
WARN_ON_FPU(err);
kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}
static inline int copy_user_to_fregs(struct fregs_state __user *fx)
@@ -281,18 +264,13 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
* Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
* XSAVE area format.
*/
#define XSTATE_XRESTORE(st, lmask, hmask, err) \
#define XSTATE_XRESTORE(st, lmask, hmask) \
asm volatile(ALTERNATIVE(XRSTOR, \
XRSTORS, X86_FEATURE_XSAVES) \
"\n" \
"xor %[err], %[err]\n" \
"3:\n" \
".pushsection .fixup,\"ax\"\n" \
"4: movl $-2, %[err]\n" \
"jmp 3b\n" \
".popsection\n" \
_ASM_EXTABLE(661b, 4b) \
: [err] "=r" (err) \
_ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\
: \
: "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
: "memory")
@@ -336,7 +314,10 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
else
XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
/* We should never fault when copying from a kernel buffer: */
/*
* We should never fault when copying from a kernel buffer, and the FPU
* state we set at boot time should be valid.
*/
WARN_ON_FPU(err);
}
@@ -350,7 +331,7 @@ static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
u32 hmask = mask >> 32;
int err;
WARN_ON(!alternatives_patched);
WARN_ON_FPU(!alternatives_patched);
XSTATE_XSAVE(xstate, lmask, hmask, err);
@@ -365,12 +346,8 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
{
u32 lmask = mask;
u32 hmask = mask >> 32;
int err;
XSTATE_XRESTORE(xstate, lmask, hmask, err);
/* We should never fault when copying from a kernel buffer: */
WARN_ON_FPU(err);
XSTATE_XRESTORE(xstate, lmask, hmask);
}
/*
@@ -526,37 +503,16 @@ static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
*/
static inline void fpregs_deactivate(struct fpu *fpu)
{
WARN_ON_FPU(!fpu->fpregs_active);
fpu->fpregs_active = 0;
this_cpu_write(fpu_fpregs_owner_ctx, NULL);
trace_x86_fpu_regs_deactivated(fpu);
}
static inline void fpregs_activate(struct fpu *fpu)
{
WARN_ON_FPU(fpu->fpregs_active);
fpu->fpregs_active = 1;
this_cpu_write(fpu_fpregs_owner_ctx, fpu);
trace_x86_fpu_regs_activated(fpu);
}
/*
* The question "does this thread have fpu access?"
* is slightly racy, since preemption could come in
* and revoke it immediately after the test.
*
* However, even in that very unlikely scenario,
* we can just assume we have FPU access - typically
* to save the FP state - we'll just take a #NM
* fault and get the FPU access back.
*/
static inline int fpregs_active(void)
{
return current->thread.fpu.fpregs_active;
}
/*
* FPU state switching for scheduling.
*
@@ -571,14 +527,13 @@ static inline int fpregs_active(void)
static inline void
switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
if (old_fpu->fpregs_active) {
if (old_fpu->initialized) {
if (!copy_fpregs_to_fpstate(old_fpu))
old_fpu->last_cpu = -1;
else
old_fpu->last_cpu = cpu;
/* But leave fpu_fpregs_owner_ctx! */
old_fpu->fpregs_active = 0;
trace_x86_fpu_regs_deactivated(old_fpu);
} else
old_fpu->last_cpu = -1;
@@ -595,7 +550,7 @@ switch_fpu_prepare(struct fpu *old_fpu, int cpu)
static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
{
bool preload = static_cpu_has(X86_FEATURE_FPU) &&
new_fpu->fpstate_active;
new_fpu->initialized;
if (preload) {
if (!fpregs_state_valid(new_fpu, cpu))
@@ -617,8 +572,7 @@ static inline void user_fpu_begin(void)
struct fpu *fpu = &current->thread.fpu;
preempt_disable();
if (!fpregs_active())
fpregs_activate(fpu);
fpregs_activate(fpu);
preempt_enable();
}

@@ -68,6 +68,9 @@ struct fxregs_state {
/* Default value for fxregs_state.mxcsr: */
#define MXCSR_DEFAULT 0x1f80
/* Copy both mxcsr & mxcsr_flags with a single u64 memcpy: */
#define MXCSR_AND_FLAGS_SIZE sizeof(u64)
/*
* Software based FPU emulation state. This is arbitrary really,
* it matches the x87 format to make it easier to understand:
@@ -290,36 +293,13 @@ struct fpu {
unsigned int last_cpu;
/*
* @fpstate_active:
* @initialized:
*
* This flag indicates whether this context is active: if the task
* This flag indicates whether this context is initialized: if the task
* is not running then we can restore from this context, if the task
* is running then we should save into this context.
*/
unsigned char fpstate_active;
/*
* @fpregs_active:
*
* This flag determines whether a given context is actively
* loaded into the FPU's registers and that those registers
* represent the task's current FPU state.
*
* Note the interaction with fpstate_active:
*
* # task does not use the FPU:
* fpstate_active == 0
*
* # task uses the FPU and regs are active:
* fpstate_active == 1 && fpregs_active == 1
*
* # the regs are inactive but still match fpstate:
* fpstate_active == 1 && fpregs_active == 0 && fpregs_owner == fpu
*
* The third state is what we use for the lazy restore optimization
* on lazy-switching CPUs.
*/
unsigned char fpregs_active;
unsigned char initialized;
/*
* @state:

@@ -48,8 +48,12 @@ void fpu__xstate_clear_all_cpu_caps(void);
void *get_xsave_addr(struct xregs_state *xsave, int xstate);
const void *get_xsave_field_ptr(int xstate_field);
int using_compacted_format(void);
int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
void __user *ubuf, struct xregs_state *xsave);
int copyin_to_xsaves(const void *kbuf, const void __user *ubuf,
struct xregs_state *xsave);
int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf);
int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf);
/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
extern int validate_xstate_header(const struct xstate_header *hdr);
#endif

@@ -12,25 +12,22 @@ DECLARE_EVENT_CLASS(x86_fpu,
TP_STRUCT__entry(
__field(struct fpu *, fpu)
__field(bool, fpregs_active)
__field(bool, fpstate_active)
__field(bool, initialized)
__field(u64, xfeatures)
__field(u64, xcomp_bv)
),
TP_fast_assign(
__entry->fpu = fpu;
__entry->fpregs_active = fpu->fpregs_active;
__entry->fpstate_active = fpu->fpstate_active;
__entry->initialized = fpu->initialized;
if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
__entry->xfeatures = fpu->state.xsave.header.xfeatures;
__entry->xcomp_bv = fpu->state.xsave.header.xcomp_bv;
}
),
TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d xfeatures: %llx xcomp_bv: %llx",
TP_printk("x86/fpu: %p initialized: %d xfeatures: %llx xcomp_bv: %llx",
__entry->fpu,
__entry->fpregs_active,
__entry->fpstate_active,
__entry->initialized,
__entry->xfeatures,
__entry->xcomp_bv
)

@@ -100,7 +100,7 @@ void __kernel_fpu_begin(void)
kernel_fpu_disable();
if (fpu->fpregs_active) {
if (fpu->initialized) {
/*
* Ignore return value -- we don't care if reg state
* is clobbered.
@@ -116,7 +116,7 @@ void __kernel_fpu_end(void)
{
struct fpu *fpu = &current->thread.fpu;
if (fpu->fpregs_active)
if (fpu->initialized)
copy_kernel_to_fpregs(&fpu->state);
kernel_fpu_enable();
@@ -148,7 +148,7 @@ void fpu__save(struct fpu *fpu)
preempt_disable();
trace_x86_fpu_before_save(fpu);
if (fpu->fpregs_active) {
if (fpu->initialized) {
if (!copy_fpregs_to_fpstate(fpu)) {
copy_kernel_to_fpregs(&fpu->state);
}
@@ -189,10 +189,9 @@ EXPORT_SYMBOL_GPL(fpstate_init);
int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
dst_fpu->fpregs_active = 0;
dst_fpu->last_cpu = -1;
if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU))
if (!src_fpu->initialized || !static_cpu_has(X86_FEATURE_FPU))
return 0;
WARN_ON_FPU(src_fpu != &current->thread.fpu);
@@ -206,26 +205,14 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
/*
* Save current FPU registers directly into the child
* FPU context, without any memory-to-memory copying.
* In lazy mode, if the FPU context isn't loaded into
* fpregs, CR0.TS will be set and do_device_not_available
* will load the FPU context.
*
* We have to do all this with preemption disabled,
* mostly because of the FNSAVE case, because in that
* case we must not allow preemption in the window
* between the FNSAVE and us marking the context lazy.
*
* It shouldn't be an issue as even FNSAVE is plenty
* fast in terms of critical section length.
* ( The function 'fails' in the FNSAVE case, which destroys
* register contents so we have to copy them back. )
*/
preempt_disable();
if (!copy_fpregs_to_fpstate(dst_fpu)) {
memcpy(&src_fpu->state, &dst_fpu->state,
fpu_kernel_xstate_size);
memcpy(&src_fpu->state, &dst_fpu->state, fpu_kernel_xstate_size);
copy_kernel_to_fpregs(&src_fpu->state);
}
preempt_enable();
trace_x86_fpu_copy_src(src_fpu);
trace_x86_fpu_copy_dst(dst_fpu);
@@ -237,45 +224,48 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
* Activate the current task's in-memory FPU context,
* if it has not been used before:
*/
void fpu__activate_curr(struct fpu *fpu)
void fpu__initialize(struct fpu *fpu)
{
WARN_ON_FPU(fpu != &current->thread.fpu);
if (!fpu->fpstate_active) {
if (!fpu->initialized) {
fpstate_init(&fpu->state);
trace_x86_fpu_init_state(fpu);
trace_x86_fpu_activate_state(fpu);
/* Safe to do for the current task: */
fpu->fpstate_active = 1;
fpu->initialized = 1;
}
}
EXPORT_SYMBOL_GPL(fpu__activate_curr);
EXPORT_SYMBOL_GPL(fpu__initialize);
/*
* This function must be called before we read a task's fpstate.
*
* If the task has not used the FPU before then initialize its
* fpstate.
* There's two cases where this gets called:
*
* - for the current task (when coredumping), in which case we have
* to save the latest FPU registers into the fpstate,
*
* - or it's called for stopped tasks (ptrace), in which case the
* registers were already saved by the context-switch code when
* the task scheduled out - we only have to initialize the registers
* if they've never been initialized.
*
* If the task has used the FPU before then save it.
*/
void fpu__activate_fpstate_read(struct fpu *fpu)
void fpu__prepare_read(struct fpu *fpu)
{
/*
* If fpregs are active (in the current CPU), then
* copy them to the fpstate:
*/
if (fpu->fpregs_active) {
if (fpu == &current->thread.fpu) {
fpu__save(fpu);
} else {
if (!fpu->fpstate_active) {
if (!fpu->initialized) {
fpstate_init(&fpu->state);
trace_x86_fpu_init_state(fpu);
trace_x86_fpu_activate_state(fpu);
/* Safe to do for current and for stopped child tasks: */
fpu->fpstate_active = 1;
fpu->initialized = 1;
}
}
}
@@ -283,17 +273,17 @@ void fpu__activate_fpstate_read(struct fpu *fpu)
/*
* This function must be called before we write a task's fpstate.
*
* If the task has used the FPU before then unlazy it.
* If the task has used the FPU before then invalidate any cached FPU registers.
* If the task has not used the FPU before then initialize its fpstate.
*
* After this function call, after registers in the fpstate are
* modified and the child task has woken up, the child task will
* restore the modified FPU state from the modified context. If we
* didn't clear its lazy status here then the lazy in-registers
* didn't clear its cached status here then the cached in-registers
* state pending on its former CPU could be restored, corrupting
* the modifications.
*/
void fpu__activate_fpstate_write(struct fpu *fpu)
void fpu__prepare_write(struct fpu *fpu)
{
/*
* Only stopped child tasks can be used to modify the FPU
@@ -301,8 +291,8 @@ void fpu__activate_fpstate_write(struct fpu *fpu)
*/
WARN_ON_FPU(fpu == &current->thread.fpu);
if (fpu->fpstate_active) {
/* Invalidate any lazy state: */
if (fpu->initialized) {
/* Invalidate any cached state: */
__fpu_invalidate_fpregs_state(fpu);
} else {
fpstate_init(&fpu->state);
@@ -310,73 +300,10 @@ void fpu__activate_fpstate_write(struct fpu *fpu)
trace_x86_fpu_activate_state(fpu);
/* Safe to do for stopped child tasks: */
fpu->fpstate_active = 1;
fpu->initialized = 1;
}
}
/*
* This function must be called before we write the current
* task's fpstate.
*
* This call gets the current FPU register state and moves
* it in to the 'fpstate'. Preemption is disabled so that
* no writes to the 'fpstate' can occur from context
* swiches.
*
* Must be followed by a fpu__current_fpstate_write_end().
*/
void fpu__current_fpstate_write_begin(void)
{
struct fpu *fpu = &current->thread.fpu;
/*
* Ensure that the context-switching code does not write
* over the fpstate while we are doing our update.
*/
preempt_disable();
/*
* Move the fpregs in to the fpu's 'fpstate'.
*/
fpu__activate_fpstate_read(fpu);
/*
* The caller is about to write to 'fpu'. Ensure that no
* CPU thinks that its fpregs match the fpstate. This
* ensures we will not be lazy and skip a XRSTOR in the
* future.
*/
__fpu_invalidate_fpregs_state(fpu);
}
/*
* This function must be paired with fpu__current_fpstate_write_begin()
*
* This will ensure that the modified fpstate gets placed back in
* the fpregs if necessary.
*
* Note: This function may be called whether or not an _actual_
* write to the fpstate occurred.
*/
void fpu__current_fpstate_write_end(void)
{
struct fpu *fpu = &current->thread.fpu;
/*
* 'fpu' now has an updated copy of the state, but the
* registers may still be out of date. Update them with
* an XRSTOR if they are active.
*/
if (fpregs_active())
copy_kernel_to_fpregs(&fpu->state);
/*
* Our update is done and the fpregs/fpstate are in sync
* if necessary. Context switches can happen again.
*/
preempt_enable();
}
/*
* 'fpu__restore()' is called to copy FPU registers from
* the FPU fpstate to the live hw registers and to activate
@@ -389,7 +316,7 @@ void fpu__current_fpstate_write_end(void)
*/
void fpu__restore(struct fpu *fpu)
{
fpu__activate_curr(fpu);
fpu__initialize(fpu);
/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
kernel_fpu_disable();
@@ -414,15 +341,17 @@ void fpu__drop(struct fpu *fpu)
{
preempt_disable();
if (fpu->fpregs_active) {
/* Ignore delayed exceptions from user space */
asm volatile("1: fwait\n"
"2:\n"
_ASM_EXTABLE(1b, 2b));
fpregs_deactivate(fpu);
if (fpu == &current->thread.fpu) {
if (fpu->initialized) {
/* Ignore delayed exceptions from user space */
asm volatile("1: fwait\n"
"2:\n"
_ASM_EXTABLE(1b, 2b));
fpregs_deactivate(fpu);
}
}
fpu->fpstate_active = 0;
fpu->initialized = 0;
trace_x86_fpu_dropped(fpu);
@@ -462,9 +391,11 @@ void fpu__clear(struct fpu *fpu)
* Make sure fpstate is cleared and initialized.
*/
if (static_cpu_has(X86_FEATURE_FPU)) {
fpu__activate_curr(fpu);
preempt_disable();
fpu__initialize(fpu);
user_fpu_begin();
copy_init_fpstate_to_fpregs();
preempt_enable();
}
}

@@ -240,7 +240,7 @@ static void __init fpu__init_system_ctx_switch(void)
WARN_ON_FPU(!on_boot_cpu);
on_boot_cpu = 0;
WARN_ON_FPU(current->thread.fpu.fpstate_active);
WARN_ON_FPU(current->thread.fpu.initialized);
}
/*

@@ -16,14 +16,14 @@ int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
struct fpu *target_fpu = &target->thread.fpu;
return target_fpu->fpstate_active ? regset->n : 0;
return target_fpu->initialized ? regset->n : 0;
}
int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
struct fpu *target_fpu = &target->thread.fpu;
if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->fpstate_active)
if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->initialized)
return regset->n;
else
return 0;
@@ -38,7 +38,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
if (!boot_cpu_has(X86_FEATURE_FXSR))
return -ENODEV;
fpu__activate_fpstate_read(fpu);
fpu__prepare_read(fpu);
fpstate_sanitize_xstate(fpu);
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
@@ -55,7 +55,7 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
if (!boot_cpu_has(X86_FEATURE_FXSR))
return -ENODEV;
fpu__activate_fpstate_write(fpu);
fpu__prepare_write(fpu);
fpstate_sanitize_xstate(fpu);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
@@ -89,10 +89,13 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
xsave = &fpu->state.xsave;
fpu__activate_fpstate_read(fpu);
fpu__prepare_read(fpu);
if (using_compacted_format()) {
ret = copyout_from_xsaves(pos, count, kbuf, ubuf, xsave);
if (kbuf)
ret = copy_xstate_to_kernel(kbuf, xsave, pos, count);
else
ret = copy_xstate_to_user(ubuf, xsave, pos, count);
} else {
fpstate_sanitize_xstate(fpu);
/*
@@ -129,12 +132,23 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
xsave = &fpu->state.xsave;
fpu__activate_fpstate_write(fpu);
fpu__prepare_write(fpu);
if (boot_cpu_has(X86_FEATURE_XSAVES))
ret = copyin_to_xsaves(kbuf, ubuf, xsave);
else
if (using_compacted_format()) {
if (kbuf)
ret = copy_kernel_to_xstate(xsave, kbuf);
else
ret = copy_user_to_xstate(xsave, ubuf);
} else {
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
if (!ret)
ret = validate_xstate_header(&xsave->header);
}
/*
* mxcsr reserved bits must be masked to zero for security reasons.
*/
xsave->i387.mxcsr &= mxcsr_feature_mask;
/*
* In case of failure, mark all states as init:
@@ -142,16 +156,6 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
if (ret)
fpstate_init(&fpu->state);
/*
* mxcsr reserved bits must be masked to zero for security reasons.
*/
xsave->i387.mxcsr &= mxcsr_feature_mask;
xsave->header.xfeatures &= xfeatures_mask;
/*
* These bits must be zero.
*/
memset(&xsave->header.reserved, 0, 48);
return ret;
}
@@ -299,7 +303,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
struct fpu *fpu = &target->thread.fpu;
struct user_i387_ia32_struct env;
fpu__activate_fpstate_read(fpu);
fpu__prepare_read(fpu);
if (!boot_cpu_has(X86_FEATURE_FPU))
return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
@@ -329,7 +333,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
struct user_i387_ia32_struct env;
int ret;
fpu__activate_fpstate_write(fpu);
fpu__prepare_write(fpu);
fpstate_sanitize_xstate(fpu);
if (!boot_cpu_has(X86_FEATURE_FPU))
@@ -369,7 +373,7 @@ int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
struct fpu *fpu = &tsk->thread.fpu;
int fpvalid;
fpvalid = fpu->fpstate_active;
fpvalid = fpu->initialized;
if (fpvalid)
fpvalid = !fpregs_get(tsk, NULL,
0, sizeof(struct user_i387_ia32_struct),

@@ -155,7 +155,8 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
*/
int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
{
struct xregs_state *xsave = &current->thread.fpu.state.xsave;
struct fpu *fpu = &current->thread.fpu;
struct xregs_state *xsave = &fpu->state.xsave;
struct task_struct *tsk = current;
int ia32_fxstate = (buf != buf_fx);
@@ -170,13 +171,13 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
sizeof(struct user_i387_ia32_struct), NULL,
(struct _fpstate_32 __user *) buf) ? -1 : 1;
if (fpregs_active() || using_compacted_format()) {
if (fpu->initialized || using_compacted_format()) {
/* Save the live register state to the user directly. */
if (copy_fpregs_to_sigframe(buf_fx))
return -1;
/* Update the thread's fxstate to save the fsave header. */
if (ia32_fxstate)
copy_fxregs_to_kernel(&tsk->thread.fpu);
copy_fxregs_to_kernel(fpu);
} else {
/*
* It is a *bug* if kernel uses compacted-format for xsave
@@ -189,7 +190,7 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
return -1;
}
fpstate_sanitize_xstate(&tsk->thread.fpu);
fpstate_sanitize_xstate(fpu);
if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size))
return -1;
}
@@ -213,8 +214,11 @@ sanitize_restored_xstate(struct task_struct *tsk,
struct xstate_header *header = &xsave->header;
if (use_xsave()) {
/* These bits must be zero. */
memset(header->reserved, 0, 48);
/*
* Note: we don't need to zero the reserved bits in the
* xstate_header here because we either didn't copy them at all,
* or we checked earlier that they aren't set.
*/
/*
* Init the state that is not present in the memory
@@ -223,7 +227,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
if (fx_only)
header->xfeatures = XFEATURE_MASK_FPSSE;
else
header->xfeatures &= (xfeatures_mask & xfeatures);
header->xfeatures &= xfeatures;
}
if (use_fxsr()) {
@@ -279,7 +283,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
if (!access_ok(VERIFY_READ, buf, size))
return -EACCES;
fpu__activate_curr(fpu);
fpu__initialize(fpu);
if (!static_cpu_has(X86_FEATURE_FPU))
return fpregs_soft_set(current, NULL,
@@ -307,28 +311,29 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
/*
* For 32-bit frames with fxstate, copy the user state to the
* thread's fpu state, reconstruct fxstate from the fsave
* header. Sanitize the copied state etc.
* header. Validate and sanitize the copied state.
*/
struct fpu *fpu = &tsk->thread.fpu;
struct user_i387_ia32_struct env;
int err = 0;
/*
* Drop the current fpu which clears fpu->fpstate_active. This ensures
* Drop the current fpu which clears fpu->initialized. This ensures
* that any context-switch during the copy of the new state,
* avoids the intermediate state from getting restored/saved.
* Thus avoiding the new restored state from getting corrupted.
* We will be ready to restore/save the state only after
* fpu->fpstate_active is again set.
* fpu->initialized is again set.
*/
fpu__drop(fpu);
if (using_compacted_format()) {
err = copyin_to_xsaves(NULL, buf_fx,
&fpu->state.xsave);
err = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
} else {
err = __copy_from_user(&fpu->state.xsave,
buf_fx, state_size);
err = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
if (!err && state_size > offsetof(struct xregs_state, header))
err = validate_xstate_header(&fpu->state.xsave.header);
}
if (err || __copy_from_user(&env, buf, sizeof(env))) {
@@ -339,7 +344,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
}
fpu->fpstate_active = 1;
fpu->initialized = 1;
preempt_disable();
fpu__restore(fpu);
preempt_enable();

@@ -483,6 +483,30 @@ int using_compacted_format(void)
return boot_cpu_has(X86_FEATURE_XSAVES);
}
/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
int validate_xstate_header(const struct xstate_header *hdr)
{
/* No unknown or supervisor features may be set */
if (hdr->xfeatures & (~xfeatures_mask | XFEATURE_MASK_SUPERVISOR))
return -EINVAL;
/* Userspace must use the uncompacted format */
if (hdr->xcomp_bv)
return -EINVAL;
/*
* If 'reserved' is shrunken to add a new field, make sure to validate
* that new field here!
*/
BUILD_BUG_ON(sizeof(hdr->reserved) != 48);
/* No reserved bits may be set */
if (memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
return -EINVAL;
return 0;
}
static void __xstate_dump_leaves(void)
{
int i;
@@ -867,7 +891,7 @@ const void *get_xsave_field_ptr(int xsave_state)
{
struct fpu *fpu = &current->thread.fpu;
if (!fpu->fpstate_active)
if (!fpu->initialized)
return NULL;
/*
* fpu__save() takes the CPU's xstate registers
@@ -920,48 +944,55 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
}
#endif /* ! CONFIG_ARCH_HAS_PKEYS */
/*
* Weird legacy quirk: SSE and YMM states store information in the
* MXCSR and MXCSR_FLAGS fields of the FP area. That means if the FP
* area is marked as unused in the xfeatures header, we need to copy
* MXCSR and MXCSR_FLAGS if either SSE or YMM are in use.
*/
static inline bool xfeatures_mxcsr_quirk(u64 xfeatures)
{
if (!(xfeatures & (XFEATURE_MASK_SSE|XFEATURE_MASK_YMM)))
return false;
if (xfeatures & XFEATURE_MASK_FP)
return false;
return true;
}
/*
* This is similar to user_regset_copyout(), but will not add offset to
* the source data pointer or increment pos, count, kbuf, and ubuf.
*/
static inline int xstate_copyout(unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf,
const void *data, const int start_pos,
const int end_pos)
static inline void
__copy_xstate_to_kernel(void *kbuf, const void *data,
unsigned int offset, unsigned int size, unsigned int size_total)
{
if ((count == 0) || (pos < start_pos))
return 0;
if (offset < size_total) {
unsigned int copy = min(size, size_total - offset);
if (end_pos < 0 || pos < end_pos) {
unsigned int copy = (end_pos < 0 ? count : min(count, end_pos - pos));
if (kbuf) {
memcpy(kbuf + pos, data, copy);
} else {
if (__copy_to_user(ubuf + pos, data, copy))
return -EFAULT;
}
memcpy(kbuf + offset, data, copy);
}
return 0;
}
/*
* Convert from kernel XSAVES compacted format to standard format and copy
* to a ptrace buffer. It supports partial copy but pos always starts from
* zero. This is called from xstateregs_get() and there we check the CPU
* has XSAVES.
* to a kernel-space ptrace buffer.
*
* It supports partial copy but pos always starts from zero. This is called
* from xstateregs_get() and there we check the CPU has XSAVES.
*/
int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
void __user *ubuf, struct xregs_state *xsave)
int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
{
unsigned int offset, size;
int ret, i;
struct xstate_header header;
int i;
/*
* Currently copy_regset_to_user() starts from pos 0:
*/
if (unlikely(pos != 0))
if (unlikely(offset_start != 0))
return -EFAULT;
/*
@@ -977,8 +1008,91 @@ int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
offset = offsetof(struct xregs_state, header);
size = sizeof(header);
ret = xstate_copyout(offset, size, kbuf, ubuf, &header, 0, count);
__copy_xstate_to_kernel(kbuf, &header, offset, size, size_total);
for (i = 0; i < XFEATURE_MAX; i++) {
/*
* Copy only in-use xstates:
*/
if ((header.xfeatures >> i) & 1) {
void *src = __raw_xsave_addr(xsave, 1 << i);
offset = xstate_offsets[i];
size = xstate_sizes[i];
/* The next component has to fit fully into the output buffer: */
if (offset + size > size_total)
break;
__copy_xstate_to_kernel(kbuf, src, offset, size, size_total);
}
}
if (xfeatures_mxcsr_quirk(header.xfeatures)) {
offset = offsetof(struct fxregs_state, mxcsr);
size = MXCSR_AND_FLAGS_SIZE;
__copy_xstate_to_kernel(kbuf, &xsave->i387.mxcsr, offset, size, size_total);
}
/*
* Fill xsave->i387.sw_reserved value for ptrace frame:
*/
offset = offsetof(struct fxregs_state, sw_reserved);
size = sizeof(xstate_fx_sw_bytes);
__copy_xstate_to_kernel(kbuf, xstate_fx_sw_bytes, offset, size, size_total);
return 0;
}
static inline int
__copy_xstate_to_user(void __user *ubuf, const void *data, unsigned int offset, unsigned int size, unsigned int size_total)
{
if (!size)
return 0;
if (offset < size_total) {
unsigned int copy = min(size, size_total - offset);
if (__copy_to_user(ubuf + offset, data, copy))
return -EFAULT;
}
return 0;
}
/*
* Convert from kernel XSAVES compacted format to standard format and copy
* to a user-space buffer. It supports partial copy but pos always starts from
* zero. This is called from xstateregs_get() and there we check the CPU
* has XSAVES.
*/
int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
{
unsigned int offset, size;
int ret, i;
struct xstate_header header;
/*
* Currently copy_regset_to_user() starts from pos 0:
*/
if (unlikely(offset_start != 0))
return -EFAULT;
/*
* The destination is a ptrace buffer; we put in only user xstates:
*/
memset(&header, 0, sizeof(header));
header.xfeatures = xsave->header.xfeatures;
header.xfeatures &= ~XFEATURE_MASK_SUPERVISOR;
/*
* Copy xregs_state->header:
*/
offset = offsetof(struct xregs_state, header);
size = sizeof(header);
ret = __copy_xstate_to_user(ubuf, &header, offset, size, size_total);
if (ret)
return ret;
@@ -992,25 +1106,30 @@ int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
offset = xstate_offsets[i];
size = xstate_sizes[i];
ret = xstate_copyout(offset, size, kbuf, ubuf, src, 0, count);
/* The next component has to fit fully into the output buffer: */
if (offset + size > size_total)
break;
ret = __copy_xstate_to_user(ubuf, src, offset, size, size_total);
if (ret)
return ret;
if (offset + size >= count)
break;
}
}
if (xfeatures_mxcsr_quirk(header.xfeatures)) {
offset = offsetof(struct fxregs_state, mxcsr);
size = MXCSR_AND_FLAGS_SIZE;
__copy_xstate_to_user(ubuf, &xsave->i387.mxcsr, offset, size, size_total);
}
/*
* Fill xsave->i387.sw_reserved value for ptrace frame:
*/
offset = offsetof(struct fxregs_state, sw_reserved);
size = sizeof(xstate_fx_sw_bytes);
ret = xstate_copyout(offset, size, kbuf, ubuf, xstate_fx_sw_bytes, 0, count);
ret = __copy_xstate_to_user(ubuf, xstate_fx_sw_bytes, offset, size, size_total);
if (ret)
return ret;
@@ -1018,55 +1137,42 @@ int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
}
/*
* Convert from a ptrace standard-format buffer to kernel XSAVES format
* and copy to the target thread. This is called from xstateregs_set() and
* there we check the CPU has XSAVES and a whole standard-sized buffer
* exists.
* Convert from a ptrace standard-format kernel buffer to kernel XSAVES format
* and copy to the target thread. This is called from xstateregs_set().
*/
int copyin_to_xsaves(const void *kbuf, const void __user *ubuf,
struct xregs_state *xsave)
int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf)
{
unsigned int offset, size;
int i;
u64 xfeatures;
u64 allowed_features;
struct xstate_header hdr;
offset = offsetof(struct xregs_state, header);
size = sizeof(xfeatures);
size = sizeof(hdr);
if (kbuf) {
memcpy(&xfeatures, kbuf + offset, size);
} else {
if (__copy_from_user(&xfeatures, ubuf + offset, size))
return -EFAULT;
}
memcpy(&hdr, kbuf + offset, size);
/*
* Reject if the user sets any disabled or supervisor features:
*/
allowed_features = xfeatures_mask & ~XFEATURE_MASK_SUPERVISOR;
if (xfeatures & ~allowed_features)
if (validate_xstate_header(&hdr))
return -EINVAL;
for (i = 0; i < XFEATURE_MAX; i++) {
u64 mask = ((u64)1 << i);
if (xfeatures & mask) {
if (hdr.xfeatures & mask) {
void *dst = __raw_xsave_addr(xsave, 1 << i);
offset = xstate_offsets[i];
size = xstate_sizes[i];
if (kbuf) {
memcpy(dst, kbuf + offset, size);
} else {
if (__copy_from_user(dst, ubuf + offset, size))
return -EFAULT;
}
memcpy(dst, kbuf + offset, size);
}
}
if (xfeatures_mxcsr_quirk(hdr.xfeatures)) {
offset = offsetof(struct fxregs_state, mxcsr);
size = MXCSR_AND_FLAGS_SIZE;
memcpy(&xsave->i387.mxcsr, kbuf + offset, size);
}
/*
* The state that came in from userspace was user-state only.
* Mask all the user states out of 'xfeatures':
@@ -1076,7 +1182,63 @@ int copyin_to_xsaves(const void *kbuf, const void __user *ubuf,
/*
* Add back in the features that came in from userspace:
*/
xsave->header.xfeatures |= xfeatures;
xsave->header.xfeatures |= hdr.xfeatures;
return 0;
}
/*
* Convert from a ptrace or sigreturn standard-format user-space buffer to
* kernel XSAVES format and copy to the target thread. This is called from
* xstateregs_set(), as well as potentially from the sigreturn() and
* rt_sigreturn() system calls.
*/
int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf)
{
unsigned int offset, size;
int i;
struct xstate_header hdr;
offset = offsetof(struct xregs_state, header);
size = sizeof(hdr);
if (__copy_from_user(&hdr, ubuf + offset, size))
return -EFAULT;
if (validate_xstate_header(&hdr))
return -EINVAL;
for (i = 0; i < XFEATURE_MAX; i++) {
u64 mask = ((u64)1 << i);
if (hdr.xfeatures & mask) {
void *dst = __raw_xsave_addr(xsave, 1 << i);
offset = xstate_offsets[i];
size = xstate_sizes[i];
if (__copy_from_user(dst, ubuf + offset, size))
return -EFAULT;
}
}
if (xfeatures_mxcsr_quirk(hdr.xfeatures)) {
offset = offsetof(struct fxregs_state, mxcsr);
size = MXCSR_AND_FLAGS_SIZE;
if (__copy_from_user(&xsave->i387.mxcsr, ubuf + offset, size))
return -EFAULT;
}
/*
* The state that came in from userspace was user-state only.
* Mask all the user states out of 'xfeatures':
*/
xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR;
/*
* Add back in the features that came in from userspace:
*/
xsave->header.xfeatures |= hdr.xfeatures;
return 0;
}

@@ -263,7 +263,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
sp = (unsigned long) ka->sa.sa_restorer;
}
if (fpu->fpstate_active) {
if (fpu->initialized) {
sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
&buf_fx, &math_size);
*fpstate = (void __user *)sp;
@@ -279,7 +279,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
return (void __user *)-1L;
/* save i387 and extended state */
if (fpu->fpstate_active &&
if (fpu->initialized &&
copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size) < 0)
return (void __user *)-1L;
@@ -755,7 +755,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
/*
* Ensure the signal handler starts with the new fpu state.
*/
if (fpu->fpstate_active)
if (fpu->initialized)
fpu__clear(fpu);
}
signal_setup_done(failed, ksig, stepping);

@@ -7225,7 +7225,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
int r;
sigset_t sigsaved;
fpu__activate_curr(fpu);
fpu__initialize(fpu);
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

@@ -114,7 +114,7 @@ void math_emulate(struct math_emu_info *info)
struct desc_struct code_descriptor;
struct fpu *fpu = &current->thread.fpu;
fpu__activate_curr(fpu);
fpu__initialize(fpu);
#ifdef RE_ENTRANT_CHECKING
if (emulating) {

@@ -2,6 +2,7 @@
#include <linux/uaccess.h>
#include <linux/sched/debug.h>
#include <asm/fpu/internal.h>
#include <asm/traps.h>
#include <asm/kdebug.h>
@@ -78,6 +79,29 @@ bool ex_handler_refcount(const struct exception_table_entry *fixup,
}
EXPORT_SYMBOL_GPL(ex_handler_refcount);
/*
* Handler for when we fail to restore a task's FPU state. We should never get
* here because the FPU state of a task using the FPU (task->thread.fpu.state)
* should always be valid. However, past bugs have allowed userspace to set
* reserved bits in the XSAVE area using PTRACE_SETREGSET or sys_rt_sigreturn().
* These caused XRSTOR to fail when switching to the task, leaking the FPU
* registers of the task previously executing on the CPU. Mitigate this class
* of vulnerability by restoring from the initial state (essentially, zeroing
* out all the FPU registers) if we can't restore from the task's FPU state.
*/
bool ex_handler_fprestore(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr)
{
regs->ip = ex_fixup_addr(fixup);
WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.",
(void *)instruction_pointer(regs));
__copy_kernel_to_fpregs(&init_fpstate, -1);
return true;
}
EXPORT_SYMBOL_GPL(ex_handler_fprestore);
bool ex_handler_ext(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr)
{

@@ -18,7 +18,6 @@
#include <asm/cpufeature.h> /* boot_cpu_has, ... */
#include <asm/mmu_context.h> /* vma_pkey() */
#include <asm/fpu/internal.h> /* fpregs_active() */
int __execute_only_pkey(struct mm_struct *mm)
{
@@ -45,7 +44,7 @@ int __execute_only_pkey(struct mm_struct *mm)
*/
preempt_disable();
if (!need_to_set_mm_pkey &&
fpregs_active() &&
current->thread.fpu.initialized &&
!__pkru_allows_read(read_pkru(), execute_only_pkey)) {
preempt_enable();
return execute_only_pkey;