Merge branch 'x86/uaccess' into core/percpu

Ingo Molnar committed 2009-02-10 00:40:48 +01:00
commit 5d96218b4a
9 changed files with 476 additions and 413 deletions


@@ -46,78 +46,83 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
{
int err;
int err = 0;
if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
return -EFAULT;
/* If you change siginfo_t structure, please make sure that
this code is fixed accordingly.
It should never copy any pad contained in the structure
to avoid security leaks, but must copy the generic
3 ints plus the relevant union member. */
err = __put_user(from->si_signo, &to->si_signo);
err |= __put_user(from->si_errno, &to->si_errno);
err |= __put_user((short)from->si_code, &to->si_code);
put_user_try {
/* If you change siginfo_t structure, please make sure that
this code is fixed accordingly.
It should never copy any pad contained in the structure
to avoid security leaks, but must copy the generic
3 ints plus the relevant union member. */
put_user_ex(from->si_signo, &to->si_signo);
put_user_ex(from->si_errno, &to->si_errno);
put_user_ex((short)from->si_code, &to->si_code);
if (from->si_code < 0) {
err |= __put_user(from->si_pid, &to->si_pid);
err |= __put_user(from->si_uid, &to->si_uid);
err |= __put_user(ptr_to_compat(from->si_ptr), &to->si_ptr);
} else {
/*
* First 32bits of unions are always present:
* si_pid === si_band === si_tid === si_addr(LS half)
*/
err |= __put_user(from->_sifields._pad[0],
&to->_sifields._pad[0]);
switch (from->si_code >> 16) {
case __SI_FAULT >> 16:
break;
case __SI_CHLD >> 16:
err |= __put_user(from->si_utime, &to->si_utime);
err |= __put_user(from->si_stime, &to->si_stime);
err |= __put_user(from->si_status, &to->si_status);
/* FALL THROUGH */
default:
case __SI_KILL >> 16:
err |= __put_user(from->si_uid, &to->si_uid);
break;
case __SI_POLL >> 16:
err |= __put_user(from->si_fd, &to->si_fd);
break;
case __SI_TIMER >> 16:
err |= __put_user(from->si_overrun, &to->si_overrun);
err |= __put_user(ptr_to_compat(from->si_ptr),
&to->si_ptr);
break;
/* This is not generated by the kernel as of now. */
case __SI_RT >> 16:
case __SI_MESGQ >> 16:
err |= __put_user(from->si_uid, &to->si_uid);
err |= __put_user(from->si_int, &to->si_int);
break;
if (from->si_code < 0) {
put_user_ex(from->si_pid, &to->si_pid);
put_user_ex(from->si_uid, &to->si_uid);
put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr);
} else {
/*
* First 32bits of unions are always present:
* si_pid === si_band === si_tid === si_addr(LS half)
*/
put_user_ex(from->_sifields._pad[0],
&to->_sifields._pad[0]);
switch (from->si_code >> 16) {
case __SI_FAULT >> 16:
break;
case __SI_CHLD >> 16:
put_user_ex(from->si_utime, &to->si_utime);
put_user_ex(from->si_stime, &to->si_stime);
put_user_ex(from->si_status, &to->si_status);
/* FALL THROUGH */
default:
case __SI_KILL >> 16:
put_user_ex(from->si_uid, &to->si_uid);
break;
case __SI_POLL >> 16:
put_user_ex(from->si_fd, &to->si_fd);
break;
case __SI_TIMER >> 16:
put_user_ex(from->si_overrun, &to->si_overrun);
put_user_ex(ptr_to_compat(from->si_ptr),
&to->si_ptr);
break;
/* This is not generated by the kernel as of now. */
case __SI_RT >> 16:
case __SI_MESGQ >> 16:
put_user_ex(from->si_uid, &to->si_uid);
put_user_ex(from->si_int, &to->si_int);
break;
}
}
}
} put_user_catch(err);
return err;
}
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{
int err;
int err = 0;
u32 ptr32;
if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
return -EFAULT;
err = __get_user(to->si_signo, &from->si_signo);
err |= __get_user(to->si_errno, &from->si_errno);
err |= __get_user(to->si_code, &from->si_code);
get_user_try {
get_user_ex(to->si_signo, &from->si_signo);
get_user_ex(to->si_errno, &from->si_errno);
get_user_ex(to->si_code, &from->si_code);
err |= __get_user(to->si_pid, &from->si_pid);
err |= __get_user(to->si_uid, &from->si_uid);
err |= __get_user(ptr32, &from->si_ptr);
to->si_ptr = compat_ptr(ptr32);
get_user_ex(to->si_pid, &from->si_pid);
get_user_ex(to->si_uid, &from->si_uid);
get_user_ex(ptr32, &from->si_ptr);
to->si_ptr = compat_ptr(ptr32);
} get_user_catch(err);
return err;
}
@@ -142,17 +147,23 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
struct pt_regs *regs)
{
stack_t uss, uoss;
int ret;
int ret, err = 0;
mm_segment_t seg;
if (uss_ptr) {
u32 ptr;
memset(&uss, 0, sizeof(stack_t));
if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t)) ||
__get_user(ptr, &uss_ptr->ss_sp) ||
__get_user(uss.ss_flags, &uss_ptr->ss_flags) ||
__get_user(uss.ss_size, &uss_ptr->ss_size))
if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t)))
return -EFAULT;
get_user_try {
get_user_ex(ptr, &uss_ptr->ss_sp);
get_user_ex(uss.ss_flags, &uss_ptr->ss_flags);
get_user_ex(uss.ss_size, &uss_ptr->ss_size);
} get_user_catch(err);
if (err)
return -EFAULT;
uss.ss_sp = compat_ptr(ptr);
}
@@ -161,10 +172,16 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
set_fs(seg);
if (ret >= 0 && uoss_ptr) {
if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)) ||
__put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
__put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
__put_user(uoss.ss_size, &uoss_ptr->ss_size))
if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
return -EFAULT;
put_user_try {
put_user_ex(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp);
put_user_ex(uoss.ss_flags, &uoss_ptr->ss_flags);
put_user_ex(uoss.ss_size, &uoss_ptr->ss_size);
} put_user_catch(err);
if (err)
ret = -EFAULT;
}
return ret;
@@ -174,18 +191,18 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
* Do a signal return; undo the signal stack.
*/
#define COPY(x) { \
err |= __get_user(regs->x, &sc->x); \
get_user_ex(regs->x, &sc->x); \
}
#define COPY_SEG_CPL3(seg) { \
unsigned short tmp; \
err |= __get_user(tmp, &sc->seg); \
get_user_ex(tmp, &sc->seg); \
regs->seg = tmp | 3; \
}
#define RELOAD_SEG(seg) { \
unsigned int cur, pre; \
err |= __get_user(pre, &sc->seg); \
get_user_ex(pre, &sc->seg); \
savesegment(seg, cur); \
pre |= 3; \
if (pre != cur) \
@@ -209,39 +226,42 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
sc, sc->err, sc->ip, sc->cs, sc->flags);
#endif
/*
* Reload fs and gs if they have changed in the signal
* handler. This does not handle long fs/gs base changes in
* the handler, but does not clobber them at least in the
* normal case.
*/
err |= __get_user(gs, &sc->gs);
gs |= 3;
savesegment(gs, oldgs);
if (gs != oldgs)
load_gs_index(gs);
get_user_try {
/*
* Reload fs and gs if they have changed in the signal
* handler. This does not handle long fs/gs base changes in
* the handler, but does not clobber them at least in the
* normal case.
*/
get_user_ex(gs, &sc->gs);
gs |= 3;
savesegment(gs, oldgs);
if (gs != oldgs)
load_gs_index(gs);
RELOAD_SEG(fs);
RELOAD_SEG(ds);
RELOAD_SEG(es);
RELOAD_SEG(fs);
RELOAD_SEG(ds);
RELOAD_SEG(es);
COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
COPY(dx); COPY(cx); COPY(ip);
/* Don't touch extended registers */
COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
COPY(dx); COPY(cx); COPY(ip);
/* Don't touch extended registers */
COPY_SEG_CPL3(cs);
COPY_SEG_CPL3(ss);
COPY_SEG_CPL3(cs);
COPY_SEG_CPL3(ss);
err |= __get_user(tmpflags, &sc->flags);
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
/* disable syscall checks */
regs->orig_ax = -1;
get_user_ex(tmpflags, &sc->flags);
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
/* disable syscall checks */
regs->orig_ax = -1;
err |= __get_user(tmp, &sc->fpstate);
buf = compat_ptr(tmp);
err |= restore_i387_xstate_ia32(buf);
get_user_ex(tmp, &sc->fpstate);
buf = compat_ptr(tmp);
err |= restore_i387_xstate_ia32(buf);
get_user_ex(*pax, &sc->ax);
} get_user_catch(err);
err |= __get_user(*pax, &sc->ax);
return err;
}
@@ -319,36 +339,38 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
{
int tmp, err = 0;
savesegment(gs, tmp);
err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
savesegment(fs, tmp);
err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
savesegment(ds, tmp);
err |= __put_user(tmp, (unsigned int __user *)&sc->ds);
savesegment(es, tmp);
err |= __put_user(tmp, (unsigned int __user *)&sc->es);
put_user_try {
savesegment(gs, tmp);
put_user_ex(tmp, (unsigned int __user *)&sc->gs);
savesegment(fs, tmp);
put_user_ex(tmp, (unsigned int __user *)&sc->fs);
savesegment(ds, tmp);
put_user_ex(tmp, (unsigned int __user *)&sc->ds);
savesegment(es, tmp);
put_user_ex(tmp, (unsigned int __user *)&sc->es);
err |= __put_user(regs->di, &sc->di);
err |= __put_user(regs->si, &sc->si);
err |= __put_user(regs->bp, &sc->bp);
err |= __put_user(regs->sp, &sc->sp);
err |= __put_user(regs->bx, &sc->bx);
err |= __put_user(regs->dx, &sc->dx);
err |= __put_user(regs->cx, &sc->cx);
err |= __put_user(regs->ax, &sc->ax);
err |= __put_user(current->thread.trap_no, &sc->trapno);
err |= __put_user(current->thread.error_code, &sc->err);
err |= __put_user(regs->ip, &sc->ip);
err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs);
err |= __put_user(regs->flags, &sc->flags);
err |= __put_user(regs->sp, &sc->sp_at_signal);
err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss);
put_user_ex(regs->di, &sc->di);
put_user_ex(regs->si, &sc->si);
put_user_ex(regs->bp, &sc->bp);
put_user_ex(regs->sp, &sc->sp);
put_user_ex(regs->bx, &sc->bx);
put_user_ex(regs->dx, &sc->dx);
put_user_ex(regs->cx, &sc->cx);
put_user_ex(regs->ax, &sc->ax);
put_user_ex(current->thread.trap_no, &sc->trapno);
put_user_ex(current->thread.error_code, &sc->err);
put_user_ex(regs->ip, &sc->ip);
put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
put_user_ex(regs->flags, &sc->flags);
put_user_ex(regs->sp, &sc->sp_at_signal);
put_user_ex(regs->ss, (unsigned int __user *)&sc->ss);
err |= __put_user(ptr_to_compat(fpstate), &sc->fpstate);
put_user_ex(ptr_to_compat(fpstate), &sc->fpstate);
/* non-iBCS2 extensions.. */
err |= __put_user(mask, &sc->oldmask);
err |= __put_user(current->thread.cr2, &sc->cr2);
/* non-iBCS2 extensions.. */
put_user_ex(mask, &sc->oldmask);
put_user_ex(current->thread.cr2, &sc->cr2);
} put_user_catch(err);
return err;
}
@@ -437,13 +459,17 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
else
restorer = &frame->retcode;
}
err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
/*
* These are actually not used anymore, but left because some
* gdb versions depend on them as a marker.
*/
err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode);
put_user_try {
put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
/*
* These are actually not used anymore, but left because some
* gdb versions depend on them as a marker.
*/
put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
} put_user_catch(err);
if (err)
return -EFAULT;
@@ -496,41 +522,40 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return -EFAULT;
err |= __put_user(sig, &frame->sig);
err |= __put_user(ptr_to_compat(&frame->info), &frame->pinfo);
err |= __put_user(ptr_to_compat(&frame->uc), &frame->puc);
err |= copy_siginfo_to_user32(&frame->info, info);
if (err)
return -EFAULT;
put_user_try {
put_user_ex(sig, &frame->sig);
put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo);
put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);
err |= copy_siginfo_to_user32(&frame->info, info);
/* Create the ucontext. */
if (cpu_has_xsave)
err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
else
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
err |= __put_user(sas_ss_flags(regs->sp),
&frame->uc.uc_stack.ss_flags);
err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
if (err)
return -EFAULT;
/* Create the ucontext. */
if (cpu_has_xsave)
put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
else
put_user_ex(0, &frame->uc.uc_flags);
put_user_ex(0, &frame->uc.uc_link);
put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
put_user_ex(sas_ss_flags(regs->sp),
&frame->uc.uc_stack.ss_flags);
put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
if (ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer;
else
restorer = VDSO32_SYMBOL(current->mm->context.vdso,
rt_sigreturn);
err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
if (ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer;
else
restorer = VDSO32_SYMBOL(current->mm->context.vdso,
rt_sigreturn);
put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
/*
* Not actually used anymore, but left because some gdb
* versions need it.
*/
put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
} put_user_catch(err);
/*
* Not actually used anymore, but left because some gdb
* versions need it.
*/
err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode);
if (err)
return -EFAULT;

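The hunks above, and most of the rest of this commit, apply the same transformation: long runs of err |= __put_user(...) / err |= __get_user(...), each carrying its own error check, are folded into a single put_user_try { ... } put_user_catch(err) (or get_user_try / get_user_catch) region in which every *_ex() access is unchecked and any fault is reported once at the end. A minimal before/after sketch of the calling pattern; struct demo and its fields are invented for illustration and are not part of this commit:

        #include <linux/uaccess.h>

        struct demo {
                int a, b;
        };

        /* Old style: each access accumulates its own error. */
        static int demo_copy_old(struct demo __user *to, const struct demo *from)
        {
                int err = 0;

                if (!access_ok(VERIFY_WRITE, to, sizeof(*to)))
                        return -EFAULT;
                err |= __put_user(from->a, &to->a);
                err |= __put_user(from->b, &to->b);
                return err ? -EFAULT : 0;
        }

        /* New style: one try/catch region, any fault surfaces once in err. */
        static int demo_copy_new(struct demo __user *to, const struct demo *from)
        {
                int err = 0;

                if (!access_ok(VERIFY_WRITE, to, sizeof(*to)))
                        return -EFAULT;
                put_user_try {
                        put_user_ex(from->a, &to->a);
                        put_user_ex(from->b, &to->b);
                } put_user_catch(err);
                return err ? -EFAULT : 0;
        }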

@@ -1471,8 +1471,6 @@ u64 _paravirt_ident_64(u64);
#define paravirt_nop ((void *)_paravirt_nop)
void paravirt_use_bytelocks(void);
#ifdef CONFIG_SMP
static inline int __raw_spin_is_locked(struct raw_spinlock *lock)


@@ -172,70 +172,8 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
}
#ifdef CONFIG_PARAVIRT
/*
* Define virtualization-friendly old-style lock byte lock, for use in
* pv_lock_ops if desired.
*
* This differs from the pre-2.6.24 spinlock by always using xchgb
* rather than decb to take the lock; this allows it to use a
* zero-initialized lock structure. It also maintains a 1-byte
* contention counter, so that we can implement
* __byte_spin_is_contended.
*/
struct __byte_spinlock {
s8 lock;
s8 spinners;
};
#ifndef CONFIG_PARAVIRT
static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
{
struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
return bl->lock != 0;
}
static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
{
struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
return bl->spinners != 0;
}
static inline void __byte_spin_lock(raw_spinlock_t *lock)
{
struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
s8 val = 1;
asm("1: xchgb %1, %0\n"
" test %1,%1\n"
" jz 3f\n"
" " LOCK_PREFIX "incb %2\n"
"2: rep;nop\n"
" cmpb $1, %0\n"
" je 2b\n"
" " LOCK_PREFIX "decb %2\n"
" jmp 1b\n"
"3:"
: "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
}
static inline int __byte_spin_trylock(raw_spinlock_t *lock)
{
struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
u8 old = 1;
asm("xchgb %1,%0"
: "+m" (bl->lock), "+q" (old) : : "memory");
return old == 0;
}
static inline void __byte_spin_unlock(raw_spinlock_t *lock)
{
struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
smp_wmb();
bl->lock = 0;
}
#else /* !CONFIG_PARAVIRT */
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
return __ticket_spin_is_locked(lock);
@@ -267,7 +205,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
__raw_spin_lock(lock);
}
#endif /* CONFIG_PARAVIRT */
#endif
static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{


@@ -40,6 +40,7 @@ struct thread_info {
*/
__u8 supervisor_stack[0];
#endif
int uaccess_err;
};
#define INIT_THREAD_INFO(tsk) \


@@ -121,7 +121,7 @@ extern int __get_user_bad(void);
#define __get_user_x(size, ret, x, ptr) \
asm volatile("call __get_user_" #size \
: "=a" (ret),"=d" (x) \
: "=a" (ret), "=d" (x) \
: "0" (ptr)) \
/* Careful: we have to cast the result to the type of the pointer
@@ -181,12 +181,12 @@ extern int __get_user_bad(void);
#define __put_user_x(size, x, ptr, __ret_pu) \
asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
:"0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
: "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#ifdef CONFIG_X86_32
#define __put_user_u64(x, addr, err) \
#define __put_user_asm_u64(x, addr, err, errret) \
asm volatile("1: movl %%eax,0(%2)\n" \
"2: movl %%edx,4(%2)\n" \
"3:\n" \
@@ -197,14 +197,24 @@ extern int __get_user_bad(void);
_ASM_EXTABLE(1b, 4b) \
_ASM_EXTABLE(2b, 4b) \
: "=r" (err) \
: "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
: "A" (x), "r" (addr), "i" (errret), "0" (err))
#define __put_user_asm_ex_u64(x, addr) \
asm volatile("1: movl %%eax,0(%1)\n" \
"2: movl %%edx,4(%1)\n" \
"3:\n" \
_ASM_EXTABLE(1b, 2b - 1b) \
_ASM_EXTABLE(2b, 3b - 2b) \
: : "A" (x), "r" (addr))
#define __put_user_x8(x, ptr, __ret_pu) \
asm volatile("call __put_user_8" : "=a" (__ret_pu) \
: "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_u64(x, ptr, retval) \
__put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT)
#define __put_user_asm_u64(x, ptr, retval, errret) \
__put_user_asm(x, ptr, retval, "q", "", "Zr", errret)
#define __put_user_asm_ex_u64(x, addr) \
__put_user_asm_ex(x, addr, "q", "", "Zr")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif
@@ -276,10 +286,32 @@ do { \
__put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
break; \
case 4: \
__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);\
__put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
break; \
case 8: \
__put_user_u64((__typeof__(*ptr))(x), ptr, retval); \
__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \
errret); \
break; \
default: \
__put_user_bad(); \
} \
} while (0)
#define __put_user_size_ex(x, ptr, size) \
do { \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
__put_user_asm_ex(x, ptr, "b", "b", "iq"); \
break; \
case 2: \
__put_user_asm_ex(x, ptr, "w", "w", "ir"); \
break; \
case 4: \
__put_user_asm_ex(x, ptr, "l", "k", "ir"); \
break; \
case 8: \
__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \
break; \
default: \
__put_user_bad(); \
@@ -311,9 +343,12 @@ do { \
#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
__get_user_asm_ex(x, ptr, "q", "", "=r")
#endif
#define __get_user_size(x, ptr, size, retval, errret) \
@@ -350,6 +385,33 @@ do { \
: "=r" (err), ltype(x) \
: "m" (__m(addr)), "i" (errret), "0" (err))
#define __get_user_size_ex(x, ptr, size) \
do { \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
__get_user_asm_ex(x, ptr, "b", "b", "=q"); \
break; \
case 2: \
__get_user_asm_ex(x, ptr, "w", "w", "=r"); \
break; \
case 4: \
__get_user_asm_ex(x, ptr, "l", "k", "=r"); \
break; \
case 8: \
__get_user_asm_ex_u64(x, ptr); \
break; \
default: \
(x) = __get_user_bad(); \
} \
} while (0)
#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
asm volatile("1: mov"itype" %1,%"rtype"0\n" \
"2:\n" \
_ASM_EXTABLE(1b, 2b - 1b) \
: ltype(x) : "m" (__m(addr)))
#define __put_user_nocheck(x, ptr, size) \
({ \
int __pu_err; \
@@ -385,6 +447,26 @@ struct __large_struct { unsigned long buf[100]; };
_ASM_EXTABLE(1b, 3b) \
: "=r"(err) \
: ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
asm volatile("1: mov"itype" %"rtype"0,%1\n" \
"2:\n" \
_ASM_EXTABLE(1b, 2b - 1b) \
: : ltype(x), "m" (__m(addr)))
/*
* uaccess_try and catch
*/
#define uaccess_try do { \
int prev_err = current_thread_info()->uaccess_err; \
current_thread_info()->uaccess_err = 0; \
barrier();
#define uaccess_catch(err) \
(err) |= current_thread_info()->uaccess_err; \
current_thread_info()->uaccess_err = prev_err; \
} while (0)
/**
* __get_user: - Get a simple variable from user space, with less checking.
* @x: Variable to store result.
@@ -408,6 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
#define __get_user(x, ptr) \
__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
/**
* __put_user: - Write a simple value into user space, with less checking.
* @x: Value to copy to user space.
@@ -434,6 +517,45 @@ struct __large_struct { unsigned long buf[100]; };
#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
/*
* {get|put}_user_try and catch
*
* get_user_try {
* get_user_ex(...);
* } get_user_catch(err)
*/
#define get_user_try uaccess_try
#define get_user_catch(err) uaccess_catch(err)
#define get_user_ex(x, ptr) do { \
unsigned long __gue_val; \
__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
(x) = (__force __typeof__(*(ptr)))__gue_val; \
} while (0)
#ifdef CONFIG_X86_WP_WORKS_OK
#define put_user_try uaccess_try
#define put_user_catch(err) uaccess_catch(err)
#define put_user_ex(x, ptr) \
__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#else /* !CONFIG_X86_WP_WORKS_OK */
#define put_user_try do { \
int __uaccess_err = 0;
#define put_user_catch(err) \
(err) |= __uaccess_err; \
} while (0)
#define put_user_ex(x, ptr) do { \
__uaccess_err |= __put_user(x, ptr); \
} while (0)
#endif /* CONFIG_X86_WP_WORKS_OK */
/*
* movsl can be slow when source and dest are not both 8-byte aligned
*/

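The machinery behind these try/catch regions is the uaccess_err field added to struct thread_info earlier in this commit: uaccess_try saves and clears it, each *_ex() access is a bare mov backed only by an exception-table entry (see __get_user_asm_ex / __put_user_asm_ex above) instead of carrying its own error output, and uaccess_catch() ORs any recorded fault into the caller's error variable. Purely as an illustration of how the macros above compose, a region such as get_user_try { get_user_ex(val, ptr); } get_user_catch(err); expands to roughly the following (simplified, casts and extra parentheses trimmed):

        do {
                int prev_err = current_thread_info()->uaccess_err;

                current_thread_info()->uaccess_err = 0;
                barrier();

                /*
                 * get_user_ex(val, ptr): an unchecked load.  If it faults,
                 * the exception fixup records -EFAULT in ->uaccess_err and
                 * resumes right after the faulting instruction, so the
                 * region keeps running.
                 */
                {
                        unsigned long __gue_val;
                        __get_user_size_ex(__gue_val, ptr, sizeof(*ptr));
                        val = (__typeof__(*ptr))__gue_val;
                }

                err |= current_thread_info()->uaccess_err;
                current_thread_info()->uaccess_err = prev_err;
        } while (0);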

@@ -3466,40 +3466,6 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
return 0;
}
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc)
{
unsigned int irq;
int ret;
unsigned int irq_want;
irq_want = nr_irqs_gsi;
irq = create_irq_nr(irq_want);
if (irq == 0)
return -1;
#ifdef CONFIG_INTR_REMAP
if (!intr_remapping_enabled)
goto no_ir;
ret = msi_alloc_irte(dev, irq, 1);
if (ret < 0)
goto error;
no_ir:
#endif
ret = setup_msi_irq(dev, msidesc, irq);
if (ret < 0) {
destroy_irq(irq);
return ret;
}
return 0;
#ifdef CONFIG_INTR_REMAP
error:
destroy_irq(irq);
return ret;
#endif
}
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
unsigned int irq;


@@ -26,13 +26,3 @@ struct pv_lock_ops pv_lock_ops = {
};
EXPORT_SYMBOL(pv_lock_ops);
void __init paravirt_use_bytelocks(void)
{
#ifdef CONFIG_SMP
pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
pv_lock_ops.spin_lock = __byte_spin_lock;
pv_lock_ops.spin_trylock = __byte_spin_trylock;
pv_lock_ops.spin_unlock = __byte_spin_unlock;
#endif
}


@@ -51,24 +51,24 @@
#endif
#define COPY(x) { \
err |= __get_user(regs->x, &sc->x); \
get_user_ex(regs->x, &sc->x); \
}
#define COPY_SEG(seg) { \
unsigned short tmp; \
err |= __get_user(tmp, &sc->seg); \
get_user_ex(tmp, &sc->seg); \
regs->seg = tmp; \
}
#define COPY_SEG_CPL3(seg) { \
unsigned short tmp; \
err |= __get_user(tmp, &sc->seg); \
get_user_ex(tmp, &sc->seg); \
regs->seg = tmp | 3; \
}
#define GET_SEG(seg) { \
unsigned short tmp; \
err |= __get_user(tmp, &sc->seg); \
get_user_ex(tmp, &sc->seg); \
loadsegment(seg, tmp); \
}
@@ -83,45 +83,49 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
get_user_try {
#ifdef CONFIG_X86_32
GET_SEG(gs);
COPY_SEG(fs);
COPY_SEG(es);
COPY_SEG(ds);
GET_SEG(gs);
COPY_SEG(fs);
COPY_SEG(es);
COPY_SEG(ds);
#endif /* CONFIG_X86_32 */
COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
COPY(dx); COPY(cx); COPY(ip);
COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
COPY(dx); COPY(cx); COPY(ip);
#ifdef CONFIG_X86_64
COPY(r8);
COPY(r9);
COPY(r10);
COPY(r11);
COPY(r12);
COPY(r13);
COPY(r14);
COPY(r15);
COPY(r8);
COPY(r9);
COPY(r10);
COPY(r11);
COPY(r12);
COPY(r13);
COPY(r14);
COPY(r15);
#endif /* CONFIG_X86_64 */
#ifdef CONFIG_X86_32
COPY_SEG_CPL3(cs);
COPY_SEG_CPL3(ss);
COPY_SEG_CPL3(cs);
COPY_SEG_CPL3(ss);
#else /* !CONFIG_X86_32 */
/* Kernel saves and restores only the CS segment register on signals,
* which is the bare minimum needed to allow mixed 32/64-bit code.
* App's signal handler can save/restore other segments if needed. */
COPY_SEG_CPL3(cs);
/* Kernel saves and restores only the CS segment register on signals,
* which is the bare minimum needed to allow mixed 32/64-bit code.
* App's signal handler can save/restore other segments if needed. */
COPY_SEG_CPL3(cs);
#endif /* CONFIG_X86_32 */
err |= __get_user(tmpflags, &sc->flags);
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
regs->orig_ax = -1; /* disable syscall checks */
get_user_ex(tmpflags, &sc->flags);
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
regs->orig_ax = -1; /* disable syscall checks */
err |= __get_user(buf, &sc->fpstate);
err |= restore_i387_xstate(buf);
get_user_ex(buf, &sc->fpstate);
err |= restore_i387_xstate(buf);
get_user_ex(*pax, &sc->ax);
} get_user_catch(err);
err |= __get_user(*pax, &sc->ax);
return err;
}
@@ -131,57 +135,60 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
{
int err = 0;
#ifdef CONFIG_X86_32
{
unsigned int tmp;
put_user_try {
savesegment(gs, tmp);
err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
}
err |= __put_user(regs->fs, (unsigned int __user *)&sc->fs);
err |= __put_user(regs->es, (unsigned int __user *)&sc->es);
err |= __put_user(regs->ds, (unsigned int __user *)&sc->ds);
#ifdef CONFIG_X86_32
{
unsigned int tmp;
savesegment(gs, tmp);
put_user_ex(tmp, (unsigned int __user *)&sc->gs);
}
put_user_ex(regs->fs, (unsigned int __user *)&sc->fs);
put_user_ex(regs->es, (unsigned int __user *)&sc->es);
put_user_ex(regs->ds, (unsigned int __user *)&sc->ds);
#endif /* CONFIG_X86_32 */
err |= __put_user(regs->di, &sc->di);
err |= __put_user(regs->si, &sc->si);
err |= __put_user(regs->bp, &sc->bp);
err |= __put_user(regs->sp, &sc->sp);
err |= __put_user(regs->bx, &sc->bx);
err |= __put_user(regs->dx, &sc->dx);
err |= __put_user(regs->cx, &sc->cx);
err |= __put_user(regs->ax, &sc->ax);
put_user_ex(regs->di, &sc->di);
put_user_ex(regs->si, &sc->si);
put_user_ex(regs->bp, &sc->bp);
put_user_ex(regs->sp, &sc->sp);
put_user_ex(regs->bx, &sc->bx);
put_user_ex(regs->dx, &sc->dx);
put_user_ex(regs->cx, &sc->cx);
put_user_ex(regs->ax, &sc->ax);
#ifdef CONFIG_X86_64
err |= __put_user(regs->r8, &sc->r8);
err |= __put_user(regs->r9, &sc->r9);
err |= __put_user(regs->r10, &sc->r10);
err |= __put_user(regs->r11, &sc->r11);
err |= __put_user(regs->r12, &sc->r12);
err |= __put_user(regs->r13, &sc->r13);
err |= __put_user(regs->r14, &sc->r14);
err |= __put_user(regs->r15, &sc->r15);
put_user_ex(regs->r8, &sc->r8);
put_user_ex(regs->r9, &sc->r9);
put_user_ex(regs->r10, &sc->r10);
put_user_ex(regs->r11, &sc->r11);
put_user_ex(regs->r12, &sc->r12);
put_user_ex(regs->r13, &sc->r13);
put_user_ex(regs->r14, &sc->r14);
put_user_ex(regs->r15, &sc->r15);
#endif /* CONFIG_X86_64 */
err |= __put_user(current->thread.trap_no, &sc->trapno);
err |= __put_user(current->thread.error_code, &sc->err);
err |= __put_user(regs->ip, &sc->ip);
put_user_ex(current->thread.trap_no, &sc->trapno);
put_user_ex(current->thread.error_code, &sc->err);
put_user_ex(regs->ip, &sc->ip);
#ifdef CONFIG_X86_32
err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs);
err |= __put_user(regs->flags, &sc->flags);
err |= __put_user(regs->sp, &sc->sp_at_signal);
err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss);
put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
put_user_ex(regs->flags, &sc->flags);
put_user_ex(regs->sp, &sc->sp_at_signal);
put_user_ex(regs->ss, (unsigned int __user *)&sc->ss);
#else /* !CONFIG_X86_32 */
err |= __put_user(regs->flags, &sc->flags);
err |= __put_user(regs->cs, &sc->cs);
err |= __put_user(0, &sc->gs);
err |= __put_user(0, &sc->fs);
put_user_ex(regs->flags, &sc->flags);
put_user_ex(regs->cs, &sc->cs);
put_user_ex(0, &sc->gs);
put_user_ex(0, &sc->fs);
#endif /* CONFIG_X86_32 */
err |= __put_user(fpstate, &sc->fpstate);
put_user_ex(fpstate, &sc->fpstate);
/* non-iBCS2 extensions.. */
err |= __put_user(mask, &sc->oldmask);
err |= __put_user(current->thread.cr2, &sc->cr2);
/* non-iBCS2 extensions.. */
put_user_ex(mask, &sc->oldmask);
put_user_ex(current->thread.cr2, &sc->cr2);
} put_user_catch(err);
return err;
}
@@ -336,43 +343,41 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return -EFAULT;
err |= __put_user(sig, &frame->sig);
err |= __put_user(&frame->info, &frame->pinfo);
err |= __put_user(&frame->uc, &frame->puc);
err |= copy_siginfo_to_user(&frame->info, info);
if (err)
return -EFAULT;
put_user_try {
put_user_ex(sig, &frame->sig);
put_user_ex(&frame->info, &frame->pinfo);
put_user_ex(&frame->uc, &frame->puc);
err |= copy_siginfo_to_user(&frame->info, info);
/* Create the ucontext. */
if (cpu_has_xsave)
err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
else
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
err |= __put_user(sas_ss_flags(regs->sp),
&frame->uc.uc_stack.ss_flags);
err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
if (err)
return -EFAULT;
/* Create the ucontext. */
if (cpu_has_xsave)
put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
else
put_user_ex(0, &frame->uc.uc_flags);
put_user_ex(0, &frame->uc.uc_link);
put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
put_user_ex(sas_ss_flags(regs->sp),
&frame->uc.uc_stack.ss_flags);
put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
/* Set up to return from userspace. */
restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
if (ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer;
err |= __put_user(restorer, &frame->pretcode);
/* Set up to return from userspace. */
restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
if (ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer;
put_user_ex(restorer, &frame->pretcode);
/*
* This is movl $__NR_rt_sigreturn, %ax ; int $0x80
*
* WE DO NOT USE IT ANY MORE! It's only left here for historical
* reasons and because gdb uses it as a signature to notice
* signal handler stack frames.
*/
err |= __put_user(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
/*
* This is movl $__NR_rt_sigreturn, %ax ; int $0x80
*
* WE DO NOT USE IT ANY MORE! It's only left here for historical
* reasons and because gdb uses it as a signature to notice
* signal handler stack frames.
*/
put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
} put_user_catch(err);
if (err)
return -EFAULT;
@@ -436,28 +441,30 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
return -EFAULT;
}
/* Create the ucontext. */
if (cpu_has_xsave)
err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
else
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
err |= __put_user(sas_ss_flags(regs->sp),
&frame->uc.uc_stack.ss_flags);
err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
put_user_try {
/* Create the ucontext. */
if (cpu_has_xsave)
put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
else
put_user_ex(0, &frame->uc.uc_flags);
put_user_ex(0, &frame->uc.uc_link);
put_user_ex(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
put_user_ex(sas_ss_flags(regs->sp),
&frame->uc.uc_stack.ss_flags);
put_user_ex(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
/* x86-64 should always use SA_RESTORER. */
if (ka->sa.sa_flags & SA_RESTORER) {
err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
} else {
/* could use a vstub here */
return -EFAULT;
}
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
/* x86-64 should always use SA_RESTORER. */
if (ka->sa.sa_flags & SA_RESTORER) {
put_user_ex(ka->sa.sa_restorer, &frame->pretcode);
} else {
/* could use a vstub here */
err |= -EFAULT;
}
} put_user_catch(err);
if (err)
return -EFAULT;
@@ -509,31 +516,41 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
struct old_sigaction __user *oact)
{
struct k_sigaction new_ka, old_ka;
int ret;
int ret = 0;
if (act) {
old_sigset_t mask;
if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
__get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
if (!access_ok(VERIFY_READ, act, sizeof(*act)))
return -EFAULT;
__get_user(new_ka.sa.sa_flags, &act->sa_flags);
__get_user(mask, &act->sa_mask);
get_user_try {
get_user_ex(new_ka.sa.sa_handler, &act->sa_handler);
get_user_ex(new_ka.sa.sa_flags, &act->sa_flags);
get_user_ex(mask, &act->sa_mask);
get_user_ex(new_ka.sa.sa_restorer, &act->sa_restorer);
} get_user_catch(ret);
if (ret)
return -EFAULT;
siginitset(&new_ka.sa.sa_mask, mask);
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
__put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
return -EFAULT;
__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
put_user_try {
put_user_ex(old_ka.sa.sa_handler, &oact->sa_handler);
put_user_ex(old_ka.sa.sa_flags, &oact->sa_flags);
put_user_ex(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
put_user_ex(old_ka.sa.sa_restorer, &oact->sa_restorer);
} put_user_catch(ret);
if (ret)
return -EFAULT;
}
return ret;


@@ -23,6 +23,12 @@ int fixup_exception(struct pt_regs *regs)
fixup = search_exception_tables(regs->ip);
if (fixup) {
/* If fixup is less than 16, it means uaccess error */
if (fixup->fixup < 16) {
current_thread_info()->uaccess_err = -EFAULT;
regs->ip += fixup->fixup;
return 1;
}
regs->ip = fixup->fixup;
return 1;
}
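
This is the consumer side of the *_ex() accessors added to uaccess.h in this commit: their _ASM_EXTABLE(1b, 2b - 1b) entries store the length of the potentially faulting mov (a few bytes, always under 16) in the fixup slot rather than the address of separate recovery code. fixup_exception() can therefore tell the two kinds of entry apart, note the fault in current_thread_info()->uaccess_err and simply step over the faulting instruction, leaving the error to be collected later by uaccess_catch(). A sketch restating that logic; handle_fixup() is a hypothetical helper (the real code is the fixup_exception() change above) and the usual kernel declarations of struct pt_regs and current_thread_info() are assumed:

        /* x86 exception table entry of this era: two absolute words. */
        struct exception_table_entry {
                unsigned long insn;     /* address of the instruction that may fault */
                unsigned long fixup;    /* recovery address, or just the instruction
                                         * length for *_ex() style accesses */
        };

        static int handle_fixup(struct pt_regs *regs,
                                const struct exception_table_entry *fixup)
        {
                if (fixup->fixup < 16) {
                        /*
                         * *_ex() access inside a uaccess_try region: remember
                         * the fault and resume right after the faulting
                         * instruction.
                         */
                        current_thread_info()->uaccess_err = -EFAULT;
                        regs->ip += fixup->fixup;
                        return 1;
                }
                /* Classic fixup: jump to the dedicated recovery code. */
                regs->ip = fixup->fixup;
                return 1;
        }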