Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/signal

Pull big execve/kernel_thread/fork unification series from Al Viro:
 "All architectures are converted to new model.  Quite a bit of that
  stuff is actually shared with architecture trees; in such cases it's
  literally shared branch pulled by both, not a cherry-pick.

  A lot of ugliness and black magic is gone (-3KLoC total in this one):

   - kernel_thread()/kernel_execve()/sys_execve() redesign.

     We don't do syscalls from kernel anymore for either kernel_thread()
     or kernel_execve():

     kernel_thread() is essentially clone(2) with callback run before we
     return to userland, the callbacks either never return or do
     successful do_execve() before returning.

     kernel_execve() is a wrapper for do_execve() - it doesn't need to
     do transition to user mode anymore.

     As a result kernel_thread() and kernel_execve() are
     arch-independent now - they live in kernel/fork.c and fs/exec.c
     resp.  sys_execve() is also in fs/exec.c and it's completely
     architecture-independent.

   - daemonize() is gone, along with its parts in fs/*.c

   - struct pt_regs * is no longer passed to do_fork/copy_process/
     copy_thread/do_execve/search_binary_handler/->load_binary/do_coredump.

   - sys_fork()/sys_vfork()/sys_clone() unified; some architectures
     still need wrappers (ones with callee-saved registers not saved in
     pt_regs on syscall entry), but the main part of those suckers is in
     kernel/fork.c now."
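
For orientation, here is a minimal sketch of the arch-independent half that the series ends up with (abridged from the 3.7/3.8-era kernel/fork.c; treat the exact flag set and argument slots as an approximation, not the authoritative source):

pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	/* No pt_regs and no syscall issued from kernel space: the callback
	 * and its argument travel in do_fork()'s stack_start/stack_size
	 * slots, and each architecture's copy_thread() spots PF_KTHREAD and
	 * parks them in callee-saved state for ret_from_kernel_thread. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
		       (unsigned long)fn, (unsigned long)arg, NULL, NULL);
}

The per-architecture half of that contract is the recurring copy_thread() pattern visible in the diffs below: if p->flags has PF_KTHREAD, build a blank kernel frame and stash fn/arg; otherwise copy current_pt_regs() into the child and zero its syscall return value.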

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/signal: (113 commits)
  do_coredump(): get rid of pt_regs argument
  print_fatal_signal(): get rid of pt_regs argument
  ptrace_signal(): get rid of unused arguments
  get rid of ptrace_signal_deliver() arguments
  new helper: signal_pt_regs()
  unify default ptrace_signal_deliver
  flagday: kill pt_regs argument of do_fork()
  death to idle_regs()
  don't pass regs to copy_process()
  flagday: don't pass regs to copy_thread()
  bfin: switch to generic vfork, get rid of pointless wrappers
  xtensa: switch to generic clone()
  openrisc: switch to use of generic fork and clone
  unicore32: switch to generic clone(2)
  score: switch to generic fork/vfork/clone
  c6x: sanitize copy_thread(), get rid of clone(2) wrapper, switch to generic clone()
  take sys_fork/sys_vfork/sys_clone prototypes to linux/syscalls.h
  mn10300: switch to generic fork/vfork/clone
  h8300: switch to generic fork/vfork/clone
  tile: switch to generic clone()
  ...

Conflicts:
	arch/microblaze/include/asm/Kbuild
Linus Torvalds 2012-12-12 12:22:13 -08:00
commit 9977d9b379
273 changed files with 1385 additions and 4231 deletions

@ -342,4 +342,18 @@ config MODULES_USE_ELF_REL
Modules only use ELF REL relocations. Modules with ELF RELA
relocations will give an error.
#
# ABI hall of shame
#
config CLONE_BACKWARDS
bool
help
Architecture has tls passed as the 4th argument of clone(2),
not the 5th one.
config CLONE_BACKWARDS2
bool
help
Architecture has the first two arguments of clone(2) swapped.
source "kernel/gcov/Kconfig"

@ -78,6 +78,7 @@ struct switch_stack {
#define current_pt_regs() \
((struct pt_regs *) ((char *)current_thread_info() + 2*PAGE_SIZE) - 1)
#define signal_pt_regs current_pt_regs
#define force_successful_syscall_return() (current_pt_regs()->r0 = 0)

@ -164,9 +164,6 @@ struct sigstack {
#ifdef __KERNEL__
#include <asm/sigcontext.h>
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
#endif
#endif

@ -482,6 +482,9 @@
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
/* "Conditional" syscalls. What we want is

@ -5,7 +5,7 @@
#include <linux/binfmts.h>
#include <linux/a.out.h>
static int load_binary(struct linux_binprm *bprm, struct pt_regs *regs)
static int load_binary(struct linux_binprm *bprm)
{
struct exec *eh = (struct exec *)bprm->buf;
unsigned long loader;
@ -37,7 +37,7 @@ static int load_binary(struct linux_binprm *bprm, struct pt_regs *regs)
retval = prepare_binprm(bprm);
if (retval < 0)
return retval;
return search_binary_handler(bprm,regs);
return search_binary_handler(bprm);
}
static struct linux_binfmt loader_format = {

@ -612,47 +612,24 @@ ret_from_kernel_thread:
* Special system calls. Most of these are special in that they either
* have to play switch_stack games or in some way use the pt_regs struct.
*/
.align 4
.globl sys_fork
.ent sys_fork
sys_fork:
.prologue 0
mov $sp, $21
bsr $1, do_switch_stack
bis $31, SIGCHLD, $16
mov $31, $17
mov $31, $18
mov $31, $19
mov $31, $20
jsr $26, alpha_clone
bsr $1, undo_switch_stack
ret
.end sys_fork
.macro fork_like name
.align 4
.globl sys_clone
.ent sys_clone
sys_clone:
.globl alpha_\name
.ent alpha_\name
alpha_\name:
.prologue 0
mov $sp, $21
bsr $1, do_switch_stack
/* $16, $17, $18, $19, $20 come from the user. */
jsr $26, alpha_clone
bsr $1, undo_switch_stack
jsr $26, sys_\name
ldq $26, 56($sp)
lda $sp, SWITCH_STACK_SIZE($sp)
ret
.end sys_clone
.end alpha_\name
.endm
.align 4
.globl sys_vfork
.ent sys_vfork
sys_vfork:
.prologue 0
mov $sp, $16
bsr $1, do_switch_stack
jsr $26, alpha_vfork
bsr $1, undo_switch_stack
ret
.end sys_vfork
fork_like fork
fork_like vfork
fork_like clone
.align 4
.globl sys_sigreturn
@ -661,8 +638,6 @@ sys_sigreturn:
.prologue 0
lda $9, ret_from_straced
cmpult $26, $9, $9
mov $sp, $17
lda $18, -SWITCH_STACK_SIZE($sp)
lda $sp, -SWITCH_STACK_SIZE($sp)
jsr $26, do_sigreturn
bne $9, 1f
@ -678,8 +653,6 @@ sys_rt_sigreturn:
.prologue 0
lda $9, ret_from_straced
cmpult $26, $9, $9
mov $sp, $17
lda $18, -SWITCH_STACK_SIZE($sp)
lda $sp, -SWITCH_STACK_SIZE($sp)
jsr $26, do_rt_sigreturn
bne $9, 1f

@ -234,33 +234,6 @@ release_thread(struct task_struct *dead_task)
{
}
/*
* "alpha_clone()".. By the time we get here, the
* non-volatile registers have also been saved on the
* stack. We do some ugly pointer stuff here.. (see
* also copy_thread)
*
* Notice that "fork()" is implemented in terms of clone,
* with parameters (SIGCHLD, 0).
*/
int
alpha_clone(unsigned long clone_flags, unsigned long usp,
int __user *parent_tid, int __user *child_tid,
unsigned long tls_value, struct pt_regs *regs)
{
if (!usp)
usp = rdusp();
return do_fork(clone_flags, usp, regs, 0, parent_tid, child_tid);
}
int
alpha_vfork(struct pt_regs *regs)
{
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(),
regs, 0, NULL, NULL);
}
/*
* Copy an alpha thread..
*/
@ -268,18 +241,22 @@ alpha_vfork(struct pt_regs *regs)
int
copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long arg,
struct task_struct * p, struct pt_regs * regs)
struct task_struct *p)
{
extern void ret_from_fork(void);
extern void ret_from_kernel_thread(void);
struct thread_info *childti = task_thread_info(p);
struct pt_regs *childregs = task_pt_regs(p);
struct pt_regs *regs = current_pt_regs();
struct switch_stack *childstack, *stack;
unsigned long settls;
childstack = ((struct switch_stack *) childregs) - 1;
if (unlikely(!regs)) {
childti->pcb.ksp = (unsigned long) childstack;
childti->pcb.flags = 1; /* set FEN, clear everything else */
if (unlikely(p->flags & PF_KTHREAD)) {
/* kernel thread */
memset(childstack, 0,
sizeof(struct switch_stack) + sizeof(struct pt_regs));
@ -288,12 +265,17 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
childstack->r10 = arg;
childregs->hae = alpha_mv.hae_cache,
childti->pcb.usp = 0;
childti->pcb.ksp = (unsigned long) childstack;
childti->pcb.flags = 1; /* set FEN, clear everything else */
return 0;
}
/* Note: if CLONE_SETTLS is not set, then we must inherit the
value from the parent, which will have been set by the block
copy in dup_task_struct. This is non-intuitive, but is
required for proper operation in the case of a threaded
application calling fork. */
if (clone_flags & CLONE_SETTLS)
childti->pcb.unique = regs->r20;
childti->pcb.usp = usp ?: rdusp();
*childregs = *regs;
settls = regs->r20;
childregs->r0 = 0;
childregs->r19 = 0;
childregs->r20 = 1; /* OSF/1 has some strange fork() semantics. */
@ -301,22 +283,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
stack = ((struct switch_stack *) regs) - 1;
*childstack = *stack;
childstack->r26 = (unsigned long) ret_from_fork;
childti->pcb.usp = usp;
childti->pcb.ksp = (unsigned long) childstack;
childti->pcb.flags = 1; /* set FEN, clear everything else */
/* Set a new TLS for the child thread? Peek back into the
syscall arguments that we saved on syscall entry. Oops,
except we'd have clobbered it with the parent/child set
of r20. Read the saved copy. */
/* Note: if CLONE_SETTLS is not set, then we must inherit the
value from the parent, which will have been set by the block
copy in dup_task_struct. This is non-intuitive, but is
required for proper operation in the case of a threaded
application calling fork. */
if (clone_flags & CLONE_SETTLS)
childti->pcb.unique = settls;
return 0;
}

@ -160,10 +160,10 @@ extern char compile_time_assert
#define INSN_CALLSYS 0x00000083
static long
restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
struct switch_stack *sw)
restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
{
unsigned long usp;
struct switch_stack *sw = (struct switch_stack *)regs - 1;
long i, err = __get_user(regs->pc, &sc->sc_pc);
current_thread_info()->restart_block.fn = do_no_restart_syscall;
@ -215,9 +215,9 @@ restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
registers and transfer control from userland. */
asmlinkage void
do_sigreturn(struct sigcontext __user *sc, struct pt_regs *regs,
struct switch_stack *sw)
do_sigreturn(struct sigcontext __user *sc)
{
struct pt_regs *regs = current_pt_regs();
sigset_t set;
/* Verify that it's a good sigcontext before using it */
@ -228,7 +228,7 @@ do_sigreturn(struct sigcontext __user *sc, struct pt_regs *regs,
set_current_blocked(&set);
if (restore_sigcontext(sc, regs, sw))
if (restore_sigcontext(sc, regs))
goto give_sigsegv;
/* Send SIGTRAP if we're single-stepping: */
@ -249,9 +249,9 @@ give_sigsegv:
}
asmlinkage void
do_rt_sigreturn(struct rt_sigframe __user *frame, struct pt_regs *regs,
struct switch_stack *sw)
do_rt_sigreturn(struct rt_sigframe __user *frame)
{
struct pt_regs *regs = current_pt_regs();
sigset_t set;
/* Verify that it's a good ucontext_t before using it */
@ -262,7 +262,7 @@ do_rt_sigreturn(struct rt_sigframe __user *frame, struct pt_regs *regs,
set_current_blocked(&set);
if (restore_sigcontext(&frame->uc.uc_mcontext, regs, sw))
if (restore_sigcontext(&frame->uc.uc_mcontext, regs))
goto give_sigsegv;
/* Send SIGTRAP if we're single-stepping: */

@ -12,7 +12,7 @@
sys_call_table:
.quad alpha_ni_syscall /* 0 */
.quad sys_exit
.quad sys_fork
.quad alpha_fork
.quad sys_read
.quad sys_write
.quad alpha_ni_syscall /* 5 */
@ -76,7 +76,7 @@ sys_call_table:
.quad sys_getpgrp
.quad sys_getpagesize
.quad alpha_ni_syscall /* 65 */
.quad sys_vfork
.quad alpha_vfork
.quad sys_newstat
.quad sys_newlstat
.quad alpha_ni_syscall
@ -330,7 +330,7 @@ sys_call_table:
.quad sys_ni_syscall /* 309: old get_kernel_syms */
.quad sys_syslog /* 310 */
.quad sys_reboot
.quad sys_clone
.quad alpha_clone
.quad sys_uselib
.quad sys_mlock
.quad sys_munlock /* 315 */

@ -57,6 +57,7 @@ config ARM
select SYS_SUPPORTS_APM_EMULATION
select HAVE_MOD_ARCH_SPECIFIC if ARM_UNWIND
select MODULES_USE_ELF_REL
select CLONE_BACKWARDS
help
The ARM series is a line of low-power-consumption RISC chip designs
licensed by ARM Ltd and targeted at embedded applications and

@ -35,5 +35,4 @@ struct k_sigaction {
};
#include <asm/sigcontext.h>
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
#endif

@ -42,6 +42,9 @@
#define __ARCH_WANT_SYS_SOCKETCALL
#endif
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
/*
* "Conditional" syscalls

@ -11,7 +11,7 @@
*/
/* 0 */ CALL(sys_restart_syscall)
CALL(sys_exit)
CALL(sys_fork_wrapper)
CALL(sys_fork)
CALL(sys_read)
CALL(sys_write)
/* 5 */ CALL(sys_open)
@ -129,7 +129,7 @@
CALL(OBSOLETE(ABI(sys_ipc, sys_oabi_ipc)))
CALL(sys_fsync)
CALL(sys_sigreturn_wrapper)
/* 120 */ CALL(sys_clone_wrapper)
/* 120 */ CALL(sys_clone)
CALL(sys_setdomainname)
CALL(sys_newuname)
CALL(sys_ni_syscall) /* modify_ldt */
@ -199,7 +199,7 @@
CALL(sys_sendfile)
CALL(sys_ni_syscall) /* getpmsg */
CALL(sys_ni_syscall) /* putpmsg */
/* 190 */ CALL(sys_vfork_wrapper)
/* 190 */ CALL(sys_vfork)
CALL(sys_getrlimit)
CALL(sys_mmap2)
CALL(ABI(sys_truncate64, sys_oabi_truncate64))

@ -502,22 +502,6 @@ sys_syscall:
b sys_ni_syscall
ENDPROC(sys_syscall)
sys_fork_wrapper:
add r0, sp, #S_OFF
b sys_fork
ENDPROC(sys_fork_wrapper)
sys_vfork_wrapper:
add r0, sp, #S_OFF
b sys_vfork
ENDPROC(sys_vfork_wrapper)
sys_clone_wrapper:
add ip, sp, #S_OFF
str ip, [sp, #4]
b sys_clone
ENDPROC(sys_clone_wrapper)
sys_sigreturn_wrapper:
add r0, sp, #S_OFF
mov why, #0 @ prevent syscall restart handling

@ -376,17 +376,18 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
int
copy_thread(unsigned long clone_flags, unsigned long stack_start,
unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs)
unsigned long stk_sz, struct task_struct *p)
{
struct thread_info *thread = task_thread_info(p);
struct pt_regs *childregs = task_pt_regs(p);
memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
if (likely(regs)) {
*childregs = *regs;
if (likely(!(p->flags & PF_KTHREAD))) {
*childregs = *current_pt_regs();
childregs->ARM_r0 = 0;
childregs->ARM_sp = stack_start;
if (stack_start)
childregs->ARM_sp = stack_start;
} else {
memset(childregs, 0, sizeof(struct pt_regs));
thread->cpu_context.r4 = stk_sz;
@ -399,7 +400,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
clear_ptrace_hw_breakpoint(p);
if (clone_flags & CLONE_SETTLS)
thread->tp_value = regs->ARM_r3;
thread->tp_value = childregs->ARM_r3;
thread_notify(THREAD_NOTIFY_COPY, thread);

@ -28,37 +28,6 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
/* Fork a new task - this creates a new program thread.
* This is called indirectly via a small wrapper
*/
asmlinkage int sys_fork(struct pt_regs *regs)
{
#ifdef CONFIG_MMU
return do_fork(SIGCHLD, regs->ARM_sp, regs, 0, NULL, NULL);
#else
/* can not support in nommu mode */
return(-EINVAL);
#endif
}
/* Clone a task - this clones the calling program thread.
* This is called indirectly via a small wrapper
*/
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
int __user *parent_tidptr, int tls_val,
int __user *child_tidptr, struct pt_regs *regs)
{
if (!newsp)
newsp = regs->ARM_sp;
return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr);
}
asmlinkage int sys_vfork(struct pt_regs *regs)
{
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->ARM_sp, regs, 0, NULL, NULL);
}
/*
* Since loff_t is a 64 bit type we avoid a lot of ABI hassle
* with a different argument ordering.

@ -33,6 +33,7 @@ config ARM64
select RTC_LIB
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
select CLONE_BACKWARDS
help
ARM 64-bit (AArch64) Linux support.

@ -27,12 +27,6 @@ asmlinkage long sys_rt_sigreturn_wrapper(void);
asmlinkage long sys_sigaltstack_wrapper(const stack_t __user *uss,
stack_t __user *uoss);
/*
* AArch64 sys_clone implementation has a different prototype than the generic
* one (additional TLS value argument).
*/
#define sys_clone sys_clone
#include <asm-generic/syscalls.h>
#endif /* __ASM_SYSCALLS_H */

@ -24,6 +24,9 @@
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#endif
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE
#include <uapi/asm/unistd.h>

@ -23,7 +23,7 @@
__SYSCALL(0, sys_restart_syscall)
__SYSCALL(1, sys_exit)
__SYSCALL(2, compat_sys_fork)
__SYSCALL(2, sys_fork)
__SYSCALL(3, sys_read)
__SYSCALL(4, sys_write)
__SYSCALL(5, compat_sys_open)
@ -211,7 +211,7 @@ __SYSCALL(186, compat_sys_sigaltstack_wrapper)
__SYSCALL(187, compat_sys_sendfile)
__SYSCALL(188, sys_ni_syscall) /* 188 reserved */
__SYSCALL(189, sys_ni_syscall) /* 189 reserved */
__SYSCALL(190, compat_sys_vfork)
__SYSCALL(190, sys_vfork)
__SYSCALL(191, compat_sys_getrlimit) /* SuS compliant getrlimit */
__SYSCALL(192, sys_mmap_pgoff)
__SYSCALL(193, compat_sys_truncate64_wrapper)

@ -234,16 +234,15 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
asmlinkage void ret_from_fork(void) asm("ret_from_fork");
int copy_thread(unsigned long clone_flags, unsigned long stack_start,
unsigned long stk_sz, struct task_struct *p,
struct pt_regs *regs)
unsigned long stk_sz, struct task_struct *p)
{
struct pt_regs *childregs = task_pt_regs(p);
unsigned long tls = p->thread.tp_value;
memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
if (likely(regs)) {
*childregs = *regs;
if (likely(!(p->flags & PF_KTHREAD))) {
*childregs = *current_pt_regs();
childregs->regs[0] = 0;
if (is_compat_thread(task_thread_info(p))) {
if (stack_start)
@ -266,7 +265,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
* for the new thread.
*/
if (clone_flags & CLONE_SETTLS)
tls = regs->regs[3];
tls = childregs->regs[3];
} else {
memset(childregs, 0, sizeof(struct pt_regs));
childregs->pstate = PSR_MODE_EL1h;

@ -26,17 +26,6 @@
#include <linux/slab.h>
#include <linux/syscalls.h>
/*
* Clone a task - this clones the calling program thread.
*/
asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp,
int __user *parent_tidptr, unsigned long tls_val,
int __user *child_tidptr)
{
return do_fork(clone_flags, newsp, current_pt_regs(), 0,
parent_tidptr, child_tidptr);
}
asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, off_t off)

@ -28,17 +28,6 @@
#include <asm/cacheflush.h>
#include <asm/unistd32.h>
asmlinkage int compat_sys_fork(void)
{
return do_fork(SIGCHLD, 0, current_pt_regs(), 0, NULL, NULL);
}
asmlinkage int compat_sys_vfork(void)
{
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
current_pt_regs(), 0, NULL, NULL);
}
asmlinkage int compat_sys_sched_rr_get_interval(compat_pid_t pid,
struct compat_timespec __user *interval)
{

@ -17,6 +17,8 @@ config AVR32
select GENERIC_CLOCKEVENTS
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_RELA
select GENERIC_KERNEL_THREAD
select GENERIC_KERNEL_EXECVE
help
AVR32 is a high-performance 32-bit RISC microprocessor core,
designed for cost-sensitive embedded applications, with particular

@ -142,9 +142,6 @@ struct task_struct;
/* Free all resources held by a thread */
extern void release_thread(struct task_struct *);
/* Create a kernel thread without removing it from tasklists */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
/* Return saved PC of a blocked thread */
#define thread_saved_pc(tsk) ((tsk)->thread.cpu_context.pc)

@ -37,6 +37,4 @@ struct k_sigaction {
#include <asm/sigcontext.h>
#undef __HAVE_ARCH_SIG_BITOPS
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
#endif

@ -39,6 +39,10 @@
#define __ARCH_WANT_SYS_GETPGRP
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
/*
* "Conditional" syscalls

@ -7,7 +7,7 @@ extra-y := head.o vmlinux.lds
obj-$(CONFIG_SUBARCH_AVR32B) += entry-avr32b.o
obj-y += syscall_table.o syscall-stubs.o irq.o
obj-y += setup.o traps.o ocd.o ptrace.o
obj-y += signal.o sys_avr32.o process.o time.o
obj-y += signal.o process.o time.o
obj-y += switch_to.o cpu.o
obj-$(CONFIG_MODULES) += module.o avr32_ksyms.o
obj-$(CONFIG_KPROBES) += kprobes.o

@ -251,13 +251,15 @@ syscall_badsys:
.global ret_from_fork
ret_from_fork:
call schedule_tail
mov r12, 0
rjmp syscall_return
/* check for syscall tracing */
get_thread_info r0
ld.w r1, r0[TI_flags]
andl r1, _TIF_ALLWORK_MASK, COH
brne syscall_exit_work
rjmp syscall_exit_cont
.global ret_from_kernel_thread
ret_from_kernel_thread:
call schedule_tail
mov r12, r0
mov lr, r2 /* syscall_return */
mov pc, r1
syscall_trace_enter:
pushm r8-r12

@ -68,44 +68,6 @@ void machine_restart(char *cmd)
while (1) ;
}
/*
* PC is actually discarded when returning from a system call -- the
* return address must be stored in LR. This function will make sure
* LR points to do_exit before starting the thread.
*
* Also, when returning from fork(), r12 is 0, so we must copy the
* argument as well.
*
* r0 : The argument to the main thread function
* r1 : The address of do_exit
* r2 : The address of the main thread function
*/
asmlinkage extern void kernel_thread_helper(void);
__asm__(" .type kernel_thread_helper, @function\n"
"kernel_thread_helper:\n"
" mov r12, r0\n"
" mov lr, r2\n"
" mov pc, r1\n"
" .size kernel_thread_helper, . - kernel_thread_helper");
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
struct pt_regs regs;
memset(&regs, 0, sizeof(regs));
regs.r0 = (unsigned long)arg;
regs.r1 = (unsigned long)fn;
regs.r2 = (unsigned long)do_exit;
regs.lr = (unsigned long)kernel_thread_helper;
regs.pc = (unsigned long)kernel_thread_helper;
regs.sr = MODE_SUPERVISOR;
return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
/*
* Free current thread data structures etc
*/
@ -332,26 +294,32 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
}
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
asmlinkage void syscall_return(void);
int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long unused,
struct task_struct *p, struct pt_regs *regs)
unsigned long arg,
struct task_struct *p)
{
struct pt_regs *childregs;
struct pt_regs *childregs = task_pt_regs(p);
childregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long)task_stack_page(p))) - 1;
*childregs = *regs;
if (user_mode(regs))
childregs->sp = usp;
else
childregs->sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
childregs->r12 = 0; /* Set return value for child */
if (unlikely(p->flags & PF_KTHREAD)) {
memset(childregs, 0, sizeof(struct pt_regs));
p->thread.cpu_context.r0 = arg;
p->thread.cpu_context.r1 = usp; /* fn */
p->thread.cpu_context.r2 = syscall_return;
p->thread.cpu_context.pc = (unsigned long)ret_from_kernel_thread;
childregs->sr = MODE_SUPERVISOR;
} else {
*childregs = *current_pt_regs();
if (usp)
childregs->sp = usp;
childregs->r12 = 0; /* Set return value for child */
p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
}
p->thread.cpu_context.sr = MODE_SUPERVISOR | SR_GM;
p->thread.cpu_context.ksp = (unsigned long)childregs;
p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
clear_tsk_thread_flag(p, TIF_DEBUG);
if ((clone_flags & CLONE_PTRACE) && test_thread_flag(TIF_DEBUG))
@ -360,49 +328,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
return 0;
}
/* r12-r8 are dummy parameters to force the compiler to use the stack */
asmlinkage int sys_fork(struct pt_regs *regs)
{
return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
void __user *parent_tidptr, void __user *child_tidptr,
struct pt_regs *regs)
{
if (!newsp)
newsp = regs->sp;
return do_fork(clone_flags, newsp, regs, 0, parent_tidptr,
child_tidptr);
}
asmlinkage int sys_vfork(struct pt_regs *regs)
{
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs,
0, NULL, NULL);
}
asmlinkage int sys_execve(const char __user *ufilename,
const char __user *const __user *uargv,
const char __user *const __user *uenvp,
struct pt_regs *regs)
{
int error;
struct filename *filename;
filename = getname(ufilename);
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
error = do_execve(filename->name, uargv, uenvp, regs);
putname(filename);
out:
return error;
}
/*
* This function is supposed to answer the question "who called
* schedule()?"

@ -1,24 +0,0 @@
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/unistd.h>
int kernel_execve(const char *file,
const char *const *argv,
const char *const *envp)
{
register long scno asm("r8") = __NR_execve;
register long sc1 asm("r12") = (long)file;
register long sc2 asm("r11") = (long)argv;
register long sc3 asm("r10") = (long)envp;
asm volatile("scall"
: "=r"(sc1)
: "r"(scno), "0"(sc1), "r"(sc2), "r"(sc3)
: "cc", "memory");
return sc1;
}

@ -32,30 +32,6 @@ __sys_rt_sigreturn:
mov r12, sp
rjmp sys_rt_sigreturn
.global __sys_fork
.type __sys_fork,@function
__sys_fork:
mov r12, sp
rjmp sys_fork
.global __sys_clone
.type __sys_clone,@function
__sys_clone:
mov r8, sp
rjmp sys_clone
.global __sys_vfork
.type __sys_vfork,@function
__sys_vfork:
mov r12, sp
rjmp sys_vfork
.global __sys_execve
.type __sys_execve,@function
__sys_execve:
mov r9, sp
rjmp sys_execve
.global __sys_mmap2
.type __sys_mmap2,@function
__sys_mmap2:

@ -15,7 +15,7 @@
sys_call_table:
.long sys_restart_syscall
.long sys_exit
.long __sys_fork
.long sys_fork
.long sys_read
.long sys_write
.long sys_open /* 5 */
@ -24,7 +24,7 @@ sys_call_table:
.long sys_creat
.long sys_link
.long sys_unlink /* 10 */
.long __sys_execve
.long sys_execve
.long sys_chdir
.long sys_time
.long sys_mknod
@ -57,7 +57,7 @@ sys_call_table:
.long sys_dup
.long sys_pipe
.long sys_times
.long __sys_clone
.long sys_clone
.long sys_brk /* 45 */
.long sys_setgid
.long sys_getgid
@ -127,7 +127,7 @@ sys_call_table:
.long sys_newuname
.long sys_adjtimex
.long sys_mprotect
.long __sys_vfork
.long sys_vfork
.long sys_init_module /* 115 */
.long sys_delete_module
.long sys_quotactl

@ -45,6 +45,8 @@ config BLACKFIN
select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_RELA
select GENERIC_KERNEL_THREAD
select GENERIC_KERNEL_EXECVE
config GENERIC_CSUM
def_bool y

@ -75,8 +75,6 @@ static inline void release_thread(struct task_struct *dead_task)
{
}
extern int kernel_thread(int (*fn) (void *), void *arg, unsigned long flags);
/*
* Free current thread data structures etc..
*/

@ -446,6 +446,8 @@
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_VFORK
/*
* "Conditional" syscalls

@ -46,53 +46,14 @@ ENTRY(_ret_from_fork)
SP += -12;
pseudo_long_call _schedule_tail, p5;
SP += 12;
r0 = [sp + PT_IPEND];
cc = bittst(r0,1);
if cc jump .Lin_kernel;
p1 = [sp++];
r0 = [sp++];
cc = p1 == 0;
if cc jump .Lfork;
sp += -12;
call (p1);
sp += 12;
.Lfork:
RESTORE_CONTEXT
rti;
.Lin_kernel:
bitclr(r0,1);
[sp + PT_IPEND] = r0;
/* do a 'fake' RTI by jumping to [RETI]
* to avoid clearing supervisor mode in child
*/
r0 = [sp + PT_PC];
[sp + PT_P0] = r0;
RESTORE_ALL_SYS
jump (p0);
ENDPROC(_ret_from_fork)
ENTRY(_sys_vfork)
r0 = sp;
r0 += 24;
[--sp] = rets;
SP += -12;
pseudo_long_call _bfin_vfork, p2;
SP += 12;
rets = [sp++];
rts;
ENDPROC(_sys_vfork)
ENTRY(_sys_clone)
r0 = sp;
r0 += 24;
[--sp] = rets;
SP += -12;
pseudo_long_call _bfin_clone, p2;
SP += 12;
rets = [sp++];
rts;
ENDPROC(_sys_clone)
ENTRY(_sys_rt_sigreturn)
r0 = sp;
r0 += 24;
[--sp] = rets;
SP += -12;
pseudo_long_call _do_rt_sigreturn, p2;
SP += 12;
rets = [sp++];
rts;
ENDPROC(_sys_rt_sigreturn)

@ -101,40 +101,6 @@ void cpu_idle(void)
}
}
/*
* This gets run with P1 containing the
* function to call, and R1 containing
* the "args". Note P0 is clobbered on the way here.
*/
void kernel_thread_helper(void);
__asm__(".section .text\n"
".align 4\n"
"_kernel_thread_helper:\n\t"
"\tsp += -12;\n\t"
"\tr0 = r1;\n\t" "\tcall (p1);\n\t" "\tcall _do_exit;\n" ".previous");
/*
* Create a kernel thread.
*/
pid_t kernel_thread(int (*fn) (void *), void *arg, unsigned long flags)
{
struct pt_regs regs;
memset(&regs, 0, sizeof(regs));
regs.r1 = (unsigned long)arg;
regs.p1 = (unsigned long)fn;
regs.pc = (unsigned long)kernel_thread_helper;
regs.orig_p0 = -1;
/* Set bit 2 to tell ret_from_fork we should be returning to kernel
mode. */
regs.ipend = 0x8002;
__asm__ __volatile__("%0 = syscfg;":"=da"(regs.syscfg):);
return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL,
NULL);
}
EXPORT_SYMBOL(kernel_thread);
/*
* Do necessary setup to start up a newly executed thread.
*
@ -161,70 +127,48 @@ void flush_thread(void)
{
}
asmlinkage int bfin_vfork(struct pt_regs *regs)
asmlinkage int bfin_clone(unsigned long clone_flags, unsigned long newsp)
{
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL,
NULL);
}
asmlinkage int bfin_clone(struct pt_regs *regs)
{
unsigned long clone_flags;
unsigned long newsp;
#ifdef __ARCH_SYNC_CORE_DCACHE
if (current->nr_cpus_allowed == num_possible_cpus())
set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
#endif
/* syscall2 puts clone_flags in r0 and usp in r1 */
clone_flags = regs->r0;
newsp = regs->r1;
if (!newsp)
newsp = rdusp();
else
if (newsp)
newsp -= 12;
return do_fork(clone_flags, newsp, regs, 0, NULL, NULL);
return do_fork(clone_flags, newsp, 0, NULL, NULL);
}
int
copy_thread(unsigned long clone_flags,
unsigned long usp, unsigned long topstk,
struct task_struct *p, struct pt_regs *regs)
struct task_struct *p)
{
struct pt_regs *childregs;
unsigned long *v;
childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
*childregs = *regs;
childregs->r0 = 0;
v = ((unsigned long *)childregs) - 2;
if (unlikely(p->flags & PF_KTHREAD)) {
memset(childregs, 0, sizeof(struct pt_regs));
v[0] = usp;
v[1] = topstk;
childregs->orig_p0 = -1;
childregs->ipend = 0x8000;
__asm__ __volatile__("%0 = syscfg;":"=da"(childregs->syscfg):);
p->thread.usp = 0;
} else {
*childregs = *current_pt_regs();
childregs->r0 = 0;
p->thread.usp = usp ? : rdusp();
v[0] = v[1] = 0;
}
p->thread.usp = usp;
p->thread.ksp = (unsigned long)childregs;
p->thread.ksp = (unsigned long)v;
p->thread.pc = (unsigned long)ret_from_fork;
return 0;
}
/*
* sys_execve() executes a new program.
*/
asmlinkage int sys_execve(const char __user *name,
const char __user *const __user *argv,
const char __user *const __user *envp)
{
int error;
struct filename *filename;
struct pt_regs *regs = (struct pt_regs *)((&name) + 6);
filename = getname(name);
error = PTR_ERR(filename);
if (IS_ERR(filename))
return error;
error = do_execve(filename->name, argv, envp, regs);
putname(filename);
return error;
}
unsigned long get_wchan(struct task_struct *p)
{
unsigned long fp, pc;

@ -82,9 +82,9 @@ rt_restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *p
return err;
}
asmlinkage int do_rt_sigreturn(unsigned long __unused)
asmlinkage int sys_rt_sigreturn(void)
{
struct pt_regs *regs = (struct pt_regs *)__unused;
struct pt_regs *regs = current_pt_regs();
unsigned long usp = rdusp();
struct rt_sigframe *frame = (struct rt_sigframe *)(usp);
sigset_t set;

@ -530,61 +530,6 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
jump .Lsyscall_really_exit;
ENDPROC(_trap)
ENTRY(_kernel_execve)
link SIZEOF_PTREGS;
p0 = sp;
r3 = SIZEOF_PTREGS / 4;
r4 = 0(x);
.Lclear_regs:
[p0++] = r4;
r3 += -1;
cc = r3 == 0;
if !cc jump .Lclear_regs (bp);
p0 = sp;
sp += -16;
[sp + 12] = p0;
pseudo_long_call _do_execve, p5;
SP += 16;
cc = r0 == 0;
if ! cc jump .Lexecve_failed;
/* Success. Copy our temporary pt_regs to the top of the kernel
* stack and do a normal exception return.
*/
r1 = sp;
r0 = (-KERNEL_STACK_SIZE) (x);
r1 = r1 & r0;
p2 = r1;
p3 = [p2];
r0 = KERNEL_STACK_SIZE - 4 (z);
p1 = r0;
p1 = p1 + p2;
p0 = fp;
r4 = [p0--];
r3 = SIZEOF_PTREGS / 4;
.Lcopy_regs:
r4 = [p0--];
[p1--] = r4;
r3 += -1;
cc = r3 == 0;
if ! cc jump .Lcopy_regs (bp);
r0 = (KERNEL_STACK_SIZE - SIZEOF_PTREGS) (z);
p1 = r0;
p1 = p1 + p2;
sp = p1;
r0 = syscfg;
[SP + PT_SYSCFG] = r0;
[p3 + (TASK_THREAD + THREAD_KSP)] = sp;
RESTORE_CONTEXT;
rti;
.Lexecve_failed:
unlink;
rts;
ENDPROC(_kernel_execve)
ENTRY(_system_call)
/* Store IPEND */
p2.l = lo(IPEND);
@ -1486,7 +1431,7 @@ ENTRY(_sys_call_table)
.long _sys_ni_syscall /* old sys_ipc */
.long _sys_fsync
.long _sys_ni_syscall /* old sys_sigreturn */
.long _sys_clone /* 120 */
.long _bfin_clone /* 120 */
.long _sys_setdomainname
.long _sys_newuname
.long _sys_ni_syscall /* old sys_modify_ldt */

@ -18,6 +18,7 @@ config C6X
select OF_EARLY_FLATTREE
select GENERIC_CLOCKEVENTS
select GENERIC_KERNEL_THREAD
select GENERIC_KERNEL_EXECVE
select MODULES_USE_ELF_RELA
config MMU

@ -41,10 +41,6 @@ extern long sys_fallocate_c6x(int fd, int mode,
u32 len_lo, u32 len_hi);
extern int sys_cache_sync(unsigned long s, unsigned long e);
struct pt_regs;
extern asmlinkage long sys_c6x_clone(struct pt_regs *regs);
#include <asm-generic/syscalls.h>
#endif /* __ASM_C6X_SYSCALLS_H */

@ -14,8 +14,8 @@
* more details.
*/
#define __ARCH_WANT_KERNEL_EXECVE
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE
/* Use the standard ABI for syscalls. */
#include <asm-generic/unistd.h>

@ -415,19 +415,9 @@ ENTRY(ret_from_kernel_thread)
0:
B .S2 B10 /* call fn */
LDW .D2T1 *+SP(REGS_A1+8),A4 /* get arg */
MVKL .S2 sys_exit,B11
MVKH .S2 sys_exit,B11
ADDKPC .S2 0f,B3,1
0:
BNOP .S2 B11,5 /* jump to sys_exit */
ADDKPC .S2 ret_from_fork_2,B3,3
ENDPROC(ret_from_kernel_thread)
ENTRY(ret_from_kernel_execve)
GET_THREAD_INFO A12
BNOP .S2 syscall_exit,4
ADD .D2X A4,-8,SP
ENDPROC(ret_from_kernel_execve)
;;
;; These are the interrupt handlers, responsible for calling c6x_do_IRQ()
;;
@ -624,18 +614,6 @@ ENDPROC(sys_sigaltstack)
;; Special system calls
;; return address is in B3
;;
ENTRY(sys_clone)
ADD .D1X SP,8,A4
#ifdef CONFIG_C6X_BIG_KERNEL
|| MVKL .S1 sys_c6x_clone,A0
MVKH .S1 sys_c6x_clone,A0
BNOP .S2X A0,5
#else
|| B .S2 sys_c6x_clone
NOP 5
#endif
ENDPROC(sys_clone)
ENTRY(sys_rt_sigreturn)
ADD .D1X SP,8,A4
#ifdef CONFIG_C6X_BIG_KERNEL

@ -112,22 +112,6 @@ void exit_thread(void)
{
}
SYSCALL_DEFINE1(c6x_clone, struct pt_regs *, regs)
{
unsigned long clone_flags;
unsigned long newsp;
/* syscall puts clone_flags in A4 and usp in B4 */
clone_flags = regs->orig_a4;
if (regs->b4)
newsp = regs->b4;
else
newsp = regs->sp;
return do_fork(clone_flags, newsp, regs, 0, (int __user *)regs->a6,
(int __user *)regs->b6);
}
/*
* Do necessary setup to start up a newly executed thread.
*/
@ -155,13 +139,13 @@ void start_thread(struct pt_regs *regs, unsigned int pc, unsigned long usp)
*/
int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long ustk_size,
struct task_struct *p, struct pt_regs *regs)
struct task_struct *p)
{
struct pt_regs *childregs;
childregs = task_pt_regs(p);
if (!regs) {
if (unlikely(p->flags & PF_KTHREAD)) {
/* case of __kernel_thread: we return to supervisor space */
memset(childregs, 0, sizeof(struct pt_regs));
childregs->sp = (unsigned long)(childregs + 1);
@ -170,8 +154,9 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
childregs->a1 = ustk_size; /* argument */
} else {
/* Otherwise use the given stack */
*childregs = *regs;
childregs->sp = usp;
*childregs = *current_pt_regs();
if (usp)
childregs->sp = usp;
p->thread.pc = (unsigned long) ret_from_fork;
}

@ -49,6 +49,9 @@ config CRIS
select GENERIC_SMP_IDLE_THREAD if ETRAX_ARCH_V32
select GENERIC_CMOS_UPDATE
select MODULES_USE_ELF_RELA
select GENERIC_KERNEL_THREAD
select GENERIC_KERNEL_EXECVE
select CLONE_BACKWARDS2
config HZ
int

@ -35,6 +35,7 @@
.globl system_call
.globl ret_from_intr
.globl ret_from_fork
.globl ret_from_kernel_thread
.globl resume
.globl multiple_interrupt
.globl hwbreakpoint
@ -81,7 +82,14 @@ ret_from_fork:
jsr schedule_tail
ba ret_from_sys_call
nop
ret_from_kernel_thread:
jsr schedule_tail
move.d $r2, $r10 ; argument is here
jsr $r1 ; call the payload
moveq 0, $r9 ; no syscall restarts, TYVM...
ba ret_from_sys_call
ret_from_intr:
;; check for resched if preemptive kernel or if we're going back to user-mode
;; this test matches the user_regs(regs) macro
@ -586,13 +594,6 @@ _ugdb_handle_breakpoint:
ba do_sigtrap ; SIGTRAP the offending process.
pop $dccr ; Restore dccr in delay slot.
.global kernel_execve
kernel_execve:
move.d __NR_execve, $r9
break 13
ret
nop
.data
hw_bp_trigs:

@ -17,6 +17,7 @@
#include <arch/svinto.h>
#include <linux/init.h>
#include <arch/system.h>
#include <linux/ptrace.h>
#ifdef CONFIG_ETRAX_GPIO
void etrax_gpio_wake_up_check(void); /* drivers/gpio.c */
@ -81,31 +82,6 @@ unsigned long thread_saved_pc(struct task_struct *t)
return task_pt_regs(t)->irp;
}
static void kernel_thread_helper(void* dummy, int (*fn)(void *), void * arg)
{
fn(arg);
do_exit(-1); /* Should never be called, return bad exit value */
}
/*
* Create a kernel thread
*/
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
struct pt_regs regs;
memset(&regs, 0, sizeof(regs));
/* Don't use r10 since that is set to 0 in copy_thread */
regs.r11 = (unsigned long)fn;
regs.r12 = (unsigned long)arg;
regs.irp = (unsigned long)kernel_thread_helper;
regs.dccr = 1 << I_DCCR_BITNR;
/* Ok, create the new process.. */
return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
/* setup the child's kernel stack with a pt_regs and switch_stack on it.
* it will be un-nested during _resume and _ret_from_sys_call when the
* new thread is scheduled.
@ -115,29 +91,34 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
*
*/
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long unused,
struct task_struct *p, struct pt_regs *regs)
unsigned long arg, struct task_struct *p)
{
struct pt_regs * childregs;
struct switch_stack *swstack;
struct pt_regs *childregs = task_pt_regs(p);
struct switch_stack *swstack = ((struct switch_stack *)childregs) - 1;
/* put the pt_regs structure at the end of the new kernel stack page and fix it up
* remember that the task_struct doubles as the kernel stack for the task
*/
childregs = task_pt_regs(p);
*childregs = *regs; /* struct copy of pt_regs */
p->set_child_tid = p->clear_child_tid = NULL;
if (unlikely(p->flags & PF_KTHREAD)) {
memset(swstack, 0,
sizeof(struct switch_stack) + sizeof(struct pt_regs));
swstack->r1 = usp;
swstack->r2 = arg;
childregs->dccr = 1 << I_DCCR_BITNR;
swstack->return_ip = (unsigned long) ret_from_kernel_thread;
p->thread.ksp = (unsigned long) swstack;
p->thread.usp = 0;
return 0;
}
*childregs = *current_pt_regs(); /* struct copy of pt_regs */
childregs->r10 = 0; /* child returns 0 after a fork/clone */
/* put the switch stack right below the pt_regs */
swstack = ((struct switch_stack *)childregs) - 1;
/* put the switch stack right below the pt_regs */
swstack->r9 = 0; /* parameter to ret_from_sys_call, 0 == dont restart the syscall */
@ -147,7 +128,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
/* fix the user-mode stackpointer */
p->thread.usp = usp;
p->thread.usp = usp ?: rdusp();
/* and the kernel-mode one */
@ -161,70 +142,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
return 0;
}
/*
* Be aware of the "magic" 7th argument in the four system-calls below.
* They need the latest stackframe, which is put as the 7th argument by
* entry.S. The previous arguments are dummies or actually used, but need
* to be defined to reach the 7th argument.
*
* N.B.: Another method to get the stackframe is to use current_regs(). But
* it returns the latest stack-frame stacked when going from _user mode_ and
* some of these (at least sys_clone) are called from kernel-mode sometimes
* (for example during kernel_thread, above) and thus cannot use it. Thus,
* to be sure not to get any surprises, we use the method for the other calls
* as well.
*/
asmlinkage int sys_fork(long r10, long r11, long r12, long r13, long mof, long srp,
struct pt_regs *regs)
{
return do_fork(SIGCHLD, rdusp(), regs, 0, NULL, NULL);
}
/* if newusp is 0, we just grab the old usp */
/* FIXME: Is parent_tid/child_tid really third/fourth argument? Update lib? */
asmlinkage int sys_clone(unsigned long newusp, unsigned long flags,
int* parent_tid, int* child_tid, long mof, long srp,
struct pt_regs *regs)
{
if (!newusp)
newusp = rdusp();
return do_fork(flags, newusp, regs, 0, parent_tid, child_tid);
}
/* vfork is a system call in i386 because of register-pressure - maybe
* we can remove it and handle it in libc but we put it here until then.
*/
asmlinkage int sys_vfork(long r10, long r11, long r12, long r13, long mof, long srp,
struct pt_regs *regs)
{
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL, NULL);
}
/*
* sys_execve() executes a new program.
*/
asmlinkage int sys_execve(const char *fname,
const char *const *argv,
const char *const *envp,
long r13, long mof, long srp,
struct pt_regs *regs)
{
int error;
struct filename *filename;
filename = getname(fname);
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
error = do_execve(filename->name, argv, envp, regs);
putname(filename);
out:
return error;
}
unsigned long get_wchan(struct task_struct *p)
{
#if 0

@ -31,6 +31,7 @@
.globl system_call
.globl ret_from_intr
.globl ret_from_fork
.globl ret_from_kernel_thread
.globl resume
.globl multiple_interrupt
.globl nmi_interrupt
@ -84,6 +85,18 @@ ret_from_fork:
nop
.size ret_from_fork, . - ret_from_fork
.type ret_from_kernel_thread,@function
ret_from_kernel_thread:
jsr schedule_tail
nop
move.d $r2, $r10
jsr $r1
nop
moveq 0, $r9 ; no syscall restarts, TYVM...
ba ret_from_sys_call
nop
.size ret_from_kernel_thread, . - ret_from_kernel_thread
.type ret_from_intr,@function
ret_from_intr:
;; Check for resched if preemptive kernel, or if we're going back to
@ -531,15 +544,6 @@ _ugdb_handle_exception:
ba do_sigtrap ; SIGTRAP the offending process.
move.d [$sp+], $r0 ; Restore R0 in delay slot.
.global kernel_execve
.type kernel_execve,@function
kernel_execve:
move.d __NR_execve, $r9
break 13
ret
nop
.size kernel_execve, . - kernel_execve
.data
.section .rodata,"a"

@ -16,6 +16,7 @@
#include <hwregs/reg_map.h>
#include <hwregs/timer_defs.h>
#include <hwregs/intr_vect_defs.h>
#include <linux/ptrace.h>
extern void stop_watchdog(void);
@ -94,31 +95,6 @@ unsigned long thread_saved_pc(struct task_struct *t)
return task_pt_regs(t)->erp;
}
static void
kernel_thread_helper(void* dummy, int (*fn)(void *), void * arg)
{
fn(arg);
do_exit(-1); /* Should never be called, return bad exit value. */
}
/* Create a kernel thread. */
int
kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
struct pt_regs regs;
memset(&regs, 0, sizeof(regs));
/* Don't use r10 since that is set to 0 in copy_thread. */
regs.r11 = (unsigned long) fn;
regs.r12 = (unsigned long) arg;
regs.erp = (unsigned long) kernel_thread_helper;
regs.ccs = 1 << (I_CCS_BITNR + CCS_SHIFT);
/* Create the new process. */
return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
/*
* Setup the child's kernel stack with a pt_regs and call switch_stack() on it.
* It will be unnested during _resume and _ret_from_sys_call when the new thread
@ -129,34 +105,42 @@ kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
*/
extern asmlinkage void ret_from_fork(void);
extern asmlinkage void ret_from_kernel_thread(void);
int
copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long unused,
struct task_struct *p, struct pt_regs *regs)
unsigned long arg, struct task_struct *p)
{
struct pt_regs *childregs;
struct switch_stack *swstack;
struct pt_regs *childregs = task_pt_regs(p);
struct switch_stack *swstack = ((struct switch_stack *) childregs) - 1;
/*
* Put the pt_regs structure at the end of the new kernel stack page and
* fix it up. Note: the task_struct doubles as the kernel stack for the
* task.
*/
childregs = task_pt_regs(p);
*childregs = *regs; /* Struct copy of pt_regs. */
p->set_child_tid = p->clear_child_tid = NULL;
if (unlikely(p->flags & PF_KTHREAD)) {
memset(swstack, 0,
sizeof(struct switch_stack) + sizeof(struct pt_regs));
swstack->r1 = usp;
swstack->r2 = arg;
childregs->ccs = 1 << (I_CCS_BITNR + CCS_SHIFT);
swstack->return_ip = (unsigned long) ret_from_kernel_thread;
p->thread.ksp = (unsigned long) swstack;
p->thread.usp = 0;
return 0;
}
*childregs = *current_pt_regs(); /* Struct copy of pt_regs. */
childregs->r10 = 0; /* Child returns 0 after a fork/clone. */
/* Set a new TLS ?
* The TLS is in $mof because it is the 5th argument to sys_clone.
*/
if (p->mm && (clone_flags & CLONE_SETTLS)) {
task_thread_info(p)->tls = regs->mof;
task_thread_info(p)->tls = childregs->mof;
}
/* Put the switch stack right below the pt_regs. */
swstack = ((struct switch_stack *) childregs) - 1;
/* Parameter to ret_from_sys_call. 0 is don't restart the syscall. */
swstack->r9 = 0;
@ -168,76 +152,12 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
swstack->return_ip = (unsigned long) ret_from_fork;
/* Fix the user-mode and kernel-mode stackpointer. */
p->thread.usp = usp;
p->thread.usp = usp ?: rdusp();
p->thread.ksp = (unsigned long) swstack;
return 0;
}
/*
* Be aware of the "magic" 7th argument in the four system-calls below.
* They need the latest stackframe, which is put as the 7th argument by
* entry.S. The previous arguments are dummies or actually used, but need
* to be defined to reach the 7th argument.
*
* N.B.: Another method to get the stackframe is to use current_regs(). But
* it returns the latest stack-frame stacked when going from _user mode_ and
* some of these (at least sys_clone) are called from kernel-mode sometimes
* (for example during kernel_thread, above) and thus cannot use it. Thus,
* to be sure not to get any surprises, we use the method for the other calls
* as well.
*/
asmlinkage int
sys_fork(long r10, long r11, long r12, long r13, long mof, long srp,
struct pt_regs *regs)
{
return do_fork(SIGCHLD, rdusp(), regs, 0, NULL, NULL);
}
/* FIXME: Is parent_tid/child_tid really third/fourth argument? Update lib? */
asmlinkage int
sys_clone(unsigned long newusp, unsigned long flags, int *parent_tid, int *child_tid,
unsigned long tls, long srp, struct pt_regs *regs)
{
if (!newusp)
newusp = rdusp();
return do_fork(flags, newusp, regs, 0, parent_tid, child_tid);
}
/*
* vfork is a system call in i386 because of register-pressure - maybe
* we can remove it and handle it in libc but we put it here until then.
*/
asmlinkage int
sys_vfork(long r10, long r11, long r12, long r13, long mof, long srp,
struct pt_regs *regs)
{
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL, NULL);
}
/* sys_execve() executes a new program. */
asmlinkage int
sys_execve(const char *fname,
const char *const *argv,
const char *const *envp, long r13, long mof, long srp,
struct pt_regs *regs)
{
int error;
struct filename *filename;
filename = getname(fname);
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
error = do_execve(filename->name, argv, envp, regs);
putname(filename);
out:
return error;
}
unsigned long
get_wchan(struct task_struct *p)
{

@ -49,8 +49,6 @@ struct task_struct;
#define task_pt_regs(task) user_regs(task_thread_info(task))
#define current_regs() task_pt_regs(current)
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)

@ -152,12 +152,6 @@ typedef struct sigaltstack {
#ifdef __KERNEL__
#include <asm/sigcontext.h>
/* here we could define asm-optimized sigaddset, sigdelset etc. operations.
* if we don't, generic ones are used from linux/signal.h
*/
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
#endif /* __KERNEL__ */
#endif

@ -371,6 +371,10 @@
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
/*
* "Conditional" syscalls

@ -30,7 +30,6 @@ extern void __negdi2(void);
extern void iounmap(volatile void * __iomem);
/* Platform dependent support */
EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(get_cmos_time);
EXPORT_SYMBOL(loops_per_usec);

@ -30,6 +30,9 @@
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
/*
* "Conditional" syscalls

@ -139,42 +139,12 @@ inline unsigned long user_stack(const struct pt_regs *regs)
return user_mode(regs) ? regs->sp : 0;
}
asmlinkage int sys_fork(void)
{
#ifndef CONFIG_MMU
/* fork almost works, enough to trick you into looking elsewhere:-( */
return -EINVAL;
#else
return do_fork(SIGCHLD, user_stack(__frame), __frame, 0, NULL, NULL);
#endif
}
asmlinkage int sys_vfork(void)
{
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, user_stack(__frame), __frame, 0,
NULL, NULL);
}
/*****************************************************************************/
/*
* clone a process
* - tlsptr is retrieved by copy_thread()
*/
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
int __user *parent_tidptr, int __user *child_tidptr,
int __user *tlsptr)
{
if (!newsp)
newsp = user_stack(__frame);
return do_fork(clone_flags, newsp, __frame, 0, parent_tidptr, child_tidptr);
} /* end sys_clone() */
/*
* set up the kernel stack and exception frames for a new process
*/
int copy_thread(unsigned long clone_flags,
unsigned long usp, unsigned long arg,
struct task_struct *p, struct pt_regs *regs)
struct task_struct *p)
{
struct pt_regs *childregs;
@ -182,9 +152,7 @@ int copy_thread(unsigned long clone_flags,
(task_stack_page(p) + THREAD_SIZE - FRV_FRAME0_SIZE);
/* set up the userspace frame (the only place that the USP is stored) */
*childregs = *__kernel_frame0_ptr;
p->set_child_tid = p->clear_child_tid = NULL;
*childregs = *current_pt_regs();
p->thread.frame = childregs;
p->thread.curr = p;
@ -193,18 +161,15 @@ int copy_thread(unsigned long clone_flags,
p->thread.lr = 0;
p->thread.frame0 = childregs;
if (unlikely(!regs)) {
if (unlikely(p->flags & PF_KTHREAD)) {
childregs->gr9 = usp; /* function */
childregs->gr8 = arg;
p->thread.pc = (unsigned long) ret_from_kernel_thread;
save_user_regs(p->thread.user);
return 0;
}
/* set up the userspace frame (the only place that the USP is stored) */
*childregs = *regs;
childregs->sp = usp;
if (usp)
childregs->sp = usp;
childregs->next_frame = NULL;
p->thread.pc = (unsigned long) ret_from_fork;

@ -8,6 +8,8 @@ config H8300
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
select MODULES_USE_ELF_RELA
select GENERIC_KERNEL_THREAD
select GENERIC_KERNEL_EXECVE
config SYMBOL_PREFIX
string

@ -107,8 +107,6 @@ static inline void release_thread(struct task_struct *dead_task)
{
}
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
/*
* Free current thread data structures etc..
*/

@ -60,6 +60,9 @@ struct pt_regs {
#define user_mode(regs) (!((regs)->ccr & PS_S))
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)
#define current_pt_regs() ((struct pt_regs *) \
(THREAD_SIZE + (unsigned long)current_thread_info()) - 1)
#define signal_pt_regs() ((struct pt_regs *)current->thread.esp0)
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _H8300_PTRACE_H */

@ -154,8 +154,6 @@ typedef struct sigaltstack {
#include <asm/sigcontext.h>
#undef __HAVE_ARCH_SIG_BITOPS
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
#endif /* __KERNEL__ */
#endif /* _H8300_SIGNAL_H */

@ -356,6 +356,10 @@
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
/*
* "Conditional" syscalls

@ -158,6 +158,7 @@ INTERRUPTS = 128
.globl SYMBOL_NAME(system_call)
.globl SYMBOL_NAME(ret_from_exception)
.globl SYMBOL_NAME(ret_from_fork)
.globl SYMBOL_NAME(ret_from_kernel_thread)
.globl SYMBOL_NAME(ret_from_interrupt)
.globl SYMBOL_NAME(interrupt_redirect_table)
.globl SYMBOL_NAME(sw_ksp),SYMBOL_NAME(sw_usp)
@ -330,6 +331,14 @@ SYMBOL_NAME_LABEL(ret_from_fork)
jsr @SYMBOL_NAME(schedule_tail)
jmp @SYMBOL_NAME(ret_from_exception)
SYMBOL_NAME_LABEL(ret_from_kernel_thread)
mov.l er2,er0
jsr @SYMBOL_NAME(schedule_tail)
mov.l @(LER4:16,sp),er0
mov.l @(LER5:16,sp),er1
jsr @er1
jmp @SYMBOL_NAME(ret_from_exception)
SYMBOL_NAME_LABEL(resume)
/*
* Beware - when entering resume, offset of tss is in d1,

@ -33,7 +33,6 @@ EXPORT_SYMBOL(strncmp);
EXPORT_SYMBOL(ip_fast_csum);
EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);

@ -47,6 +47,7 @@ void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
/*
* The idle loop on an H8/300..
@ -122,113 +123,34 @@ void show_regs(struct pt_regs * regs)
printk("\n");
}
/*
* Create a kernel thread
*/
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
long retval;
long clone_arg;
mm_segment_t fs;
fs = get_fs();
set_fs (KERNEL_DS);
clone_arg = flags | CLONE_VM;
__asm__("mov.l sp,er3\n\t"
"sub.l er2,er2\n\t"
"mov.l %2,er1\n\t"
"mov.l %1,er0\n\t"
"trapa #0\n\t"
"cmp.l sp,er3\n\t"
"beq 1f\n\t"
"mov.l %4,er0\n\t"
"mov.l %3,er1\n\t"
"jsr @er1\n\t"
"mov.l %5,er0\n\t"
"trapa #0\n"
"1:\n\t"
"mov.l er0,%0"
:"=r"(retval)
:"i"(__NR_clone),"g"(clone_arg),"g"(fn),"g"(arg),"i"(__NR_exit)
:"er0","er1","er2","er3");
set_fs (fs);
return retval;
}
void flush_thread(void)
{
}
/*
* "h8300_fork()".. By the time we get here, the
* non-volatile registers have also been saved on the
* stack. We do some ugly pointer stuff here.. (see
* also copy_thread)
*/
asmlinkage int h8300_fork(struct pt_regs *regs)
{
return -EINVAL;
}
asmlinkage int h8300_vfork(struct pt_regs *regs)
{
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL, NULL);
}
asmlinkage int h8300_clone(struct pt_regs *regs)
{
unsigned long clone_flags;
unsigned long newsp;
/* syscall2 puts clone_flags in er1 and usp in er2 */
clone_flags = regs->er1;
newsp = regs->er2;
if (!newsp)
newsp = rdusp();
return do_fork(clone_flags, newsp, regs, 0, NULL, NULL);
}
int copy_thread(unsigned long clone_flags,
unsigned long usp, unsigned long topstk,
struct task_struct * p, struct pt_regs * regs)
struct task_struct * p)
{
struct pt_regs * childregs;
childregs = (struct pt_regs *) (THREAD_SIZE + task_stack_page(p)) - 1;
*childregs = *regs;
if (unlikely(p->flags & PF_KTHREAD)) {
memset(childregs, 0, sizeof(struct pt_regs));
childregs->retpc = (unsigned long) ret_from_kernel_thread;
childregs->er4 = topstk; /* arg */
childregs->er5 = usp; /* fn */
p->thread.ksp = (unsigned long)childregs;
}
*childregs = *current_pt_regs();
childregs->retpc = (unsigned long) ret_from_fork;
childregs->er0 = 0;
p->thread.usp = usp;
p->thread.usp = usp ?: rdusp();
p->thread.ksp = (unsigned long)childregs;
return 0;
}
/*
* sys_execve() executes a new program.
*/
asmlinkage int sys_execve(const char *name,
const char *const *argv,
const char *const *envp,
int dummy, ...)
{
int error;
struct filename *filename;
struct pt_regs *regs = (struct pt_regs *) ((unsigned char *)&dummy-4);
filename = getname(name);
error = PTR_ERR(filename);
if (IS_ERR(filename))
return error;
error = do_execve(filename->name, argv, envp, regs);
putname(filename);
return error;
}
unsigned long thread_saved_pc(struct task_struct *tsk)
{
return ((struct pt_regs *)tsk->thread.esp0)->pc;

@ -46,29 +46,3 @@ asmlinkage void syscall_print(void *dummy,...)
((regs->pc)&0xffffff)-2,regs->orig_er0,regs->er1,regs->er2,regs->er3,regs->er0);
}
#endif
/*
* Do a system call from kernel instead of calling sys_execve so we
* end up with proper pt_regs.
*/
asmlinkage
int kernel_execve(const char *filename,
const char *const argv[],
const char *const envp[])
{
register long res __asm__("er0");
register const char *const *_c __asm__("er3") = envp;
register const char *const *_b __asm__("er2") = argv;
register const char * _a __asm__("er1") = filename;
__asm__ __volatile__ ("mov.l %1,er0\n\t"
"trapa #0\n\t"
: "=r" (res)
: "g" (__NR_execve),
"g" (_a),
"g" (_b),
"g" (_c)
: "cc", "memory");
return res;
}

View File

@ -340,21 +340,12 @@ SYMBOL_NAME_LABEL(sys_call_table)
bra SYMBOL_NAME(syscall_trampoline):8
.endm
SYMBOL_NAME_LABEL(sys_clone)
call_sp h8300_clone
SYMBOL_NAME_LABEL(sys_sigreturn)
call_sp do_sigreturn
SYMBOL_NAME_LABEL(sys_rt_sigreturn)
call_sp do_rt_sigreturn
SYMBOL_NAME_LABEL(sys_fork)
call_sp h8300_fork
SYMBOL_NAME_LABEL(sys_vfork)
call_sp h8300_vfork
SYMBOL_NAME_LABEL(syscall_trampoline)
mov.l sp,er0
jmp @er6
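
For reference, the hand-rolled h8300 kernel_thread() removed above is superseded by a single generic helper; a minimal sketch of it, reconstructed from the calling convention that copy_thread() above consumes (the exact body is an assumption, not quoted from this diff):

pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	/* fn and arg ride down to copy_thread() in the stack pointer/size slots */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, (unsigned long)fn,
		       (unsigned long)arg, NULL, NULL);
}

copy_thread() spots PF_KTHREAD, parks fn and arg in callee-saved registers (er5/er4 in the h8300 hunk above), and ret_from_kernel_thread calls fn(arg) before dropping into the normal syscall return path.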

View File

@ -31,6 +31,8 @@ config HEXAGON
select GENERIC_CLOCKEVENTS
select GENERIC_CLOCKEVENTS_BROADCAST
select MODULES_USE_ELF_RELA
select GENERIC_KERNEL_THREAD
select GENERIC_KERNEL_EXECVE
---help---
Qualcomm Hexagon is a processor architecture designed for high
performance and low power across a wide variety of applications.

View File

@ -34,7 +34,6 @@
struct task_struct;
/* this is defined in arch/process.c */
extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
extern unsigned long thread_saved_pc(struct task_struct *tsk);
extern void start_thread(struct pt_regs *, unsigned long, unsigned long);

View File

@ -25,14 +25,6 @@ typedef long (*syscall_fn)(unsigned long, unsigned long,
unsigned long, unsigned long,
unsigned long, unsigned long);
asmlinkage int sys_execve(char __user *ufilename, char __user * __user *argv,
char __user * __user *envp);
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
unsigned long parent_tidp, unsigned long child_tidp);
#define sys_execve sys_execve
#define sys_clone sys_clone
#include <asm-generic/syscalls.h>
extern void *sys_call_table[];

View File

@ -32,4 +32,8 @@
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
#define current_pt_regs() \
((struct pt_regs *) \
((unsigned long)current_thread_info() + THREAD_SIZE) - 1)
#endif

View File

@ -27,5 +27,7 @@
*/
#define sys_mmap2 sys_mmap_pgoff
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE
#include <asm-generic/unistd.h>

View File

@ -3,8 +3,7 @@ extra-y := head.o vmlinux.lds
obj-$(CONFIG_SMP) += smp.o topology.o
obj-y += setup.o irq_cpu.o traps.o syscalltab.o signal.o time.o
obj-y += process.o syscall.o trampoline.o reset.o ptrace.o
obj-y += vdso.o
obj-y += process.o trampoline.o reset.o ptrace.o vdso.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_MODULES) += module.o hexagon_ksyms.o

View File

@ -25,33 +25,6 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
/*
* Kernel thread creation. The desired kernel function is "wrapped"
* in the kernel_thread_helper function, which does cleanup
* afterwards.
*/
static void __noreturn kernel_thread_helper(void *arg, int (*fn)(void *))
{
do_exit(fn(arg));
}
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
struct pt_regs regs;
memset(&regs, 0, sizeof(regs));
/*
* Yes, we're exploiting illicit knowledge of the ABI here.
*/
regs.r00 = (unsigned long) arg;
regs.r01 = (unsigned long) fn;
pt_set_elr(&regs, (unsigned long)kernel_thread_helper);
pt_set_kmode(&regs);
return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
/*
* Program thread launch. Often defined as a macro in processor.h,
* but we're shooting for a small footprint and it's not an inner-loop
@ -114,8 +87,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
* Copy architecture-specific thread state
*/
int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long unused, struct task_struct *p,
struct pt_regs *regs)
unsigned long arg, struct task_struct *p)
{
struct thread_info *ti = task_thread_info(p);
struct hexagon_switch_stack *ss;
@ -125,61 +97,51 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
childregs = (struct pt_regs *) (((unsigned long) ti + THREAD_SIZE) -
sizeof(*childregs));
memcpy(childregs, regs, sizeof(*childregs));
ti->regs = childregs;
/*
* Establish kernel stack pointer and initial PC for new thread
* Note that unlike the usual situation, we do not copy the
* parent's callee-saved here; those are in pt_regs and whatever
* we leave here will be overridden on return to userland.
*/
ss = (struct hexagon_switch_stack *) ((unsigned long) childregs -
sizeof(*ss));
ss->lr = (unsigned long)ret_from_fork;
p->thread.switch_sp = ss;
if (unlikely(p->flags & PF_KTHREAD)) {
memset(childregs, 0, sizeof(struct pt_regs));
/* r24 <- fn, r25 <- arg */
ss->r2524 = usp | ((u64)arg << 32);
pt_set_kmode(childregs);
return 0;
}
memcpy(childregs, current_pt_regs(), sizeof(*childregs));
ss->r2524 = 0;
/* If User mode thread, set pt_reg stack pointer as per parameter */
if (user_mode(childregs)) {
if (usp)
pt_set_rte_sp(childregs, usp);
/* Child sees zero return value */
childregs->r00 = 0;
/*
* The clone syscall has the C signature:
* int [r0] clone(int flags [r0],
* void *child_frame [r1],
* void *parent_tid [r2],
* void *child_tid [r3],
* void *thread_control_block [r4]);
* ugp is used to provide TLS support.
*/
if (clone_flags & CLONE_SETTLS)
childregs->ugp = childregs->r04;
/*
* Parent sees new pid -- not necessary, not even possible at
* this point in the fork process
* Might also want to set things like ti->addr_limit
*/
} else {
/*
* If kernel thread, resume stack is kernel stack base.
* Note that this is pointer arithmetic on pt_regs *
*/
pt_set_rte_sp(childregs, (unsigned long)(childregs + 1));
/*
* We need the current thread_info fast path pointer
* set up in pt_regs. The register to be used is
* parametric for assembler code, but the mechanism
* doesn't drop neatly into C. Needs to be fixed.
*/
childregs->THREADINFO_REG = (unsigned long) ti;
}
/* Child sees zero return value */
childregs->r00 = 0;
/*
* thread_info pointer is pulled out of task_struct "stack"
* field on switch_to.
* The clone syscall has the C signature:
* int [r0] clone(int flags [r0],
* void *child_frame [r1],
* void *parent_tid [r2],
* void *child_tid [r3],
* void *thread_control_block [r4]);
* ugp is used to provide TLS support.
*/
if (clone_flags & CLONE_SETTLS)
childregs->ugp = childregs->r04;
/*
* Parent sees new pid -- not necessary, not even possible at
* this point in the fork process
* Might also want to set things like ti->addr_limit
*/
p->stack = (void *)ti;
return 0;
}

View File

@ -249,14 +249,14 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
*/
asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
{
struct pt_regs *regs = current_thread_info()->regs;
struct pt_regs *regs = current_pt_regs();
return do_sigaltstack(uss, uoss, regs->r29);
}
asmlinkage int sys_rt_sigreturn(void)
{
struct pt_regs *regs = current_thread_info()->regs;
struct pt_regs *regs = current_pt_regs();
struct rt_sigframe __user *frame;
sigset_t blocked;

View File

@ -1,89 +0,0 @@
/*
* Hexagon system calls
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <asm/mman.h>
#include <asm/registers.h>
/*
* System calls with architecture-specific wrappers.
* See signal.c for signal-related system call wrappers.
*/
asmlinkage int sys_execve(char __user *ufilename,
const char __user *const __user *argv,
const char __user *const __user *envp)
{
struct pt_regs *pregs = current_thread_info()->regs;
struct filename *filename;
int retval;
filename = getname(ufilename);
retval = PTR_ERR(filename);
if (IS_ERR(filename))
return retval;
retval = do_execve(filename->name, argv, envp, pregs);
putname(filename);
return retval;
}
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
unsigned long parent_tidp, unsigned long child_tidp)
{
struct pt_regs *pregs = current_thread_info()->regs;
if (!newsp)
newsp = pregs->SP;
return do_fork(clone_flags, newsp, pregs, 0, (int __user *)parent_tidp,
(int __user *)child_tidp);
}
/*
* Do a system call from the kernel, so as to have a proper pt_regs
* and recycle the sys_execve infrastructure.
*/
int kernel_execve(const char *filename,
const char *const argv[], const char *const envp[])
{
register unsigned long __a0 asm("r0") = (unsigned long) filename;
register unsigned long __a1 asm("r1") = (unsigned long) argv;
register unsigned long __a2 asm("r2") = (unsigned long) envp;
int retval;
__asm__ volatile(
" R6 = #%4;\n"
" trap0(#1);\n"
" %0 = R0;\n"
: "=r" (retval)
: "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_execve)
);
return retval;
}
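
The wrappers deleted in this file are covered by the generic execve/clone entry points that the __ARCH_WANT_SYS_EXECVE and __ARCH_WANT_SYS_CLONE defines (added to hexagon's unistd.h above) opt into. A rough sketch of the generic execve wrapper, assuming the post-series do_execve() prototype with no pt_regs argument; treat the exact body as illustrative rather than quoted:

SYSCALL_DEFINE3(execve,
		const char __user *, filename,
		const char __user *const __user *, argv,
		const char __user *const __user *, envp)
{
	struct filename *path = getname(filename);
	int error = PTR_ERR(path);

	if (!IS_ERR(path)) {
		error = do_execve(path->name, argv, envp);
		putname(path);
	}
	return error;
}

The per-arch copies did the same getname()/do_execve()/putname() sequence but had to locate pt_regs by hand, which was the only genuinely architecture-specific part.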

View File

@ -266,4 +266,8 @@ _K_enter_machcheck:
.globl ret_from_fork
ret_from_fork:
call schedule_tail
P0 = cmp.eq(R24, #0);
if P0 jump return_from_syscall
R0 = R25;
callr R24
jump return_from_syscall

View File

@ -42,6 +42,8 @@ config IA64
select GENERIC_TIME_VSYSCALL_OLD
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_RELA
select GENERIC_KERNEL_THREAD
select GENERIC_KERNEL_EXECVE
default y
help
The Itanium Processor Family is Intel's 64-bit successor to

View File

@ -340,22 +340,6 @@ struct task_struct;
*/
#define release_thread(dead_task)
/*
* This is the mechanism for creating a new kernel thread.
*
* NOTE 1: Only a kernel-only process (i.e. the swapper or direct
* descendants who haven't done an "execve()") should use this: it
* will work within a system call from a "real" process, but the
* process memory space will not be free'd until both the parent and
* the child have exited.
*
* NOTE 2: This MUST NOT be an inlined function. Otherwise, we get
* into trouble in init/main.c when the child thread returns to
* do_basic_setup() and the timing is such that free_initmem() has
* been called already.
*/
extern pid_t kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);
/* Get wait channel for task P. */
extern unsigned long get_wchan (struct task_struct *p);

View File

@ -38,7 +38,5 @@ struct k_sigaction {
# include <asm/sigcontext.h>
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
# endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_SIGNAL_H */

View File

@ -29,6 +29,7 @@
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_SYS_EXECVE
#if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)

View File

@ -61,14 +61,13 @@ ENTRY(ia64_execve)
* Allocate 8 input registers since ptrace() may clobber them
*/
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
alloc loc1=ar.pfs,8,2,4,0
alloc loc1=ar.pfs,8,2,3,0
mov loc0=rp
.body
mov out0=in0 // filename
;; // stop bit between alloc and call
mov out1=in1 // argv
mov out2=in2 // envp
add out3=16,sp // regs
br.call.sptk.many rp=sys_execve
.ret0:
cmp4.ge p6,p7=r8,r0
@ -76,7 +75,6 @@ ENTRY(ia64_execve)
sxt4 r8=r8 // return 64-bit result
;;
stf.spill [sp]=f0
(p6) cmp.ne pKStk,pUStk=r0,r0 // a successful execve() lands us in user-mode...
mov rp=loc0
(p6) mov ar.pfs=r0 // clear ar.pfs on success
(p7) br.ret.sptk.many rp
@ -118,13 +116,12 @@ GLOBAL_ENTRY(sys_clone2)
mov loc1=r16 // save ar.pfs across do_fork
.body
mov out1=in1
mov out3=in2
mov out2=in2
tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
mov out4=in3 // parent_tidptr: valid only w/CLONE_PARENT_SETTID
mov out3=in3 // parent_tidptr: valid only w/CLONE_PARENT_SETTID
;;
(p6) st8 [r2]=in5 // store TLS in r16 for copy_thread()
mov out5=in4 // child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs
mov out4=in4 // child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
mov out0=in0 // out0 = clone_flags
br.call.sptk.many rp=do_fork
.ret1: .restore sp
@ -150,13 +147,12 @@ GLOBAL_ENTRY(sys_clone)
mov loc1=r16 // save ar.pfs across do_fork
.body
mov out1=in1
mov out3=16 // stacksize (compensates for 16-byte scratch area)
mov out2=16 // stacksize (compensates for 16-byte scratch area)
tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
mov out4=in2 // parent_tidptr: valid only w/CLONE_PARENT_SETTID
mov out3=in2 // parent_tidptr: valid only w/CLONE_PARENT_SETTID
;;
(p6) st8 [r2]=in4 // store TLS in r13 (tp)
mov out5=in3 // child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs
mov out4=in3 // child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
mov out0=in0 // out0 = clone_flags
br.call.sptk.many rp=do_fork
.ret2: .restore sp
@ -484,19 +480,6 @@ GLOBAL_ENTRY(prefetch_stack)
br.ret.sptk.many rp
END(prefetch_stack)
GLOBAL_ENTRY(kernel_execve)
rum psr.ac
mov r15=__NR_execve // put syscall number in place
break __BREAK_SYSCALL
br.ret.sptk.many rp
END(kernel_execve)
GLOBAL_ENTRY(clone)
mov r15=__NR_clone // put syscall number in place
break __BREAK_SYSCALL
br.ret.sptk.many rp
END(clone)
/*
* Invoke a system call, but do some tracing before and after the call.
* We MUST preserve the current register frame throughout this routine
@ -600,6 +583,27 @@ GLOBAL_ENTRY(ia64_strace_leave_kernel)
.ret4: br.cond.sptk ia64_leave_kernel
END(ia64_strace_leave_kernel)
ENTRY(call_payload)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(0)
/* call the kernel_thread payload; fn is in r4, arg - in r5 */
alloc loc1=ar.pfs,0,3,1,0
mov loc0=rp
mov loc2=gp
mov out0=r5 // arg
ld8 r14 = [r4], 8 // fn.address
;;
mov b6 = r14
ld8 gp = [r4] // fn.gp
;;
br.call.sptk.many rp=b6 // fn(arg)
.ret12: mov gp=loc2
mov rp=loc0
mov ar.pfs=loc1
/* ... and if it has returned, we are going to userland */
cmp.ne pKStk,pUStk=r0,r0
br.ret.sptk.many rp
END(call_payload)
GLOBAL_ENTRY(ia64_ret_from_clone)
PT_REGS_UNWIND_INFO(0)
{ /*
@ -616,6 +620,7 @@ GLOBAL_ENTRY(ia64_ret_from_clone)
br.call.sptk.many rp=ia64_invoke_schedule_tail
}
.ret8:
(pKStk) br.call.sptk.many rp=call_payload
adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
;;
ld4 r2=[r2]

View File

@ -1093,19 +1093,6 @@ GLOBAL_ENTRY(cycle_to_cputime)
END(cycle_to_cputime)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
GLOBAL_ENTRY(start_kernel_thread)
.prologue
.save rp, r0 // this is the end of the call-chain
.body
alloc r2 = ar.pfs, 0, 0, 2, 0
mov out0 = r9
mov out1 = r11;;
br.call.sptk.many rp = kernel_thread_helper;;
mov out0 = r8
br.call.sptk.many rp = sys_exit;;
1: br.sptk.few 1b // not reached
END(start_kernel_thread)
#ifdef CONFIG_IA64_BRL_EMU
/*

View File

@ -393,72 +393,24 @@ ia64_load_extra (struct task_struct *task)
int
copy_thread(unsigned long clone_flags,
unsigned long user_stack_base, unsigned long user_stack_size,
struct task_struct *p, struct pt_regs *regs)
struct task_struct *p)
{
extern char ia64_ret_from_clone;
struct switch_stack *child_stack, *stack;
unsigned long rbs, child_rbs, rbs_size;
struct pt_regs *child_ptregs;
struct pt_regs *regs = current_pt_regs();
int retval = 0;
#ifdef CONFIG_SMP
/*
* For SMP idle threads, fork_by_hand() calls do_fork with
* NULL regs.
*/
if (!regs)
return 0;
#endif
stack = ((struct switch_stack *) regs) - 1;
child_ptregs = (struct pt_regs *) ((unsigned long) p + IA64_STK_OFFSET) - 1;
child_stack = (struct switch_stack *) child_ptregs - 1;
/* copy parent's switch_stack & pt_regs to child: */
memcpy(child_stack, stack, sizeof(*child_ptregs) + sizeof(*child_stack));
rbs = (unsigned long) current + IA64_RBS_OFFSET;
child_rbs = (unsigned long) p + IA64_RBS_OFFSET;
rbs_size = stack->ar_bspstore - rbs;
/* copy the parent's register backing store to the child: */
memcpy((void *) child_rbs, (void *) rbs, rbs_size);
if (likely(user_mode(child_ptregs))) {
if (clone_flags & CLONE_SETTLS)
child_ptregs->r13 = regs->r16; /* see sys_clone2() in entry.S */
if (user_stack_base) {
child_ptregs->r12 = user_stack_base + user_stack_size - 16;
child_ptregs->ar_bspstore = user_stack_base;
child_ptregs->ar_rnat = 0;
child_ptregs->loadrs = 0;
}
} else {
/*
* Note: we simply preserve the relative position of
* the stack pointer here. There is no need to
* allocate a scratch area here, since that will have
* been taken care of by the caller of sys_clone()
* already.
*/
child_ptregs->r12 = (unsigned long) child_ptregs - 16; /* kernel sp */
child_ptregs->r13 = (unsigned long) p; /* set `current' pointer */
}
child_stack->ar_bspstore = child_rbs + rbs_size;
child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
/* copy parts of thread_struct: */
p->thread.ksp = (unsigned long) child_stack - 16;
/* stop some PSR bits from being inherited.
* the psr.up/psr.pp bits must be cleared on fork but inherited on execve()
* therefore we must specify them explicitly here and not include them in
* IA64_PSR_BITS_TO_CLEAR.
*/
child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
& ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));
/*
* NOTE: The calling convention considers all floating point
* registers in the high partition (fph) to be scratch. Since
@ -480,8 +432,66 @@ copy_thread(unsigned long clone_flags,
# define THREAD_FLAGS_TO_SET 0
p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
| THREAD_FLAGS_TO_SET);
ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */
if (unlikely(p->flags & PF_KTHREAD)) {
if (unlikely(!user_stack_base)) {
/* fork_idle() called us */
return 0;
}
memset(child_stack, 0, sizeof(*child_ptregs) + sizeof(*child_stack));
child_stack->r4 = user_stack_base; /* payload */
child_stack->r5 = user_stack_size; /* argument */
/*
* Preserve PSR bits, except for bits 32-34 and 37-45,
* which we can't read.
*/
child_ptregs->cr_ipsr = ia64_getreg(_IA64_REG_PSR) | IA64_PSR_BN;
/* mark as valid, empty frame */
child_ptregs->cr_ifs = 1UL << 63;
child_stack->ar_fpsr = child_ptregs->ar_fpsr
= ia64_getreg(_IA64_REG_AR_FPSR);
child_stack->pr = (1 << PRED_KERNEL_STACK);
child_stack->ar_bspstore = child_rbs;
child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
/* stop some PSR bits from being inherited.
* the psr.up/psr.pp bits must be cleared on fork but inherited on execve()
* therefore we must specify them explicitly here and not include them in
* IA64_PSR_BITS_TO_CLEAR.
*/
child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
& ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));
return 0;
}
stack = ((struct switch_stack *) regs) - 1;
/* copy parent's switch_stack & pt_regs to child: */
memcpy(child_stack, stack, sizeof(*child_ptregs) + sizeof(*child_stack));
/* copy the parent's register backing store to the child: */
rbs_size = stack->ar_bspstore - rbs;
memcpy((void *) child_rbs, (void *) rbs, rbs_size);
if (clone_flags & CLONE_SETTLS)
child_ptregs->r13 = regs->r16; /* see sys_clone2() in entry.S */
if (user_stack_base) {
child_ptregs->r12 = user_stack_base + user_stack_size - 16;
child_ptregs->ar_bspstore = user_stack_base;
child_ptregs->ar_rnat = 0;
child_ptregs->loadrs = 0;
}
child_stack->ar_bspstore = child_rbs + rbs_size;
child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
/* stop some PSR bits from being inherited.
* the psr.up/psr.pp bits must be cleared on fork but inherited on execve()
* therefore we must specify them explicitly here and not include them in
* IA64_PSR_BITS_TO_CLEAR.
*/
child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
& ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));
#ifdef CONFIG_PERFMON
if (current->thread.pfm_context)
pfm_inherit(p, child_ptregs);
@ -608,57 +618,6 @@ dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
return 1; /* f0-f31 are always valid so we always return 1 */
}
long
sys_execve (const char __user *filename,
const char __user *const __user *argv,
const char __user *const __user *envp,
struct pt_regs *regs)
{
struct filename *fname;
int error;
fname = getname(filename);
error = PTR_ERR(fname);
if (IS_ERR(fname))
goto out;
error = do_execve(fname->name, argv, envp, regs);
putname(fname);
out:
return error;
}
pid_t
kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
{
extern void start_kernel_thread (void);
unsigned long *helper_fptr = (unsigned long *) &start_kernel_thread;
struct {
struct switch_stack sw;
struct pt_regs pt;
} regs;
memset(&regs, 0, sizeof(regs));
regs.pt.cr_iip = helper_fptr[0]; /* set entry point (IP) */
regs.pt.r1 = helper_fptr[1]; /* set GP */
regs.pt.r9 = (unsigned long) fn; /* 1st argument */
regs.pt.r11 = (unsigned long) arg; /* 2nd argument */
/* Preserve PSR bits, except for bits 32-34 and 37-45, which we can't read. */
regs.pt.cr_ipsr = ia64_getreg(_IA64_REG_PSR) | IA64_PSR_BN;
regs.pt.cr_ifs = 1UL << 63; /* mark as valid, empty frame */
regs.sw.ar_fpsr = regs.pt.ar_fpsr = ia64_getreg(_IA64_REG_AR_FPSR);
regs.sw.ar_bspstore = (unsigned long) current + IA64_RBS_OFFSET;
regs.sw.pr = (1 << PRED_KERNEL_STACK);
return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs.pt, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
/* This gets called from kernel_thread() via ia64_invoke_thread_helper(). */
int
kernel_thread_helper (int (*fn)(void *), void *arg)
{
return (*fn)(arg);
}
/*
* Flush thread state. This is called when a thread does an execve().
*/

View File

@ -460,11 +460,6 @@ start_secondary (void *unused)
return 0;
}
struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
return NULL;
}
static int __cpuinit
do_boot_cpu (int sapicid, int cpu, struct task_struct *idle)
{

View File

@ -15,6 +15,8 @@ config M32R
select GENERIC_ATOMIC64
select ARCH_USES_GETTIMEOFFSET
select MODULES_USE_ELF_RELA
select GENERIC_KERNEL_THREAD
select GENERIC_KERNEL_EXECVE
config SBUS
bool

View File

@ -118,11 +118,6 @@ struct mm_struct;
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
/*
* create a kernel thread without removing it from tasklists
*/
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
/* Copy and release all segment info associated with a VM */
extern void copy_segments(struct task_struct *p, struct mm_struct * mm);
extern void release_segments(struct mm_struct * mm);

View File

@ -139,6 +139,8 @@ extern void withdraw_debug_trap(struct pt_regs *regs);
#define task_pt_regs(task) \
((struct pt_regs *)(task_stack_page(task) + THREAD_SIZE) - 1)
#define current_pt_regs() ((struct pt_regs *) \
((unsigned long)current_thread_info() + THREAD_SIZE) - 1)
#endif /* __KERNEL */

View File

@ -149,10 +149,6 @@ typedef struct sigaltstack {
#undef __HAVE_ARCH_SIG_BITOPS
struct pt_regs;
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
#endif /* __KERNEL__ */
#endif /* _ASM_M32R_SIGNAL_H */

View File

@ -352,6 +352,10 @@
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __IGNORE_lchown
#define __IGNORE_setuid

View File

@ -125,6 +125,15 @@
and \reg, sp
.endm
ENTRY(ret_from_kernel_thread)
pop r0
bl schedule_tail
GET_THREAD_INFO(r8)
ld r0, R0(r8)
ld r1, R1(r8)
jl r1
bra syscall_exit
ENTRY(ret_from_fork)
pop r0
bl schedule_tail

View File

@ -21,7 +21,6 @@ EXPORT_SYMBOL(boot_cpu_data);
EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(strncpy_from_user);
EXPORT_SYMBOL(__strncpy_from_user);

View File

@ -164,41 +164,6 @@ void show_regs(struct pt_regs * regs)
#endif
}
/*
* Create a kernel thread
*/
/*
* This is the mechanism for creating a new kernel thread.
*
* NOTE! Only a kernel-only process (i.e. the swapper or direct descendants
* who haven't done an "execve()") should use this: it will work within
* a system call from a "real" process, but the process memory space will
* not be free'd until both the parent and the child have exited.
*/
static void kernel_thread_helper(void *nouse, int (*fn)(void *), void *arg)
{
fn(arg);
do_exit(-1);
}
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
struct pt_regs regs;
memset(&regs, 0, sizeof (regs));
regs.r1 = (unsigned long)fn;
regs.r2 = (unsigned long)arg;
regs.bpc = (unsigned long)kernel_thread_helper;
regs.psw = M32R_PSW_BIE;
/* Ok, create the new process. */
return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL,
NULL);
}
/*
* Free current thread data structures etc..
*/
@ -227,88 +192,31 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
}
int copy_thread(unsigned long clone_flags, unsigned long spu,
unsigned long unused, struct task_struct *tsk, struct pt_regs *regs)
unsigned long arg, struct task_struct *tsk)
{
struct pt_regs *childregs = task_pt_regs(tsk);
extern void ret_from_fork(void);
extern void ret_from_kernel_thread(void);
/* Copy registers */
*childregs = *regs;
childregs->spu = spu;
childregs->r0 = 0; /* Child gets zero as return value */
regs->r0 = tsk->pid;
if (unlikely(tsk->flags & PF_KTHREAD)) {
memset(childregs, 0, sizeof(struct pt_regs));
childregs->psw = M32R_PSW_BIE;
childregs->r1 = spu; /* fn */
childregs->r0 = arg;
tsk->thread.lr = (unsigned long)ret_from_kernel_thread;
} else {
/* Copy registers */
*childregs = *current_pt_regs();
if (spu)
childregs->spu = spu;
childregs->r0 = 0; /* Child gets zero as return value */
tsk->thread.lr = (unsigned long)ret_from_fork;
}
tsk->thread.sp = (unsigned long)childregs;
tsk->thread.lr = (unsigned long)ret_from_fork;
return 0;
}
asmlinkage int sys_fork(unsigned long r0, unsigned long r1, unsigned long r2,
unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6,
struct pt_regs regs)
{
#ifdef CONFIG_MMU
return do_fork(SIGCHLD, regs.spu, &regs, 0, NULL, NULL);
#else
return -EINVAL;
#endif /* CONFIG_MMU */
}
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
unsigned long parent_tidptr,
unsigned long child_tidptr,
unsigned long r4, unsigned long r5, unsigned long r6,
struct pt_regs regs)
{
if (!newsp)
newsp = regs.spu;
return do_fork(clone_flags, newsp, &regs, 0,
(int __user *)parent_tidptr, (int __user *)child_tidptr);
}
/*
* This is trivial, and on the face of it looks like it
* could equally well be done in user mode.
*
* Not so, for quite unobvious reasons - register pressure.
* In user mode vfork() cannot have a stack frame, and if
* done by calling the "clone()" system call directly, you
* do not have enough call-clobbered registers to hold all
* the information you need.
*/
asmlinkage int sys_vfork(unsigned long r0, unsigned long r1, unsigned long r2,
unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6,
struct pt_regs regs)
{
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.spu, &regs, 0,
NULL, NULL);
}
/*
* sys_execve() executes a new program.
*/
asmlinkage int sys_execve(const char __user *ufilename,
const char __user *const __user *uargv,
const char __user *const __user *uenvp,
unsigned long r3, unsigned long r4, unsigned long r5,
unsigned long r6, struct pt_regs regs)
{
int error;
struct filename *filename;
filename = getname(ufilename);
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
error = do_execve(filename->name, uargv, uenvp, &regs);
putname(filename);
out:
return error;
}
/*
* These bracket the sleeping functions..
*/

View File

@ -88,24 +88,3 @@ asmlinkage int sys_cachectl(char *addr, int nbytes, int op)
/* Not implemented yet. */
return -ENOSYS;
}
/*
* Do a system call from kernel instead of calling sys_execve so we
* end up with proper pt_regs.
*/
int kernel_execve(const char *filename,
const char *const argv[],
const char *const envp[])
{
register long __scno __asm__ ("r7") = __NR_execve;
register long __arg3 __asm__ ("r2") = (long)(envp);
register long __arg2 __asm__ ("r1") = (long)(argv);
register long __res __asm__ ("r0") = (long)(filename);
__asm__ __volatile__ (
"trap #" SYSCALL_VECTOR "|| nop"
: "=r" (__res)
: "r" (__scno), "0" (__res), "r" (__arg2),
"r" (__arg3)
: "memory");
return __res;
}

View File

@ -16,6 +16,7 @@ config M68K
select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE
select GENERIC_KERNEL_THREAD
select GENERIC_KERNEL_EXECVE
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_REL
select MODULES_USE_ELF_RELA

View File

@ -86,11 +86,9 @@ static inline int sigfindinword(unsigned long word)
#endif /* !CONFIG_CPU_HAS_NO_BITFIELDS */
#ifdef __uClinux__
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
#else
struct pt_regs;
extern void ptrace_signal_deliver(struct pt_regs *regs, void *cookie);
#ifndef __uClinux__
extern void ptrace_signal_deliver(void);
#define ptrace_signal_deliver ptrace_signal_deliver
#endif /* __uClinux__ */
#endif /* _M68K_SIGNAL_H */

View File

@ -32,7 +32,8 @@
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_KERNEL_EXECVE
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
/*
* "Conditional" syscalls

View File

@ -44,34 +44,29 @@
.globl system_call, buserr, trap, resume
.globl sys_call_table
.globl sys_fork, sys_clone, sys_vfork
.globl __sys_fork, __sys_clone, __sys_vfork
.globl ret_from_interrupt, bad_interrupt
.globl auto_irqhandler_fixup
.globl user_irqvec_fixup
.text
ENTRY(sys_fork)
ENTRY(__sys_fork)
SAVE_SWITCH_STACK
pea %sp@(SWITCH_STACK_SIZE)
jbsr m68k_fork
addql #4,%sp
RESTORE_SWITCH_STACK
jbsr sys_fork
lea %sp@(24),%sp
rts
ENTRY(sys_clone)
ENTRY(__sys_clone)
SAVE_SWITCH_STACK
pea %sp@(SWITCH_STACK_SIZE)
jbsr m68k_clone
addql #4,%sp
RESTORE_SWITCH_STACK
lea %sp@(28),%sp
rts
ENTRY(sys_vfork)
ENTRY(__sys_vfork)
SAVE_SWITCH_STACK
pea %sp@(SWITCH_STACK_SIZE)
jbsr m68k_vfork
addql #4,%sp
RESTORE_SWITCH_STACK
jbsr sys_vfork
lea %sp@(24),%sp
rts
ENTRY(sys_sigreturn)
@ -115,16 +110,9 @@ ENTRY(ret_from_kernel_thread)
| a3 contains the kernel thread payload, d7 - its argument
movel %d1,%sp@-
jsr schedule_tail
GET_CURRENT(%d0)
movel %d7,(%sp)
jsr %a3@
addql #4,%sp
movel %d0,(%sp)
jra sys_exit
ENTRY(ret_from_kernel_execve)
movel 4(%sp), %sp
GET_CURRENT(%d0)
jra ret_from_exception
#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)

View File

@ -136,57 +136,35 @@ void flush_thread(void)
}
/*
* "m68k_fork()".. By the time we get here, the
* non-volatile registers have also been saved on the
* stack. We do some ugly pointer stuff here.. (see
* also copy_thread)
* Why not generic sys_clone, you ask? m68k passes all arguments on stack.
* And we need all registers saved, which means a bunch of stuff pushed
* on top of pt_regs, which means that sys_clone() arguments would be
* buried. We could, of course, copy them, but it's too costly for no
* good reason - generic clone() would have to copy them *again* for
* do_fork() anyway. So in this case it's actually better to pass pt_regs *
* and extract arguments for do_fork() from there. Eventually we might
* go for calling do_fork() directly from the wrapper, but only after we
* are finished with do_fork() prototype conversion.
*/
asmlinkage int m68k_fork(struct pt_regs *regs)
{
#ifdef CONFIG_MMU
return do_fork(SIGCHLD, rdusp(), regs, 0, NULL, NULL);
#else
return -EINVAL;
#endif
}
asmlinkage int m68k_vfork(struct pt_regs *regs)
{
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0,
NULL, NULL);
}
asmlinkage int m68k_clone(struct pt_regs *regs)
{
unsigned long clone_flags;
unsigned long newsp;
int __user *parent_tidptr, *child_tidptr;
/* syscall2 puts clone_flags in d1 and usp in d2 */
clone_flags = regs->d1;
newsp = regs->d2;
parent_tidptr = (int __user *)regs->d3;
child_tidptr = (int __user *)regs->d4;
if (!newsp)
newsp = rdusp();
return do_fork(clone_flags, newsp, regs, 0,
parent_tidptr, child_tidptr);
/* regs will be equal to current_pt_regs() */
return do_fork(regs->d1, regs->d2, 0,
(int __user *)regs->d3, (int __user *)regs->d4);
}
int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long arg,
struct task_struct * p, struct pt_regs * regs)
unsigned long arg, struct task_struct *p)
{
struct pt_regs * childregs;
struct switch_stack *childstack;
struct fork_frame {
struct switch_stack sw;
struct pt_regs regs;
} *frame;
childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
childstack = ((struct switch_stack *) childregs) - 1;
frame = (struct fork_frame *) (task_stack_page(p) + THREAD_SIZE) - 1;
p->thread.usp = usp;
p->thread.ksp = (unsigned long)childstack;
p->thread.esp0 = (unsigned long)childregs;
p->thread.ksp = (unsigned long)frame;
p->thread.esp0 = (unsigned long)&frame->regs;
/*
* Must save the current SFC/DFC value, NOT the value when
@ -194,25 +172,24 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
*/
p->thread.fs = get_fs().seg;
if (unlikely(!regs)) {
if (unlikely(p->flags & PF_KTHREAD)) {
/* kernel thread */
memset(childstack, 0,
sizeof(struct switch_stack) + sizeof(struct pt_regs));
childregs->sr = PS_S;
childstack->a3 = usp; /* function */
childstack->d7 = arg;
childstack->retpc = (unsigned long)ret_from_kernel_thread;
memset(frame, 0, sizeof(struct fork_frame));
frame->regs.sr = PS_S;
frame->sw.a3 = usp; /* function */
frame->sw.d7 = arg;
frame->sw.retpc = (unsigned long)ret_from_kernel_thread;
p->thread.usp = 0;
return 0;
}
*childregs = *regs;
childregs->d0 = 0;
*childstack = ((struct switch_stack *) regs)[-1];
childstack->retpc = (unsigned long)ret_from_fork;
memcpy(frame, container_of(current_pt_regs(), struct fork_frame, regs),
sizeof(struct fork_frame));
frame->regs.d0 = 0;
frame->sw.retpc = (unsigned long)ret_from_fork;
p->thread.usp = usp ?: rdusp();
if (clone_flags & CLONE_SETTLS)
task_thread_info(p)->tp_value = regs->d5;
task_thread_info(p)->tp_value = frame->regs.d5;
#ifdef CONFIG_FPU
if (!FPU_IS_EMU) {

View File

@ -108,8 +108,9 @@ int handle_kernel_fault(struct pt_regs *regs)
return 1;
}
void ptrace_signal_deliver(struct pt_regs *regs, void *cookie)
void ptrace_signal_deliver(void)
{
struct pt_regs *regs = signal_pt_regs();
if (regs->orig_d0 < 0)
return;
switch (regs->d0) {

View File

@ -22,7 +22,7 @@ ALIGN
ENTRY(sys_call_table)
.long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
.long sys_exit
.long sys_fork
.long __sys_fork
.long sys_read
.long sys_write
.long sys_open /* 5 */
@ -140,7 +140,7 @@ ENTRY(sys_call_table)
.long sys_ipc
.long sys_fsync
.long sys_sigreturn
.long sys_clone /* 120 */
.long __sys_clone /* 120 */
.long sys_setdomainname
.long sys_newuname
.long sys_cacheflush /* modify_ldt for i386 */
@ -210,7 +210,7 @@ ENTRY(sys_call_table)
.long sys_sendfile
.long sys_ni_syscall /* streams1 */
.long sys_ni_syscall /* streams2 */
.long sys_vfork /* 190 */
.long __sys_vfork /* 190 */
.long sys_getrlimit
.long sys_mmap2
.long sys_truncate64

Some files were not shown because too many files have changed in this diff.