From 567bd98017d9c9f2ac1c148ddc78c062e8abd398 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sat, 17 Dec 2005 15:25:42 +0000
Subject: [PATCH 1/2] [ARM] Fix sys_sendto and sys_recvfrom 6-arg syscalls

Rather than providing more wrappers for 6-arg syscalls, arrange for
them to be supported as standard.  This just means that we always
store the sixth argument on the stack, rather than in the wrappers.

This means we eliminate the wrappers for:
* sys_futex
* sys_arm_fadvise64_64
* sys_mbind
* sys_ipc

Signed-off-by: Russell King
---
 arch/arm/kernel/calls.S        |  8 ++++----
 arch/arm/kernel/entry-common.S | 20 ++------------------
 2 files changed, 6 insertions(+), 22 deletions(-)

diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 2ad4aa2a1536..55076a75e5bf 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -131,7 +131,7 @@ __syscall_start:
 		.long	sys_wait4
 /* 115 */	.long	sys_swapoff
 		.long	sys_sysinfo
-		.long	sys_ipc_wrapper
+		.long	sys_ipc
 		.long	sys_fsync
 		.long	sys_sigreturn_wrapper
 /* 120 */	.long	sys_clone_wrapper
@@ -254,7 +254,7 @@ __syscall_start:
 		.long	sys_fremovexattr
 		.long	sys_tkill
 		.long	sys_sendfile64
-/* 240 */	.long	sys_futex_wrapper
+/* 240 */	.long	sys_futex
 		.long	sys_sched_setaffinity
 		.long	sys_sched_getaffinity
 		.long	sys_io_setup
@@ -284,7 +284,7 @@ __syscall_start:
 		.long	sys_fstatfs64
 		.long	sys_tgkill
 		.long	sys_utimes
-/* 270 */	.long	sys_arm_fadvise64_64_wrapper
+/* 270 */	.long	sys_arm_fadvise64_64
 		.long	sys_pciconfig_iobase
 		.long	sys_pciconfig_read
 		.long	sys_pciconfig_write
@@ -333,7 +333,7 @@ __syscall_start:
 		.long	sys_inotify_init
 		.long	sys_inotify_add_watch
 		.long	sys_inotify_rm_watch
-		.long	sys_mbind_wrapper
+		.long	sys_mbind
 /* 320 */	.long	sys_get_mempolicy
 		.long	sys_set_mempolicy
 __syscall_end:

diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index f7f183075237..e2b42997ad33 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -145,7 +145,7 @@ ENTRY(vector_swi)
 #endif
 	enable_irq

-	str	r4, [sp, #-S_OFF]!		@ push fifth arg
+	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

 	get_thread_info tsk
 	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing
@@ -204,7 +204,7 @@ ENTRY(sys_call_table)
  * Special system call wrappers
  */
 @ r0 = syscall number
-@ r5 = syscall table
+@ r8 = syscall table
 		.type	sys_syscall, #function
 sys_syscall:
 		eor	scno, r0, #__NR_SYSCALL_BASE
@@ -255,22 +255,6 @@ sys_sigaltstack_wrapper:
 		ldr	r2, [sp, #S_OFF + S_SP]
 		b	do_sigaltstack

-sys_futex_wrapper:
-		str	r5, [sp, #4]		@ push sixth arg
-		b	sys_futex
-
-sys_arm_fadvise64_64_wrapper:
-		str	r5, [sp, #4]		@ push r5 to stack
-		b	sys_arm_fadvise64_64
-
-sys_mbind_wrapper:
-		str	r5, [sp, #4]
-		b	sys_mbind
-
-sys_ipc_wrapper:
-		str	r5, [sp, #4]		@ push sixth arg
-		b	sys_ipc
-
 /*
  * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
  * offset, we return EINVAL.
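As context for the patch above: ARM system calls pass arguments 1-4 in
r0-r3 and arguments 5 and 6 in r4 and r5, so a six-argument syscall only
works if the kernel spills r4 and r5 to the stack where the C syscall
handler expects them; that is exactly what the new stmdb in vector_swi
does.  Below is a minimal user-space sketch, not part of the patch: the
raw_syscall6() helper name is hypothetical, and it uses the later EABI
convention of passing the syscall number in r7 (the contemporary OABI
encoded the number in the swi instruction itself).

	#include <sys/syscall.h>	/* for __NR_sendto etc. */

	static inline long raw_syscall6(long nr, long a1, long a2, long a3,
					long a4, long a5, long a6)
	{
		register long r7 asm("r7") = nr;	/* syscall number (EABI) */
		register long r0 asm("r0") = a1;	/* arguments 1-4: r0-r3 */
		register long r1 asm("r1") = a2;
		register long r2 asm("r2") = a3;
		register long r3 asm("r3") = a4;
		register long r4 asm("r4") = a5;	/* argument 5: r4 */
		register long r5 asm("r5") = a6;	/* argument 6: r5, now always
							   spilled by vector_swi */

		asm volatile("swi #0"
			     : "=r" (r0)
			     : "r" (r7), "0" (r0), "r" (r1), "r" (r2),
			       "r" (r3), "r" (r4), "r" (r5)
			     : "memory");
		return r0;	/* result, or -errno, comes back in r0 */
	}

	/* e.g. a six-argument call such as sendto(2):
	 * raw_syscall6(__NR_sendto, fd, (long)buf, len, flags,
	 *              (long)addr, addrlen);
	 */

Before this change, such a call only worked for syscalls that had a
hand-written wrapper to push r5; afterwards any six-argument syscall,
including sys_sendto and sys_recvfrom, works without one.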
From 7c612bfd4ed3064fd48a4877a114c8186547367b Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Mon, 19 Dec 2005 22:20:51 +0000
Subject: [PATCH 2/2] [ARM] 3210/1: add missing memory barrier helper for NPTL support

Patch from Nicolas Pitre

Strictly speaking, the NPTL kernel helpers are required for pre-ARMv6
only.  They are available on ARMv6+ as well for obvious compatibility
reasons.  However, there are cases where extra memory barriers are
needed when using an SMP ARMv6 machine but not on pre-ARMv6.

This patch adds a memory barrier kernel helper that glibc can use as
needed for pre-ARMv6 binaries to be forward compatible with an SMP
kernel on ARMv6, as well as the necessary dmb instructions to the
cmpxchg helper.

Signed-off-by: Nicolas Pitre
Acked-by: Daniel Jacobowitz
Signed-off-by: Russell King
---
 arch/arm/kernel/entry-armv.S | 49 ++++++++++++++++++++++++++++++++++++
 1 file changed, 49 insertions(+)

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index d9fb819bf7cc..2a8d27e18fa7 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -611,6 +611,47 @@ ENTRY(__switch_to)
 	.globl	__kuser_helper_start
 __kuser_helper_start:

+/*
+ * Reference prototype:
+ *
+ *	void __kernel_memory_barrier(void)
+ *
+ * Input:
+ *
+ *	lr = return address
+ *
+ * Output:
+ *
+ *	none
+ *
+ * Clobbered:
+ *
+ *	the Z flag might be lost
+ *
+ * Definition and user space usage example:
+ *
+ *	typedef void (__kernel_dmb_t)(void);
+ *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
+ *
+ * Apply any needed memory barrier to preserve consistency with data modified
+ * manually and __kuser_cmpxchg usage.
+ *
+ * This could be used as follows:
+ *
+ * #define __kernel_dmb() \
+ *         asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
+ *	        : : : "r0", "lr","cc" )
+ */
+
+__kuser_memory_barrier:				@ 0xffff0fa0
+
+#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
+	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+#endif
+	mov	pc, lr
+
+	.align	5
+
 /*
  * Reference prototype:
  *
@@ -642,6 +683,8 @@ __kuser_helper_start:
  * The C flag is also set if *ptr was changed to allow for assembly
  * optimization in the calling code.
  *
+ * Note: this routine already includes memory barriers as needed.
+ *
  * For example, a user space atomic_add implementation could look like this:
  *
  * #define atomic_add(ptr, val) \
@@ -698,10 +741,16 @@ __kuser_cmpxchg:				@ 0xffff0fc0

 #else

+#ifdef CONFIG_SMP
+	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+#endif
 	ldrex	r3, [r2]
 	subs	r3, r3, r0
 	strexeq	r3, r1, [r2]
 	rsbs	r0, r3, #0
+#ifdef CONFIG_SMP
+	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+#endif
 	mov	pc, lr

 #endif
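To illustrate how the two helpers are meant to compose, here is a
minimal user-space sketch.  The fixed entry addresses (0xffff0fa0 for
the barrier, 0xffff0fc0 for cmpxchg) and the return convention come
from the patch; the spin_trylock()/spin_unlock() wrappers themselves
are hypothetical.

	/* Entry points the kernel publishes at the top of the address space. */
	typedef void (__kernel_dmb_t)(void);
	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)

	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

	/* Hypothetical trylock: __kernel_cmpxchg returns 0 if *ptr was
	 * changed.  No explicit barrier is needed here, since per the note
	 * added above the helper now includes its own dmb instructions. */
	static int spin_trylock(volatile int *lock)
	{
		return __kernel_cmpxchg(0, 1, lock) == 0;
	}

	/* Hypothetical unlock: the lock is released with a plain store, so
	 * an explicit barrier must order the critical section's writes
	 * before the store that makes the lock appear free to other CPUs
	 * on SMP ARMv6.  On pre-ARMv6 the helper is just a return. */
	static void spin_unlock(volatile int *lock)
	{
		__kernel_dmb();
		*lock = 0;
	}

In practice glibc would gate use of the new helper on the version word
the kernel publishes at 0xffff0ffc, which is how pre-ARMv6 binaries stay
forward compatible with an SMP ARMv6 kernel as the commit message
describes.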