Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc

Pull sparc changes from David S. Miller:
 "This has the generic strncpy_from_user() implementation architectures
  can now use, which we've been developing on linux-arch over the past
  few days.

  For good measure I ran both a 32-bit and a 64-bit glibc testsuite run,
  the latter of which pointed out an adjustment I needed to make to
  sparc's user_addr_max() definition.  Linus, you were right, STACK_TOP
  was not the right thing to use, even on sparc itself :-)

  From Sam Ravnborg, we have a conversion of sparc32 over to the common
  alloc_thread_info_node(), since the aspect which originally blocked
  our doing so (sun4c) has been removed."

Fix up trivial arch/sparc/Kconfig and lib/Makefile conflicts.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sparc: Fix user_addr_max() definition.
  lib: Sparc's strncpy_from_user is generic enough, move under lib/
  kernel: Move REPEAT_BYTE definition into linux/kernel.h
  sparc: Increase portability of strncpy_from_user() implementation.
  sparc: Optimize strncpy_from_user() zero byte search.
  sparc: Add full proper error handling to strncpy_from_user().
  sparc32: use the common implementation of alloc_thread_info_node()
Linus Torvalds 2012-05-24 15:10:28 -07:00
commit ce004178be
18 changed files with 170 additions and 238 deletions
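
The per-architecture work summarized above follows a small, repeatable pattern: select GENERIC_STRNCPY_FROM_USER in the arch Kconfig (which makes lib/Makefile build lib/strncpy_from_user.o), provide a user_addr_max() limit in the arch uaccess.h, and delete the arch's own strncpy_from_user implementation and export. As an illustration only (not part of this diff; the architecture name is invented), the header side of that opt-in looks roughly like:

    /* arch/foo/include/asm/uaccess.h -- hypothetical sketch of the opt-in */
    #define user_addr_max() \
            (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)

    extern long strncpy_from_user(char *dest, const char __user *src, long count);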

@@ -34,12 +34,12 @@ config SPARC
select GENERIC_SMP_IDLE_THREAD
select GENERIC_CMOS_UPDATE
select GENERIC_CLOCKEVENTS
select GENERIC_STRNCPY_FROM_USER
config SPARC32
def_bool !64BIT
select GENERIC_ATOMIC64
select CLZ_TAB
select ARCH_THREAD_INFO_ALLOCATOR
select ARCH_USES_GETTIMEOFFSET
config SPARC64

@@ -42,7 +42,9 @@
#define TASK_SIZE_OF(tsk) \
(test_tsk_thread_flag(tsk,TIF_32BIT) ? \
(1UL << 32UL) : ((unsigned long)-VPTE_SIZE))
#define TASK_SIZE TASK_SIZE_OF(current)
#define TASK_SIZE \
(test_thread_flag(TIF_32BIT) ? \
(1UL << 32UL) : ((unsigned long)-VPTE_SIZE))
#ifdef __KERNEL__
#define STACK_TOP32 ((1UL << 32UL) - PAGE_SIZE)

@@ -77,18 +77,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
/*
* thread information allocation
*/
#define THREAD_INFO_ORDER 1
struct thread_info * alloc_thread_info_node(struct task_struct *tsk, int node);
void free_thread_info(struct thread_info *);
#define THREAD_SIZE_ORDER 1
#endif /* __ASSEMBLY__ */
/*
* Size of kernel stack for each process.
* Observe the order of get_free_pages() in alloc_thread_info_node().
* The sun4 has 8K stack too, because it's short on memory, and 16K is a waste.
*/
/* Size of kernel stack for each process */
#define THREAD_SIZE (2 * PAGE_SIZE)
/*

@@ -5,4 +5,10 @@
#else
#include <asm/uaccess_32.h>
#endif
#define user_addr_max() \
(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
extern long strncpy_from_user(char *dest, const char __user *src, long count);
#endif
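
This user_addr_max() definition is the one the commit message calls out: the USER_DS limit tracks TASK_SIZE rather than STACK_TOP, and under set_fs(KERNEL_DS) the limit widens to the whole address space so kernel-internal callers keep working. A hedged illustration of the KERNEL_DS case, using the set_fs() interface of this era (the helper name is invented, not part of the commit):

    /* Hypothetical caller: read a string through strncpy_from_user() even
     * though the pointer is a kernel address.  With KERNEL_DS in force,
     * user_addr_max() evaluates to ~0UL, so the generic range check in
     * lib/strncpy_from_user.c admits the address; with USER_DS it would
     * stop at TASK_SIZE. */
    static long copy_kernel_string(char *buf, size_t buflen, const char *kstr)
    {
            mm_segment_t old_fs = get_fs();
            long len;

            set_fs(KERNEL_DS);
            len = strncpy_from_user(buf, (const char __user *)kstr, buflen);
            set_fs(old_fs);

            return len;
    }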

@@ -304,16 +304,6 @@ static inline unsigned long clear_user(void __user *addr, unsigned long n)
return n;
}
extern long __strncpy_from_user(char *dest, const char __user *src, long count);
static inline long strncpy_from_user(char *dest, const char __user *src, long count)
{
if (__access_ok((unsigned long) src, count))
return __strncpy_from_user(dest, src, count);
else
return -EFAULT;
}
extern long __strlen_user(const char __user *);
extern long __strnlen_user(const char __user *, long len);

@@ -257,10 +257,6 @@ extern unsigned long __must_check __clear_user(void __user *, unsigned long);
#define clear_user __clear_user
extern long __must_check __strncpy_from_user(char *dest, const char __user *src, long count);
#define strncpy_from_user __strncpy_from_user
extern long __strlen_user(const char __user *);
extern long __strnlen_user(const char __user *, long len);

@@ -10,7 +10,7 @@ lib-y += strlen.o
lib-y += checksum_$(BITS).o
lib-$(CONFIG_SPARC32) += blockops.o
lib-y += memscan_$(BITS).o memcmp.o strncmp_$(BITS).o
lib-y += strncpy_from_user_$(BITS).o strlen_user_$(BITS).o
lib-y += strlen_user_$(BITS).o
lib-$(CONFIG_SPARC32) += divdi3.o udivdi3.o
lib-$(CONFIG_SPARC32) += copy_user.o locks.o
lib-$(CONFIG_SPARC64) += atomic_64.o

@@ -33,9 +33,6 @@ EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(__bzero);
/* Moving data to/from/in userspace. */
EXPORT_SYMBOL(__strncpy_from_user);
/* Networking helper routines. */
EXPORT_SYMBOL(csum_partial);

@@ -1,47 +0,0 @@
/* strncpy_from_user.S: Sparc strncpy from userspace.
*
* Copyright(C) 1996 David S. Miller
*/
#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/errno.h>
.text
/* Must return:
*
* -EFAULT for an exception
* count if we hit the buffer limit
* bytes copied if we hit a null byte
*/
ENTRY(__strncpy_from_user)
/* %o0=dest, %o1=src, %o2=count */
mov %o2, %o3
1:
subcc %o2, 1, %o2
bneg 2f
nop
10:
ldub [%o1], %o4
add %o0, 1, %o0
cmp %o4, 0
add %o1, 1, %o1
bne 1b
stb %o4, [%o0 - 1]
2:
add %o2, 1, %o0
retl
sub %o3, %o0, %o0
ENDPROC(__strncpy_from_user)
.section .fixup,#alloc,#execinstr
.align 4
4:
retl
mov -EFAULT, %o0
.section __ex_table,#alloc
.align 4
.word 10b, 4b
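
For readers who do not read SPARC assembler, the byte loop being deleted above behaves like the following rough C sketch (the function name is invented; the -EFAULT path is provided by the __ex_table fixup rather than by the loop itself):

    /* Returns the number of bytes copied, excluding the NUL, when a NUL is
     * found, or 'count' when the buffer limit is hit first. */
    static long sparc32_strncpy_sketch(char *dest, const char *src, long count)
    {
            long remaining = count;

            while (--remaining >= 0) {              /* subcc %o2, 1, %o2; bneg 2f */
                    char c = *src++;                /* ldub [%o1], %o4 */

                    *dest++ = c;                    /* stb %o4, [%o0 - 1] */
                    if (c == '\0')
                            return count - remaining - 1;
            }
            return count;
    }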

@@ -1,133 +0,0 @@
/*
* strncpy_from_user.S: Sparc64 strncpy from userspace.
*
* Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/errno.h>
.data
.align 8
0: .xword 0x0101010101010101
.text
/* Must return:
*
* -EFAULT for an exception
* count if we hit the buffer limit
* bytes copied if we hit a null byte
* (without the null byte)
*
* This implementation assumes:
* %o1 is 8 aligned => !(%o2 & 7)
* %o0 is 8 aligned (if not, it will be slooooow, but will work)
*
* This is optimized for the common case:
* in my stats, 90% of src are 8 aligned (even on sparc32)
* and average length is 18 or so.
*/
ENTRY(__strncpy_from_user)
/* %o0=dest, %o1=src, %o2=count */
andcc %o1, 7, %g0 ! IEU1 Group
bne,pn %icc, 30f ! CTI
add %o0, %o2, %g3 ! IEU0
60: ldxa [%o1] %asi, %g1 ! Load Group
brlez,pn %o2, 10f ! CTI
mov %o0, %o3 ! IEU0
50: sethi %hi(0b), %o4 ! IEU0 Group
ldx [%o4 + %lo(0b)], %o4 ! Load
sllx %o4, 7, %o5 ! IEU1 Group
1: sub %g1, %o4, %g2 ! IEU0 Group
stx %g1, [%o0] ! Store
add %o0, 8, %o0 ! IEU1
andcc %g2, %o5, %g0 ! IEU1 Group
bne,pn %xcc, 5f ! CTI
add %o1, 8, %o1 ! IEU0
cmp %o0, %g3 ! IEU1 Group
bl,a,pt %xcc, 1b ! CTI
61: ldxa [%o1] %asi, %g1 ! Load
10: retl ! CTI Group
mov %o2, %o0 ! IEU0
5: srlx %g2, 32, %g7 ! IEU0 Group
sethi %hi(0xff00), %o4 ! IEU1
andcc %g7, %o5, %g0 ! IEU1 Group
be,pn %icc, 2f ! CTI
or %o4, %lo(0xff00), %o4 ! IEU0
srlx %g1, 48, %g7 ! IEU0 Group
andcc %g7, %o4, %g0 ! IEU1 Group
be,pn %icc, 50f ! CTI
andcc %g7, 0xff, %g0 ! IEU1 Group
be,pn %icc, 51f ! CTI
srlx %g1, 32, %g7 ! IEU0
andcc %g7, %o4, %g0 ! IEU1 Group
be,pn %icc, 52f ! CTI
andcc %g7, 0xff, %g0 ! IEU1 Group
be,pn %icc, 53f ! CTI
2: andcc %g2, %o5, %g0 ! IEU1 Group
be,pn %icc, 2f ! CTI
srl %g1, 16, %g7 ! IEU0
andcc %g7, %o4, %g0 ! IEU1 Group
be,pn %icc, 54f ! CTI
andcc %g7, 0xff, %g0 ! IEU1 Group
be,pn %icc, 55f ! CTI
andcc %g1, %o4, %g0 ! IEU1 Group
be,pn %icc, 56f ! CTI
andcc %g1, 0xff, %g0 ! IEU1 Group
be,a,pn %icc, 57f ! CTI
sub %o0, %o3, %o0 ! IEU0
2: cmp %o0, %g3 ! IEU1 Group
bl,a,pt %xcc, 50b ! CTI
62: ldxa [%o1] %asi, %g1 ! Load
retl ! CTI Group
mov %o2, %o0 ! IEU0
50: sub %o0, %o3, %o0
retl
sub %o0, 8, %o0
51: sub %o0, %o3, %o0
retl
sub %o0, 7, %o0
52: sub %o0, %o3, %o0
retl
sub %o0, 6, %o0
53: sub %o0, %o3, %o0
retl
sub %o0, 5, %o0
54: sub %o0, %o3, %o0
retl
sub %o0, 4, %o0
55: sub %o0, %o3, %o0
retl
sub %o0, 3, %o0
56: sub %o0, %o3, %o0
retl
sub %o0, 2, %o0
57: retl
sub %o0, 1, %o0
30: brlez,pn %o2, 3f
sub %g0, %o2, %o3
add %o0, %o2, %o0
63: lduba [%o1] %asi, %o4
1: add %o1, 1, %o1
brz,pn %o4, 2f
stb %o4, [%o0 + %o3]
addcc %o3, 1, %o3
bne,pt %xcc, 1b
64: lduba [%o1] %asi, %o4
3: retl
mov %o2, %o0
2: retl
add %o2, %o3, %o0
ENDPROC(__strncpy_from_user)
.section __ex_table,"a"
.align 4
.word 60b, __retl_efault
.word 61b, __retl_efault
.word 62b, __retl_efault
.word 63b, __retl_efault
.word 64b, __retl_efault
.previous
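
The 0x0101010101010101 constant above (and the %o5 mask derived from it with sllx, i.e. 0x8080808080808080) is the classic word-at-a-time zero-byte filter, the same idea the new generic implementation at the end of this diff builds on. A standalone C sketch of the exact form of the test (illustrative only, not kernel code):

    #include <stdint.h>
    #include <stdbool.h>

    #define ONES  0x0101010101010101ULL     /* the .xword constant at local label 0: */
    #define HIGHS 0x8080808080808080ULL     /* ONES << 7, built with sllx above      */

    /* A byte of x is zero iff its high mask bit survives this expression.
     * The assembly uses the cheaper (x - ONES) & HIGHS as a first filter
     * and then inspects the bytes individually, since that form can give
     * false positives for bytes >= 0x80. */
    static bool word_has_zero_byte(uint64_t x)
    {
            return ((x - ONES) & ~x & HIGHS) != 0;
    }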

@@ -1,4 +1,5 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bug.h>
void copy_from_user_overflow(void)

@@ -467,33 +467,6 @@ void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
flush_tlb_all();
}
/*
* On the SRMMU we do not have the problems with limited tlb entries
* for mapping kernel pages, so we just take things from the free page
* pool. As a side effect we are putting a little too much pressure
* on the gfp() subsystem. This setup also makes the logic of the
* iommu mapping code a lot easier as we can transparently handle
* mappings on the kernel stack without any special code.
*/
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
struct thread_info *ret;
ret = (struct thread_info *)__get_free_pages(GFP_KERNEL,
THREAD_INFO_ORDER);
#ifdef CONFIG_DEBUG_STACK_USAGE
if (ret)
memset(ret, 0, PAGE_SIZE << THREAD_INFO_ORDER);
#endif /* DEBUG_STACK_USAGE */
return ret;
}
void free_thread_info(struct thread_info *ti)
{
free_pages((unsigned long)ti, THREAD_INFO_ORDER);
}
/* tsunami.S */
extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);

@@ -1,6 +1,8 @@
#ifndef _ASM_WORD_AT_A_TIME_H
#define _ASM_WORD_AT_A_TIME_H
#include <linux/kernel.h>
/*
* This is largely generic for little-endian machines, but the
* optimal byte mask counting is probably going to be something
@@ -35,8 +37,6 @@ static inline long count_masked_bytes(long mask)
#endif
#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
/* Return the high bit set in the first byte that is a zero */
static inline unsigned long has_zero(unsigned long a)
{

@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/namei.h>

@@ -38,6 +38,8 @@
#define STACK_MAGIC 0xdeadbeef
#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
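
Worked out, since both masks in the new generic code come from the macro just added: on a 64-bit build ~0ul / 0xff is 0x0101010101010101, so

    REPEAT_BYTE(0x7f) == 0x7f7f7f7f7f7f7f7f   /* low_bits in lib/strncpy_from_user.c  */
    REPEAT_BYTE(0xfe) == 0xfefefefefefefefe   /* high_bits there is this value plus 1 */

(on a 32-bit build the same expressions yield the corresponding 4-byte patterns).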

@@ -16,6 +16,9 @@ config BITREVERSE
config RATIONAL
boolean
config GENERIC_STRNCPY_FROM_USER
bool
config GENERIC_FIND_FIRST_BIT
bool

@@ -125,6 +125,8 @@ obj-$(CONFIG_CLZ_TAB) += clz_tab.o
obj-$(CONFIG_DDR) += jedec_ddr_data.o
obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o
hostprogs-y := gen_crc32table
clean-files := crc32table.h

@@ -0,0 +1,146 @@
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/byteorder.h>
static inline long find_zero(unsigned long mask)
{
long byte = 0;
#ifdef __BIG_ENDIAN
#ifdef CONFIG_64BIT
if (mask >> 32)
mask >>= 32;
else
byte = 4;
#endif
if (mask >> 16)
mask >>= 16;
else
byte += 2;
return (mask >> 8) ? byte : byte + 1;
#else
#ifdef CONFIG_64BIT
if (!((unsigned int) mask)) {
mask >>= 32;
byte = 4;
}
#endif
if (!(mask & 0xffff)) {
mask >>= 16;
byte += 2;
}
return (mask & 0xff) ? byte : byte + 1;
#endif
}
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define IS_UNALIGNED(src, dst) 0
#else
#define IS_UNALIGNED(src, dst) \
(((long) dst | (long) src) & (sizeof(long) - 1))
#endif
/*
* Do a strncpy, return length of string without final '\0'.
* 'count' is the user-supplied count (return 'count' if we
* hit it), 'max' is the address space maximum (and we return
* -EFAULT if we hit it).
*/
static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
{
const unsigned long high_bits = REPEAT_BYTE(0xfe) + 1;
const unsigned long low_bits = REPEAT_BYTE(0x7f);
long res = 0;
/*
* Truncate 'max' to the user-specified limit, so that
* we only have one limit we need to check in the loop
*/
if (max > count)
max = count;
if (IS_UNALIGNED(src, dst))
goto byte_at_a_time;
while (max >= sizeof(unsigned long)) {
unsigned long c, v, rhs;
/* Fall back to byte-at-a-time if we get a page fault */
if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
break;
rhs = c | low_bits;
v = (c + high_bits) & ~rhs;
*(unsigned long *)(dst+res) = c;
if (v) {
v = (c & low_bits) + low_bits;
v = ~(v | rhs);
return res + find_zero(v);
}
res += sizeof(unsigned long);
max -= sizeof(unsigned long);
}
byte_at_a_time:
while (max) {
char c;
if (unlikely(__get_user(c,src+res)))
return -EFAULT;
dst[res] = c;
if (!c)
return res;
res++;
max--;
}
/*
* Uhhuh. We hit 'max'. But was that the user-specified maximum
* too? If so, that's ok - we got as much as the user asked for.
*/
if (res >= count)
return res;
/*
* Nope: we hit the address space limit, and we still had more
* characters the caller would have wanted. That's an EFAULT.
*/
return -EFAULT;
}
/**
* strncpy_from_user: - Copy a NUL terminated string from userspace.
* @dst: Destination address, in kernel space. This buffer must be at
* least @count bytes long.
* @src: Source address, in user space.
* @count: Maximum number of bytes to copy, including the trailing NUL.
*
* Copies a NUL-terminated string from userspace to kernel space.
*
* On success, returns the length of the string (not including the trailing
* NUL).
*
* If access to userspace fails, returns -EFAULT (some data may have been
* copied).
*
* If @count is smaller than the length of the string, copies @count bytes
* and returns @count.
*/
long strncpy_from_user(char *dst, const char __user *src, long count)
{
unsigned long max_addr, src_addr;
if (unlikely(count <= 0))
return 0;
max_addr = user_addr_max();
src_addr = (unsigned long)src;
if (likely(src_addr < max_addr)) {
unsigned long max = max_addr - src_addr;
return do_strncpy_from_user(dst, src, count, max);
}
return -EFAULT;
}
EXPORT_SYMBOL(strncpy_from_user);
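
As a usage sketch of the interface documented above (the caller, buffer size, and error choice are invented for illustration, not taken from this commit):

    /* Hypothetical caller: 'name' must be at least 32 bytes, per the
     * kerneldoc requirement that the destination hold @count bytes. */
    static int get_name_from_user(char *name, const char __user *uname)
    {
            long len = strncpy_from_user(name, uname, 32);

            if (len < 0)
                    return len;             /* -EFAULT from a bad user pointer */
            if (len == 32)
                    return -ENAMETOOLONG;   /* hit 'count': result is not NUL-terminated */
            return 0;                       /* 'name' holds a NUL-terminated string of length 'len' */
    }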