[PATCH] fix i386 memcpy

This patch shortens non-constant memcpy() by two bytes and fixes spurious
out-of-line constant memcpy().

# size vmlinux.org vmlinux
   text    data     bss     dec     hex filename
3954591 1553426  236544 5744561  57a7b1 vmlinux.org
3952615 1553426  236544 5742585  579ff9 vmlinux

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit (d5b63d78f1, parent d637413f3f) was authored by Denis Vlasenko on 2005-05-01 08:58:48 -07:00 and committed by Linus Torvalds.

View file

/*
 * __memcpy - copy n bytes from 'from' to 'to' (non-constant length path).
 *
 * Copies n/4 dwords with "rep movsl", then reloads ECX with the full
 * byte count, masks it down to n%4 and copies the remainder with
 * "rep movsb".  The "jz 1f" costs 2 bytes of code but lets us skip the
 * (microcoded) rep entirely when the length is a multiple of 4 — see
 * the #if comment below.
 *
 * NOTE(review): i386-specific inline assembly; only valid when compiled
 * for 32-bit x86.  Returns 'to', like standard memcpy.
 */
static inline void * __memcpy(void * to, const void * from, size_t n)
{
	int d0, d1, d2;		/* dummy outputs: ECX/EDI/ESI are clobbered */
	__asm__ __volatile__(
		"rep ; movsl\n\t"	/* copy n/4 dwords */
		"movl %4,%%ecx\n\t"	/* reload full byte count */
		"andl $3,%%ecx\n\t"	/* keep only the tail (n % 4) */
#if 1	/* want to pay 2 byte penalty for a chance to skip microcoded rep? */
		"jz 1f\n\t"
#endif
		"rep ; movsb\n\t"	/* copy remaining 0..3 bytes */
		"1:"
		: "=&c" (d0), "=&D" (d1), "=&S" (d2)
		: "0" (n/4), "g" (n), "1" ((long) to), "2" ((long) from)
		: "memory");
	return (to);
}
/*
 * This looks ugly, but the compiler can optimize it totally,
 * as the count is constant.
 */

/*
 * __constant_memcpy - memcpy for compile-time-constant n.
 *
 * Small sizes (1,2,3,4,5,6,8) are done with one or two plain moves so no
 * string instruction is needed at all.  Larger blocks use "rep movsl"
 * when n >= 20 bytes, or up to four unrolled "movsl" otherwise (smaller
 * code, and ECX is not clobbered).  A final switch on n % 4 copies the
 * 0..3 tail bytes.  Returns 'to'.
 *
 * NOTE(review): the *(short*)/*(int*) type-punning casts violate strict
 * aliasing; they are safe here only because the kernel is built with
 * -fno-strict-aliasing.  i386-specific inline assembly.
 */
static inline void * __constant_memcpy(void * to, const void * from, size_t n)
{
	long esi, edi;
	if (!n) return to;
#if 1	/* want to do small copies with non-string ops? */
	switch (n) {
	case 1: *(char*)to = *(char*)from; return to;
	case 2: *(short*)to = *(short*)from; return to;
	case 4: *(int*)to = *(int*)from; return to;
#if 1	/* including those doable with two moves? */
	case 3: *(short*)to = *(short*)from;
		*((char*)to+2) = *((char*)from+2); return to;
	case 5: *(int*)to = *(int*)from;
		*((char*)to+4) = *((char*)from+4); return to;
	case 6: *(int*)to = *(int*)from;
		*((short*)to+2) = *((short*)from+2); return to;
	case 8: *(int*)to = *(int*)from;
		*((int*)to+1) = *((int*)from+1); return to;
#endif
	}
#endif
	esi = (long) from;
	edi = (long) to;
	if (n >= 5*4) {
		/* large block: use rep prefix */
		int ecx;
		__asm__ __volatile__(
			"rep ; movsl"
			: "=&c" (ecx), "=&D" (edi), "=&S" (esi)
			: "0" (n/4), "1" (edi), "2" (esi)
			: "memory"
		);
	} else {
		/* small block: don't clobber ecx + smaller code */
		if (n >= 4*4) __asm__ __volatile__("movsl"
			:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
		if (n >= 3*4) __asm__ __volatile__("movsl"
			:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
		if (n >= 2*4) __asm__ __volatile__("movsl"
			:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
		if (n >= 1*4) __asm__ __volatile__("movsl"
			:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
	}
	switch (n % 4) {
	/* tail: copy the remaining 0..3 bytes */
	case 0: return to;
	case 1: __asm__ __volatile__("movsb"
			:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
		return to;
	case 2: __asm__ __volatile__("movsw"
			:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
		return to;
	default: __asm__ __volatile__("movsw\n\tmovsb"
			:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
		return to;
	}
}
#define __HAVE_ARCH_MEMCPY