
xtensa: propagate the calling conventions change down into csum_partial_copy_generic()

turn the exception handlers into returning 0.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Al Viro 2020-07-19 18:03:51 -04:00
parent 1cd95ab85d
commit 2a5d2bd159
2 changed files with 19 additions and 68 deletions
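
For context beyond the one-line commit message: this commit is part of the series that moves the csum-and-copy primitives from the six-argument, error-pointer convention to a three-argument one, where the helper seeds the one's-complement sum with ~0U itself and returns 0 whenever a fault is taken. A compilable sketch of the two shapes, using hypothetical stand-in names (csum_copy_old/csum_copy_new are illustrative; the real function is csum_partial_copy_generic in both cases):

    /* Sketch only -- stand-in prototypes, not the kernel's headers. */
    typedef unsigned int __wsum;

    /* old shape: caller supplies the initial sum and two error pointers */
    __wsum csum_copy_old(const void *src, void *dst, int len, __wsum sum,
                         int *src_err_ptr, int *dst_err_ptr);

    /* new shape: three arguments; a return of 0 means "a fault was taken" */
    __wsum csum_copy_new(const void *src, void *dst, int len);

    /* what each caller used to have to do ... */
    static __wsum caller_before(const void *src, void *dst, int len)
    {
            int src_err = 0, dst_err = 0;
            __wsum sum = csum_copy_old(src, dst, len, ~0U,
                                       &src_err, &dst_err);

            return (src_err || dst_err) ? 0 : sum;
    }

    /* ... and what it collapses to after the series */
    static __wsum caller_after(const void *src, void *dst, int len)
    {
            return csum_copy_new(src, dst, len);
    }

Seeding with ~0U is what makes the 0-means-fault convention sound: a one's-complement sum that starts from all ones can never fold down to 0x00000000 on success.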

arch/xtensa/include/asm/checksum.h

@@ -37,9 +37,7 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
  * better 64-bit) boundary
  */
 
-asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
-					    int len, __wsum sum,
-					    int *src_err_ptr, int *dst_err_ptr);
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
 
 #define _HAVE_ARCH_CSUM_AND_COPY
 /*
@@ -49,7 +47,7 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
 static inline
 __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
 {
-	return csum_partial_copy_generic(src, dst, len, 0, NULL, NULL);
+	return csum_partial_copy_generic(src, dst, len);
 }
 
 #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
@@ -57,14 +55,9 @@ static inline
 __wsum csum_and_copy_from_user(const void __user *src, void *dst,
 			       int len)
 {
-	int err = 0;
-
 	if (!access_ok(src, len))
 		return 0;
-
-	sum = csum_partial_copy_generic((__force const void *)src, dst,
-					len, ~0U, &err, NULL);
-	return err ? 0 : sum;
+	return csum_partial_copy_generic((__force const void *)src, dst, len);
 }
 
 /*
@@ -247,13 +240,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
 static __inline__ __wsum csum_and_copy_to_user(const void *src,
 					       void __user *dst, int len)
 {
-	int err = 0;
-	__wsum sum = ~0U;
-
 	if (!access_ok(dst, len))
 		return 0;
-
-	sum = csum_partial_copy_generic(src,dst,len,sum,NULL,&err);
-	return err ? 0 : sum;
+	return csum_partial_copy_generic(src, (__force void *)dst, len);
 }
 #endif

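Read together, the two user-copy hunks above leave both wrappers as thin access_ok guards around the new three-argument call. Reassembled here for readability (pieced from the hunks; kernel-internal types and macros are assumed, so this is not a standalone compile unit):

    static inline
    __wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
    {
            if (!access_ok(src, len))
                    return 0;       /* same "0 means fault" convention */
            return csum_partial_copy_generic((__force const void *)src, dst, len);
    }

    static __inline__ __wsum csum_and_copy_to_user(const void *src,
                                                   void __user *dst, int len)
    {
            if (!access_ok(dst, len))
                    return 0;
            return csum_partial_copy_generic(src, (__force void *)dst, len);
    }
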
arch/xtensa/lib/checksum.S

@@ -175,19 +175,14 @@ ENDPROC(csum_partial)
 */
 /*
-unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
-					int sum, int *src_err_ptr, int *dst_err_ptr)
+unsigned int csum_partial_copy_generic (const char *src, char *dst, int len)
 
 a2  = src
 a3  = dst
 a4  = len
 a5  = sum
-a6  = src_err_ptr
-a7  = dst_err_ptr
 a8  = temp
 a9  = temp
 a10 = temp
-a11 = original len for exception handling
-a12 = original dst for exception handling
 
 This function is optimized for 4-byte aligned addresses.  Other
 alignments work, but not nearly as efficiently.
@@ -196,8 +191,7 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
 
 ENTRY(csum_partial_copy_generic)
 	abi_entry_default
-	mov	a12, a3
-	mov	a11, a4
+	movi	a5, -1
 	or	a10, a2, a3
 
 	/* We optimize the following alignment tests for the 4-byte
@@ -228,26 +222,26 @@ ENTRY(csum_partial_copy_generic)
 #endif
 EX(10f)	l32i	a9, a2, 0
 EX(10f)	l32i	a8, a2, 4
-EX(11f)	s32i	a9, a3, 0
-EX(11f)	s32i	a8, a3, 4
+EX(10f)	s32i	a9, a3, 0
+EX(10f)	s32i	a8, a3, 4
 	ONES_ADD(a5, a9)
 	ONES_ADD(a5, a8)
 EX(10f)	l32i	a9, a2, 8
 EX(10f)	l32i	a8, a2, 12
-EX(11f)	s32i	a9, a3, 8
-EX(11f)	s32i	a8, a3, 12
+EX(10f)	s32i	a9, a3, 8
+EX(10f)	s32i	a8, a3, 12
 	ONES_ADD(a5, a9)
 	ONES_ADD(a5, a8)
 EX(10f)	l32i	a9, a2, 16
 EX(10f)	l32i	a8, a2, 20
-EX(11f)	s32i	a9, a3, 16
-EX(11f)	s32i	a8, a3, 20
+EX(10f)	s32i	a9, a3, 16
+EX(10f)	s32i	a8, a3, 20
 	ONES_ADD(a5, a9)
 	ONES_ADD(a5, a8)
 EX(10f)	l32i	a9, a2, 24
 EX(10f)	l32i	a8, a2, 28
-EX(11f)	s32i	a9, a3, 24
-EX(11f)	s32i	a8, a3, 28
+EX(10f)	s32i	a9, a3, 24
+EX(10f)	s32i	a8, a3, 28
 	ONES_ADD(a5, a9)
 	ONES_ADD(a5, a8)
 	addi	a2, a2, 32
@@ -267,7 +261,7 @@ EX(11f)	s32i	a8, a3, 28
 .Loop6:
 #endif
 EX(10f)	l32i	a9, a2, 0
-EX(11f)	s32i	a9, a3, 0
+EX(10f)	s32i	a9, a3, 0
 	ONES_ADD(a5, a9)
 	addi	a2, a2, 4
 	addi	a3, a3, 4
@@ -298,7 +292,7 @@ EX(11f)	s32i	a9, a3, 0
 .Loop7:
 #endif
 EX(10f)	l16ui	a9, a2, 0
-EX(11f)	s16i	a9, a3, 0
+EX(10f)	s16i	a9, a3, 0
 	ONES_ADD(a5, a9)
 	addi	a2, a2, 2
 	addi	a3, a3, 2
@@ -309,7 +303,7 @@ EX(11f)	s16i	a9, a3, 0
 	/* This section processes a possible trailing odd byte. */
 	_bbci.l	a4, 0, 8f	/* 1-byte chunk */
 EX(10f)	l8ui	a9, a2, 0
-EX(11f)	s8i	a9, a3, 0
+EX(10f)	s8i	a9, a3, 0
 #ifdef __XTENSA_EB__
 	slli	a9, a9, 8	/* shift byte to bits 8..15 */
 #endif
@@ -334,8 +328,8 @@ EX(11f)	s8i	a9, a3, 0
 #endif
 EX(10f)	l8ui	a9, a2, 0
 EX(10f)	l8ui	a8, a2, 1
-EX(11f)	s8i	a9, a3, 0
-EX(11f)	s8i	a8, a3, 1
+EX(10f)	s8i	a9, a3, 0
+EX(10f)	s8i	a8, a3, 1
 #ifdef __XTENSA_EB__
 	slli	a9, a9, 8	/* combine into a single 16-bit value */
 #else				/* for checksum computation */
@@ -356,38 +350,7 @@ ENDPROC(csum_partial_copy_generic)
 
 # Exception handler:
 .section .fixup, "ax"
-/*
-	a6  = src_err_ptr
-	a7  = dst_err_ptr
-	a11 = original len for exception handling
-	a12 = original dst for exception handling
-*/
-
 10:
-	_movi	a2, -EFAULT
-	s32i	a2, a6, 0	/* src_err_ptr */
-
-	# clear the complete destination - computing the rest
-	# is too much work
 	movi	a2, 0
-#if XCHAL_HAVE_LOOPS
-	loopgtz	a11, 2f
-#else
-	beqz	a11, 2f
-	add	a11, a11, a12	/* a11 = ending address */
-.Leloop:
-#endif
-	s8i	a2, a12, 0
-	addi	a12, a12, 1
-#if !XCHAL_HAVE_LOOPS
-	blt	a12, a11, .Leloop
-#endif
-2:
-	abi_ret_default
-
-11:
-	movi	a2, -EFAULT
-	s32i	a2, a7, 0	/* dst_err_ptr */
-	movi	a2, 0
 	abi_ret_default
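
ONES_ADD, used throughout the copy loops above, is the xtensa add-with-end-around-carry macro: add, and if the 32-bit add wrapped, add 1. A small host-side C model of that accumulation and of the new "movi a5, -1" seeding; the function name and the sample words are illustrative, not from the commit:

    #include <stdio.h>
    #include <stdint.h>

    /* C model of checksum.S's ONES_ADD: 32-bit add with end-around carry
     * (one's-complement accumulation). */
    static uint32_t ones_add(uint32_t sum, uint32_t val)
    {
            sum += val;
            if (sum < val)          /* unsigned wraparound => fold carry back in */
                    sum++;
            return sum;
    }

    int main(void)
    {
            /* "movi a5, -1": the accumulator now starts at ~0U inside the
             * function instead of being passed in.  Starting from all ones,
             * the running sum can never become 0x00000000, which is what
             * frees up 0 to mean "fault" -- and why the fixup code above
             * shrinks to "movi a2, 0; abi_ret_default". */
            uint32_t sum = ~0u;

            sum = ones_add(sum, 0x12345678);        /* sample 32-bit words */
            sum = ones_add(sum, 0x9abcdef0);
            printf("accumulator: 0x%08x\n", sum);   /* prints 0xacf13568 */
            return 0;
    }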