
xtensa: abstract 'entry' and 'retw' in assembly code

Provide abi_entry, abi_entry_default, abi_ret and abi_ret_default macros
that allocate an aligned stack frame in both the windowed and the call0 ABI.
Provide the XTENSA_SPILL_STACK_RESERVE macro that specifies the stack
frame size required when register spilling is involved.
Replace all uses of 'entry' and 'retw' with the above macros.
This makes most of the xtensa assembly code ready for XEA3 and the call0 ABI.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Max Filippov 2019-05-12 20:28:25 -07:00
parent 831c4f3da8
commit d6d5f19e21
11 changed files with 153 additions and 102 deletions
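
To illustrate how the new macros are meant to be used (a sketch only, not part of the commit; the routine name and the 8-byte frame are made up, while abi_entry/abi_ret come from asmmacro.h and ENTRY/ENDPROC from <linux/linkage.h>):

    ENTRY(example_leaf)

            abi_entry(8)            # reserve an aligned frame of at least 8 bytes in either ABI
            s32i    a2, a1, 0       # use the frame: spill the first argument
            l32i    a2, a1, 0       # reload it as the return value
            abi_ret(8)              # release exactly what abi_entry(8) reserved

    ENDPROC(example_leaf)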


@@ -191,4 +191,50 @@
 #endif
 .endm
+#define XTENSA_STACK_ALIGNMENT 16
+#if defined(__XTENSA_WINDOWED_ABI__)
+#define XTENSA_FRAME_SIZE_RESERVE 16
+#define XTENSA_SPILL_STACK_RESERVE 32
+#define abi_entry(frame_size) \
+        entry sp, (XTENSA_FRAME_SIZE_RESERVE + \
+                   (((frame_size) + XTENSA_STACK_ALIGNMENT - 1) & \
+                    -XTENSA_STACK_ALIGNMENT))
+#define abi_entry_default abi_entry(0)
+#define abi_ret(frame_size) retw
+#define abi_ret_default retw
+#elif defined(__XTENSA_CALL0_ABI__)
+#define XTENSA_SPILL_STACK_RESERVE 0
+#define abi_entry(frame_size) __abi_entry (frame_size)
+        .macro __abi_entry frame_size
+        .ifgt \frame_size
+        addi sp, sp, -(((\frame_size) + XTENSA_STACK_ALIGNMENT - 1) & \
+                       -XTENSA_STACK_ALIGNMENT)
+        .endif
+        .endm
+#define abi_entry_default
+#define abi_ret(frame_size) __abi_ret (frame_size)
+        .macro __abi_ret frame_size
+        .ifgt \frame_size
+        addi sp, sp, (((\frame_size) + XTENSA_STACK_ALIGNMENT - 1) & \
+                      -XTENSA_STACK_ALIGNMENT)
+        .endif
+        ret
+        .endm
+#define abi_ret_default ret
+#else
+#error Unsupported Xtensa ABI
+#endif
 #endif /* _XTENSA_ASMMACRO_H */
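
Worked example of the arithmetic above (illustration only, not part of the commit):

    abi_entry(4), windowed ABI:
        ((4 + 16 - 1) & -16) = 16               frame rounded up to the 16-byte alignment
        16 + XTENSA_FRAME_SIZE_RESERVE = 32     so the macro emits "entry sp, 32",
                                                matching the hand-written "entry a1, 32" sites below
    abi_entry(4), call0 ABI:
        emits "addi sp, sp, -16"; abi_ret(4) emits "addi sp, sp, 16" followed by "ret";
        abi_entry_default expands to nothing and abi_ret_default to a bare "ret".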


@@ -121,7 +121,9 @@
 ENTRY(coprocessor_flush)
-entry a1, 32
+/* reserve 4 bytes on stack to save a0 */
+abi_entry(4)
 s32i a0, a1, 0
 movi a0, .Lsave_cp_regs_jump_table
 addx8 a3, a3, a0
@@ -131,7 +133,8 @@ ENTRY(coprocessor_flush)
 beqz a3, 1f
 callx0 a3
 1: l32i a0, a1, 0
-retw
+abi_ret(4)
 ENDPROC(coprocessor_flush)


@@ -1842,7 +1842,8 @@ ENDPROC(fast_store_prohibited)
 ENTRY(system_call)
-entry a1, 32
+/* reserve 4 bytes on stack for function parameter */
+abi_entry(4)
 /* regs->syscall = regs->areg[2] */
@@ -1892,7 +1893,7 @@ ENTRY(system_call)
 s32i a6, a2, PT_AREG2
 bnez a3, 1f
-retw
+abi_ret(4)
 1:
 l32i a4, a1, 4
@@ -1901,7 +1902,7 @@ ENTRY(system_call)
 mov a6, a2
 call4 do_syscall_trace_leave
 s32i a3, a2, PT_SYSCALL
-retw
+abi_ret(4)
 ENDPROC(system_call)
@@ -1952,7 +1953,7 @@ ENDPROC(system_call)
 ENTRY(_switch_to)
-entry a1, 48
+abi_entry(XTENSA_SPILL_STACK_RESERVE)
 mov a11, a3 # and 'next' (a3)
@@ -2013,7 +2014,7 @@ ENTRY(_switch_to)
 wsr a14, ps
 rsync
-retw
+abi_ret(XTENSA_SPILL_STACK_RESERVE)
 ENDPROC(_switch_to)


@@ -11,6 +11,7 @@
 */
 #include <linux/linkage.h>
+#include <asm/asmmacro.h>
 #include <asm/ftrace.h>
 /*
@@ -21,13 +22,13 @@
 ENTRY(_mcount)
-entry a1, 16
+abi_entry_default
 movi a4, ftrace_trace_function
 l32i a4, a4, 0
 movi a3, ftrace_stub
 bne a3, a4, 1f
-retw
+abi_ret_default
 1: xor a7, a2, a1
 movi a3, 0x3fffffff
@@ -40,11 +41,11 @@ ENTRY(_mcount)
 addi a6, a6, -MCOUNT_INSN_SIZE
 callx4 a4
-retw
+abi_ret_default
 ENDPROC(_mcount)
 ENTRY(ftrace_stub)
-entry a1, 16
+abi_entry_default
-retw
+abi_ret_default
 ENDPROC(ftrace_stub)


@@ -43,7 +43,7 @@ ENTRY(csum_partial)
 * Experiments with Ethernet and SLIP connections show that buf
 * is aligned on either a 2-byte or 4-byte boundary.
 */
-entry sp, 32
+abi_entry_default
 extui a5, a2, 0, 2
 bnez a5, 8f /* branch if 2-byte aligned */
 /* Fall-through on common case, 4-byte alignment */
@@ -107,7 +107,7 @@ ENTRY(csum_partial)
 ONES_ADD(a4, a6)
 7:
 mov a2, a4
-retw
+abi_ret_default
 /* uncommon case, buf is 2-byte aligned */
 8:
@@ -195,7 +195,7 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
 ENTRY(csum_partial_copy_generic)
-entry sp, 32
+abi_entry_default
 mov a12, a3
 mov a11, a4
 or a10, a2, a3
@@ -316,7 +316,7 @@ EX(11f) s8i a9, a3, 0
 ONES_ADD(a5, a9)
 8:
 mov a2, a5
-retw
+abi_ret_default
 5:
 /* Control branch to here when either src or dst is odd. We
@@ -383,12 +383,12 @@ ENDPROC(csum_partial_copy_generic)
 blt a12, a11, .Leloop
 #endif
 2:
-retw
+abi_ret_default
 11:
 movi a2, -EFAULT
 s32i a2, a7, 0 /* dst_err_ptr */
 movi a2, 0
-retw
+abi_ret_default
 .previous


@@ -79,7 +79,7 @@
 bne a3, a7, .Lnextbyte # continue loop if $a3:src != $a7:src_end
 #endif /* !XCHAL_HAVE_LOOPS */
 .Lbytecopydone:
-retw
+abi_ret_default
 /*
 * Destination is unaligned
@@ -112,7 +112,7 @@
 ENTRY(__memcpy)
 WEAK(memcpy)
-entry sp, 16 # minimal stack frame
+abi_entry_default
 # a2/ dst, a3/ src, a4/ len
 mov a5, a2 # copy dst so that a2 is return value
 .Lcommon:
@@ -161,7 +161,7 @@ WEAK(memcpy)
 bbsi.l a4, 2, .L3
 bbsi.l a4, 1, .L4
 bbsi.l a4, 0, .L5
-retw
+abi_ret_default
 .L3:
 # copy 4 bytes
 l32i a6, a3, 0
@@ -170,7 +170,7 @@ WEAK(memcpy)
 addi a5, a5, 4
 bbsi.l a4, 1, .L4
 bbsi.l a4, 0, .L5
-retw
+abi_ret_default
 .L4:
 # copy 2 bytes
 l16ui a6, a3, 0
@@ -178,12 +178,12 @@ WEAK(memcpy)
 s16i a6, a5, 0
 addi a5, a5, 2
 bbsi.l a4, 0, .L5
-retw
+abi_ret_default
 .L5:
 # copy 1 byte
 l8ui a6, a3, 0
 s8i a6, a5, 0
-retw
+abi_ret_default
 /*
 * Destination is aligned, Source is unaligned
@@ -255,7 +255,7 @@ WEAK(memcpy)
 #endif
 bbsi.l a4, 1, .L14
 bbsi.l a4, 0, .L15
-.Ldone: retw
+.Ldone: abi_ret_default
 .L14:
 # copy 2 bytes
 l8ui a6, a3, 0
@@ -265,12 +265,12 @@ WEAK(memcpy)
 s8i a7, a5, 1
 addi a5, a5, 2
 bbsi.l a4, 0, .L15
-retw
+abi_ret_default
 .L15:
 # copy 1 byte
 l8ui a6, a3, 0
 s8i a6, a5, 0
-retw
+abi_ret_default
 ENDPROC(__memcpy)
@@ -280,7 +280,7 @@ ENDPROC(__memcpy)
 ENTRY(bcopy)
-entry sp, 16 # minimal stack frame
+abi_entry_default
 # a2=src, a3=dst, a4=len
 mov a5, a3
 mov a3, a2
@@ -346,7 +346,7 @@ ENDPROC(bcopy)
 # $a3:src != $a7:src_start
 #endif /* !XCHAL_HAVE_LOOPS */
 .Lbackbytecopydone:
-retw
+abi_ret_default
 /*
 * Destination is unaligned
@@ -380,7 +380,7 @@ ENDPROC(bcopy)
 ENTRY(__memmove)
 WEAK(memmove)
-entry sp, 16 # minimal stack frame
+abi_entry_default
 # a2/ dst, a3/ src, a4/ len
 mov a5, a2 # copy dst so that a2 is return value
 .Lmovecommon:
@@ -435,7 +435,7 @@ WEAK(memmove)
 bbsi.l a4, 2, .Lback3
 bbsi.l a4, 1, .Lback4
 bbsi.l a4, 0, .Lback5
-retw
+abi_ret_default
 .Lback3:
 # copy 4 bytes
 addi a3, a3, -4
@@ -444,7 +444,7 @@ WEAK(memmove)
 s32i a6, a5, 0
 bbsi.l a4, 1, .Lback4
 bbsi.l a4, 0, .Lback5
-retw
+abi_ret_default
 .Lback4:
 # copy 2 bytes
 addi a3, a3, -2
@@ -452,14 +452,14 @@ WEAK(memmove)
 addi a5, a5, -2
 s16i a6, a5, 0
 bbsi.l a4, 0, .Lback5
-retw
+abi_ret_default
 .Lback5:
 # copy 1 byte
 addi a3, a3, -1
 l8ui a6, a3, 0
 addi a5, a5, -1
 s8i a6, a5, 0
-retw
+abi_ret_default
 /*
 * Destination is aligned, Source is unaligned
@@ -531,7 +531,7 @@ WEAK(memmove)
 bbsi.l a4, 1, .Lback14
 bbsi.l a4, 0, .Lback15
 .Lbackdone:
-retw
+abi_ret_default
 .Lback14:
 # copy 2 bytes
 addi a3, a3, -2
@@ -541,13 +541,13 @@ WEAK(memmove)
 s8i a6, a5, 0
 s8i a7, a5, 1
 bbsi.l a4, 0, .Lback15
-retw
+abi_ret_default
 .Lback15:
 # copy 1 byte
 addi a3, a3, -1
 addi a5, a5, -1
 l8ui a6, a3, 0
 s8i a6, a5, 0
-retw
+abi_ret_default
 ENDPROC(__memmove)


@@ -34,7 +34,7 @@
 ENTRY(__memset)
 WEAK(memset)
-entry sp, 16 # minimal stack frame
+abi_entry_default
 # a2/ dst, a3/ c, a4/ length
 extui a3, a3, 0, 8 # mask to just 8 bits
 slli a7, a3, 8 # duplicate character in all bytes of word
@@ -48,7 +48,7 @@ WEAK(memset)
 srli a7, a4, 4 # number of loop iterations with 16B
 # per iteration
 bnez a4, .Laligned
-retw
+abi_ret_default
 /*
 * Destination is word-aligned.
@@ -95,7 +95,7 @@ EX(10f) s16i a3, a5, 0
 EX(10f) s8i a3, a5, 0
 .L5:
 .Lret1:
-retw
+abi_ret_default
 /*
 * Destination is unaligned
@@ -139,7 +139,7 @@ EX(10f) s8i a3, a5, 0
 blt a5, a6, .Lbyteloop
 #endif /* !XCHAL_HAVE_LOOPS */
 .Lbytesetdone:
-retw
+abi_ret_default
 ENDPROC(__memset)
@@ -150,4 +150,4 @@ ENDPROC(__memset)
 10:
 movi a2, 0
-retw
+abi_ret_default


@@ -50,7 +50,7 @@
 .text
 ENTRY(__strncpy_user)
-entry sp, 16 # minimal stack frame
+abi_entry_default
 # a2/ dst, a3/ src, a4/ len
 mov a11, a2 # leave dst in return value register
 beqz a4, .Lret # if len is zero
@@ -93,7 +93,7 @@ EX(10f) s8i a9, a11, 0 # store byte 0
 bnez a4, .Lsrcaligned # if len is nonzero
 .Lret:
 sub a2, a11, a2 # compute strlen
-retw
+abi_ret_default
 /*
 * dst is word-aligned, src is word-aligned
@@ -148,14 +148,14 @@ EX(10f) s8i a9, a11, 0
 .Lz3: # byte 3 is zero
 addi a11, a11, 3 # advance dst pointer
 sub a2, a11, a2 # compute strlen
-retw
+abi_ret_default
 .Lz0: # byte 0 is zero
 #ifdef __XTENSA_EB__
 movi a9, 0
 #endif /* __XTENSA_EB__ */
 EX(10f) s8i a9, a11, 0
 sub a2, a11, a2 # compute strlen
-retw
+abi_ret_default
 .Lz1: # byte 1 is zero
 #ifdef __XTENSA_EB__
 extui a9, a9, 16, 16
@@ -163,7 +163,7 @@ EX(10f) s8i a9, a11, 0
 EX(10f) s16i a9, a11, 0
 addi a11, a11, 1 # advance dst pointer
 sub a2, a11, a2 # compute strlen
-retw
+abi_ret_default
 .Lz2: # byte 2 is zero
 #ifdef __XTENSA_EB__
 extui a9, a9, 16, 16
@@ -173,7 +173,7 @@ EX(10f) s16i a9, a11, 0
 EX(10f) s8i a9, a11, 2
 addi a11, a11, 2 # advance dst pointer
 sub a2, a11, a2 # compute strlen
-retw
+abi_ret_default
 .align 4 # 1 mod 4 alignment for LOOPNEZ
 .byte 0 # (0 mod 4 alignment for LBEG)
@@ -199,7 +199,7 @@ EX(10f) s8i a9, a11, 0
 .Lunalignedend:
 sub a2, a11, a2 # compute strlen
-retw
+abi_ret_default
 ENDPROC(__strncpy_user)
@@ -214,4 +214,4 @@ ENDPROC(__strncpy_user)
 10:
 11:
 movi a2, -EFAULT
-retw
+abi_ret_default


@@ -45,7 +45,7 @@
 .text
 ENTRY(__strnlen_user)
-entry sp, 16 # minimal stack frame
+abi_entry_default
 # a2/ s, a3/ len
 addi a4, a2, -4 # because we overincrement at the end;
 # we compensate with load offsets of 4
@@ -96,7 +96,7 @@ EX(10f) l32i a9, a4, 4 # load 4 bytes for remaining checks
 addi a4, a4, 1 # advance string pointer
 .L101:
 sub a2, a4, a2 # compute length
-retw
+abi_ret_default
 # NOTE that in several places below, we point to the byte just after
 # the zero byte in order to include the NULL terminator in the count.
@@ -106,15 +106,15 @@ EX(10f) l32i a9, a4, 4 # load 4 bytes for remaining checks
 .Lz0: # byte 0 is zero
 addi a4, a4, 1 # point just beyond zero byte
 sub a2, a4, a2 # subtract to get length
-retw
+abi_ret_default
 .Lz1: # byte 1 is zero
 addi a4, a4, 1+1 # point just beyond zero byte
 sub a2, a4, a2 # subtract to get length
-retw
+abi_ret_default
 .Lz2: # byte 2 is zero
 addi a4, a4, 2+1 # point just beyond zero byte
 sub a2, a4, a2 # subtract to get length
-retw
+abi_ret_default
 .L1mod2: # address is odd
 EX(10f) l8ui a9, a4, 4 # get byte 0
@@ -130,7 +130,7 @@ EX(10f) l32i a9, a4, 0 # get word with first two bytes of string
 # byte 3 is zero
 addi a4, a4, 3+1 # point just beyond zero byte
 sub a2, a4, a2 # subtract to get length
-retw
+abi_ret_default
 ENDPROC(__strnlen_user)
@@ -138,4 +138,4 @@ ENDPROC(__strnlen_user)
 .align 4
 10:
 movi a2, 0
-retw
+abi_ret_default


@@ -60,7 +60,7 @@
 .text
 ENTRY(__xtensa_copy_user)
-entry sp, 16 # minimal stack frame
+abi_entry_default
 # a2/ dst, a3/ src, a4/ len
 mov a5, a2 # copy dst so that a2 is return value
 mov a11, a4 # preserve original len for error case
@@ -75,7 +75,7 @@ ENTRY(__xtensa_copy_user)
 __ssa8 a3 # set shift amount from byte offset
 bnez a4, .Lsrcunaligned
 movi a2, 0 # return success for len==0
-retw
+abi_ret_default
 /*
 * Destination is unaligned
@@ -127,7 +127,7 @@ EX(10f) s8i a6, a5, 0
 #endif /* !XCHAL_HAVE_LOOPS */
 .Lbytecopydone:
 movi a2, 0 # return success for len bytes copied
-retw
+abi_ret_default
 /*
 * Destination and source are word-aligned.
@@ -187,7 +187,7 @@ EX(10f) l8ui a6, a3, 0
 EX(10f) s8i a6, a5, 0
 .L5:
 movi a2, 0 # return success for len bytes copied
-retw
+abi_ret_default
 /*
 * Destination is aligned, Source is unaligned
@@ -264,7 +264,7 @@ EX(10f) l8ui a6, a3, 0
 EX(10f) s8i a6, a5, 0
 .L15:
 movi a2, 0 # return success for len bytes copied
-retw
+abi_ret_default
 ENDPROC(__xtensa_copy_user)
@@ -281,4 +281,4 @@ ENDPROC(__xtensa_copy_user)
 10:
 sub a2, a5, a2 /* a2 <-- bytes copied */
 sub a2, a11, a2 /* a2 <-- bytes not copied */
-retw
+abi_ret_default


@@ -30,7 +30,7 @@
 ENTRY(clear_page)
-entry a1, 16
+abi_entry_default
 movi a3, 0
 __loopi a2, a7, PAGE_SIZE, 32
@@ -44,7 +44,7 @@ ENTRY(clear_page)
 s32i a3, a2, 28
 __endla a2, a7, 32
-retw
+abi_ret_default
 ENDPROC(clear_page)
@@ -57,7 +57,7 @@ ENDPROC(clear_page)
 ENTRY(copy_page)
-entry a1, 16
+abi_entry_default
 __loopi a2, a4, PAGE_SIZE, 32
@@ -86,7 +86,7 @@ ENTRY(copy_page)
 __endl a2, a4
-retw
+abi_ret_default
 ENDPROC(copy_page)
@@ -116,7 +116,7 @@ ENTRY(__tlbtemp_mapping_start)
 ENTRY(clear_page_alias)
-entry a1, 32
+abi_entry_default
 /* Skip setting up a temporary DTLB if not aliased low page. */
@@ -144,14 +144,14 @@ ENTRY(clear_page_alias)
 __endla a2, a7, 32
 bnez a6, 1f
-retw
+abi_ret_default
 /* We need to invalidate the temporary idtlb entry, if any. */
 1: idtlb a4
 dsync
-retw
+abi_ret_default
 ENDPROC(clear_page_alias)
@@ -164,7 +164,7 @@ ENDPROC(clear_page_alias)
 ENTRY(copy_page_alias)
-entry a1, 32
+abi_entry_default
 /* Skip setting up a temporary DTLB for destination if not aliased. */
@@ -221,19 +221,19 @@ ENTRY(copy_page_alias)
 bnez a6, 1f
 bnez a7, 2f
-retw
+abi_ret_default
 1: addi a2, a2, -PAGE_SIZE
 idtlb a2
 dsync
 bnez a7, 2f
-retw
+abi_ret_default
 2: addi a3, a3, -PAGE_SIZE+1
 idtlb a3
 dsync
-retw
+abi_ret_default
 ENDPROC(copy_page_alias)
@@ -248,7 +248,7 @@ ENDPROC(copy_page_alias)
 ENTRY(__flush_invalidate_dcache_page_alias)
-entry sp, 16
+abi_entry_default
 movi a7, 0 # required for exception handler
 addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
@@ -261,7 +261,7 @@ ENTRY(__flush_invalidate_dcache_page_alias)
 idtlb a4
 dsync
-retw
+abi_ret_default
 ENDPROC(__flush_invalidate_dcache_page_alias)
@@ -272,7 +272,7 @@ ENDPROC(__flush_invalidate_dcache_page_alias)
 ENTRY(__invalidate_dcache_page_alias)
-entry sp, 16
+abi_entry_default
 movi a7, 0 # required for exception handler
 addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
@@ -285,7 +285,7 @@ ENTRY(__invalidate_dcache_page_alias)
 idtlb a4
 dsync
-retw
+abi_ret_default
 ENDPROC(__invalidate_dcache_page_alias)
 #endif
@@ -296,7 +296,7 @@ ENTRY(__tlbtemp_mapping_itlb)
 ENTRY(__invalidate_icache_page_alias)
-entry sp, 16
+abi_entry_default
 addi a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE)
 mov a4, a2
@@ -307,7 +307,7 @@ ENTRY(__invalidate_icache_page_alias)
 iitlb a4
 isync
-retw
+abi_ret_default
 ENDPROC(__invalidate_icache_page_alias)
@@ -325,12 +325,12 @@ ENTRY(__tlbtemp_mapping_end)
 ENTRY(__invalidate_icache_page)
-entry sp, 16
+abi_entry_default
 ___invalidate_icache_page a2 a3
 isync
-retw
+abi_ret_default
 ENDPROC(__invalidate_icache_page)
@@ -340,12 +340,12 @@ ENDPROC(__invalidate_icache_page)
 ENTRY(__invalidate_dcache_page)
-entry sp, 16
+abi_entry_default
 ___invalidate_dcache_page a2 a3
 dsync
-retw
+abi_ret_default
 ENDPROC(__invalidate_dcache_page)
@@ -355,12 +355,12 @@ ENDPROC(__invalidate_dcache_page)
 ENTRY(__flush_invalidate_dcache_page)
-entry sp, 16
+abi_entry_default
 ___flush_invalidate_dcache_page a2 a3
 dsync
-retw
+abi_ret_default
 ENDPROC(__flush_invalidate_dcache_page)
@@ -370,12 +370,12 @@ ENDPROC(__flush_invalidate_dcache_page)
 ENTRY(__flush_dcache_page)
-entry sp, 16
+abi_entry_default
 ___flush_dcache_page a2 a3
 dsync
-retw
+abi_ret_default
 ENDPROC(__flush_dcache_page)
@@ -385,12 +385,12 @@ ENDPROC(__flush_dcache_page)
 ENTRY(__invalidate_icache_range)
-entry sp, 16
+abi_entry_default
 ___invalidate_icache_range a2 a3 a4
 isync
-retw
+abi_ret_default
 ENDPROC(__invalidate_icache_range)
@@ -400,12 +400,12 @@ ENDPROC(__invalidate_icache_range)
 ENTRY(__flush_invalidate_dcache_range)
-entry sp, 16
+abi_entry_default
 ___flush_invalidate_dcache_range a2 a3 a4
 dsync
-retw
+abi_ret_default
 ENDPROC(__flush_invalidate_dcache_range)
@@ -415,12 +415,12 @@ ENDPROC(__flush_invalidate_dcache_range)
 ENTRY(__flush_dcache_range)
-entry sp, 16
+abi_entry_default
 ___flush_dcache_range a2 a3 a4
 dsync
-retw
+abi_ret_default
 ENDPROC(__flush_dcache_range)
@@ -430,11 +430,11 @@ ENDPROC(__flush_dcache_range)
 ENTRY(__invalidate_dcache_range)
-entry sp, 16
+abi_entry_default
 ___invalidate_dcache_range a2 a3 a4
-retw
+abi_ret_default
 ENDPROC(__invalidate_dcache_range)
@@ -444,12 +444,12 @@ ENDPROC(__invalidate_dcache_range)
 ENTRY(__invalidate_icache_all)
-entry sp, 16
+abi_entry_default
 ___invalidate_icache_all a2 a3
 isync
-retw
+abi_ret_default
 ENDPROC(__invalidate_icache_all)
@@ -459,12 +459,12 @@ ENDPROC(__invalidate_icache_all)
 ENTRY(__flush_invalidate_dcache_all)
-entry sp, 16
+abi_entry_default
 ___flush_invalidate_dcache_all a2 a3
 dsync
-retw
+abi_ret_default
 ENDPROC(__flush_invalidate_dcache_all)
@@ -474,11 +474,11 @@ ENDPROC(__flush_invalidate_dcache_all)
 ENTRY(__invalidate_dcache_all)
-entry sp, 16
+abi_entry_default
 ___invalidate_dcache_all a2 a3
 dsync
-retw
+abi_ret_default
 ENDPROC(__invalidate_dcache_all)