@ arch/arm/lib/io-readsw-armv4.S (129 lines, 2.2 KiB, ArmAsm)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/lib/io-readsw-armv4.S
*
* Copyright (C) 1995-2000 Russell King
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
/*
 * pack rd, hw1, hw2
 *
 * Combine two 16-bit halfwords (read in order hw1 then hw2) into one
 * 32-bit word laid out in memory order, so that a single word store
 * writes the halfwords in the same sequence they were read.
 */
.macro pack, rd, hw1, hw2
#ifndef __ARMEB__
orr \rd, \hw1, \hw2, lsl #16 @ little-endian: rd = hw1 | (hw2 << 16)
#else
orr \rd, \hw2, \hw1, lsl #16 @ big-endian: rd = hw2 | (hw1 << 16)
#endif
.endm
/*
 * Buffer is not word-aligned.  If it is halfword-aligned, transfer one
 * halfword to reach word alignment and fall through into the aligned
 * entry point; otherwise take the byte-unaligned path.
 * "movs ip, r1, lsl #31" sets Z clear iff bit 0 of r1 is set (odd
 * address) and C = bit 1 of r1; .Linsw_noalign relies on these flags.
 */
.Linsw_align: movs ip, r1, lsl #31
bne .Linsw_noalign @ odd buffer address: byte-wise path
ldrh ip, [r0] @ one halfword from the port (r0 is never incremented)
sub r2, r2, #1 @ one fewer halfword to go
strh ip, [r1], #2 @ store it; r1 is now word-aligned
@ ...falls through into __raw_readsw below

/*
 * __raw_readsw(port, buffer, count)
 * r0 = port address (fixed, FIFO-style - re-read for every halfword)
 * r1 = destination buffer, r2 = number of 16-bit halfwords to read
 */
ENTRY(__raw_readsw)
teq r2, #0 @ zero-length transfer?
@ NOTE: "reteq lr" uses the "ret" assembler macro (commit "ARM: convert
@ all 'mov.* pc, reg' to 'bx reg' for ARMv6+", Russell King, 2014-06-30):
@ ARMv6 and later return via "bx lr", which recent CPUs execute faster
@ than "mov pc, lr" and which the ARM ARM (section A.4.1.1) strongly
@ recommends; the macro resolves to the right instruction per CPU.
reteq lr @ count == 0: nothing to do
tst r1, #3
bne .Linsw_align @ buffer not word-aligned: fix up first
stmfd sp!, {r4, r5, lr} @ scratch registers for the fast path
subs r2, r2, #8
bmi .Lno_insw_8 @ fewer than 8 halfwords remaining

@ Fast path: 8 halfwords per iteration, packed into 4 words and
@ written with a single stmia.  r2 holds (remaining - 8) here.
.Linsw_8_lp: ldrh r3, [r0]
ldrh r4, [r0]
pack r3, r3, r4
ldrh r4, [r0]
ldrh r5, [r0]
pack r4, r4, r5
ldrh r5, [r0]
ldrh ip, [r0]
pack r5, r5, ip
ldrh ip, [r0]
ldrh lr, [r0]
pack ip, ip, lr
subs r2, r2, #8
stmia r1!, {r3 - r5, ip} @ store 16 bytes
bpl .Linsw_8_lp

@ Tail: 4 halfwords if bit 2 of the residual count is set.
.Lno_insw_8: tst r2, #4
beq .Lno_insw_4
ldrh r3, [r0]
ldrh r4, [r0]
pack r3, r3, r4
ldrh r4, [r0]
ldrh ip, [r0]
pack r4, r4, ip
stmia r1!, {r3, r4}

@ "movs r2, r2, lsl #31": C = bit 1 of r2 (2 halfwords pending),
@ Z clear iff bit 0 of r2 is set (1 trailing halfword pending).
.Lno_insw_4: movs r2, r2, lsl #31
bcc .Lno_insw_2
ldrh r3, [r0]
ldrh ip, [r0]
pack r3, r3, ip
str r3, [r1], #4

.Lno_insw_2: ldrhne r3, [r0] @ final odd halfword, if any
strhne r3, [r1]
ldmfd sp!, {r4, r5, pc} @ restore and return
/*
 * Byte-unaligned path.  One byte is carried across iterations in ip;
 * the endian-dependent shifts below position a fresh halfword's spill
 * byte correctly for the active byte order: push_hbyte0 merges it into
 * the outgoing word, pull_hbyte1 extracts the leftover for next time.
 */
#ifdef __ARMEB__
#define _BE_ONLY_(code...) code
#define _LE_ONLY_(code...)
#define push_hbyte0 lsr #8
#define pull_hbyte1 lsl #24
#else
#define _BE_ONLY_(code...)
#define _LE_ONLY_(code...) code
#define push_hbyte0 lsl #24
#define pull_hbyte1 lsr #8
#endif

@ Entered from .Linsw_align with flags of "movs ip, r1, lsl #31":
@ r1 is odd; C = bit 1 of r1.
.Linsw_noalign: stmfd sp!, {r4, lr}
ldrbcc ip, [r1, #-1]! @ r1 % 4 == 1: preload byte before buffer and
bcc 1f @ ... decrement r1 to word alignment; byte is re-stored below
ldrh ip, [r0] @ r1 % 4 == 3: read one halfword...
sub r2, r2, #1
_BE_ONLY_( mov ip, ip, ror #8 )
strb ip, [r1], #1 @ ...store its first byte; r1 now word-aligned
_LE_ONLY_( mov ip, ip, lsr #8 ) @ keep the remaining byte in ip
_BE_ONLY_( mov ip, ip, lsr #24 )
1: subs r2, r2, #2
bmi 3f @ fewer than 2 halfwords left
_BE_ONLY_( mov ip, ip, lsl #24 )

@ Loop: merge the carried byte with two fresh halfwords into one
@ aligned word store; one spill byte is carried over in ip.
2: ldrh r3, [r0]
ldrh r4, [r0]
subs r2, r2, #2
orr ip, ip, r3, lsl #8
orr ip, ip, r4, push_hbyte0
str ip, [r1], #4 @ word-aligned store of 4 bytes
mov ip, r4, pull_hbyte1 @ leftover byte for the next iteration
bpl 2b

_BE_ONLY_( mov ip, ip, lsr #24 )

@ Tail: flush the carried byte, then handle up to one final halfword
@ (present when the residual count is odd, i.e. bit 0 of r2 set).
3: tst r2, #1
strb ip, [r1], #1 @ flush carried byte
ldrhne ip, [r0] @ one last halfword remains
_BE_ONLY_( movne ip, ip, ror #8 )
strbne ip, [r1], #1
_LE_ONLY_( movne ip, ip, lsr #8 )
_BE_ONLY_( movne ip, ip, lsr #24 )
strbne ip, [r1]
ldmfd sp!, {r4, pc} @ restore and return