alistair23-linux/arch/x86/lib/copy_page_64.S
Ingo Molnar 131484c8da x86/debug: Remove perpetually broken, unmaintainable dwarf annotations
So the dwarf2 annotations in low level assembly code have
become an increasing hindrance: unreadable, messy macros
mixed into some of the most security sensitive code paths
of the Linux kernel.

These debug info annotations don't even buy the upstream
kernel anything: dwarf driven stack unwinding has caused
problems in the past so it's out of tree, and the upstream
kernel only uses the much more robust framepointers based
stack unwinding method.

In addition to that there's a steady, slow bitrot going
on with these annotations, requiring frequent fixups.
There's no tooling and no functionality upstream that
keeps it correct.

So burn down the sick forest, allowing new, healthier growth:

   27 files changed, 350 insertions(+), 1101 deletions(-)

Someone who has the willingness and time to do this
properly can attempt to reintroduce dwarf debuginfo in x86
assembly code plus dwarf unwinding from first principles,
with the following conditions:

 - it should be maximally readable, and maximally low-key to
   'ordinary' code reading and maintenance.

 - find a build time method to insert dwarf annotations
   automatically in the most common cases, for pop/push
   instructions that manipulate the stack pointer. This could
   be done for example via a preprocessing step that just
   looks for common patterns - plus special annotations for
   the few cases where we want to depart from the default.
   We have hundreds of CFI annotations, so automating most of
   that makes sense.

 - it should come with build tooling checks that ensure that
   CFI annotations are sensible. We've seen such efforts from
   the framepointer side, and there's no reason it couldn't be
   done on the dwarf side.

Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jan Beulich <JBeulich@suse.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2015-06-02 07:57:48 +02:00
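
The conditions above talk about automating the DWARF CFI bookkeeping for push/pop
sequences. As a minimal, hypothetical sketch (not part of this commit or of the file
below), this is roughly what that bookkeeping looks like when written by hand as raw
GNU as directives instead of the removed CFI_* macros: every instruction that moves
%rsp or spills a register needs a matching annotation telling the unwinder where the
frame and the saved registers now live.

/* Hypothetical example for illustration only; cfi_example is not a real kernel symbol. */
ENTRY(cfi_example)
        .cfi_startproc
        pushq   %rbx                    /* spill a callee-saved register */
        .cfi_adjust_cfa_offset 8        /* %rsp moved down by 8, so the CFA is 8 bytes further away */
        .cfi_rel_offset %rbx, 0         /* the saved %rbx now sits at the top of the stack */
        movq    %rdi, %rbx              /* ... function body ... */
        popq    %rbx
        .cfi_adjust_cfa_offset -8       /* %rsp moved back up */
        .cfi_restore %rbx               /* %rbx again holds its caller's value */
        ret
        .cfi_endproc
ENDPROC(cfi_example)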

/* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */
#include <linux/linkage.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
/*
 * Some CPUs run faster using the string copy instructions (sane microcode).
 * It is also a lot simpler. Use this when possible. But, don't use streaming
 * copy unless the CPU indicates X86_FEATURE_REP_GOOD. Could vary the
 * prefetch distance based on SMP/UP.
 */
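
/*
 * void copy_page(void *to, void *from): copy one 4096-byte page.
 * %rdi = to (destination), %rsi = from (source), per the System V AMD64 ABI.
 */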
        ALIGN
ENTRY(copy_page)
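        /*
         * At boot, alternatives patching turns the "jmp copy_page_regs" into
         * NOPs on CPUs with X86_FEATURE_REP_GOOD, so those CPUs fall through
         * to the rep movsq below; CPUs without it jump to the unrolled
         * register copy in copy_page_regs.
         */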
        ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
        movl    $4096/8, %ecx
        rep     movsq
        ret
ENDPROC(copy_page)

ENTRY(copy_page_regs)
        subq    $2*8, %rsp
        movq    %rbx, (%rsp)
        movq    %r12, 1*8(%rsp)
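
        /*
         * A 4096-byte page is 4096/64 = 64 cache-line-sized blocks.  The main
         * loop copies 64 - 5 = 59 of them while prefetching 5 lines ahead;
         * the final 5 are left to .Loop2 so the prefetch never touches memory
         * past the end of the source page.
         */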
        movl    $(4096/64)-5, %ecx
        .p2align 4
.Loop64:
        dec     %rcx

        movq    0x8*0(%rsi), %rax
        movq    0x8*1(%rsi), %rbx
        movq    0x8*2(%rsi), %rdx
        movq    0x8*3(%rsi), %r8
        movq    0x8*4(%rsi), %r9
        movq    0x8*5(%rsi), %r10
        movq    0x8*6(%rsi), %r11
        movq    0x8*7(%rsi), %r12

        prefetcht0 5*64(%rsi)

        movq    %rax, 0x8*0(%rdi)
        movq    %rbx, 0x8*1(%rdi)
        movq    %rdx, 0x8*2(%rdi)
        movq    %r8,  0x8*3(%rdi)
        movq    %r9,  0x8*4(%rdi)
        movq    %r10, 0x8*5(%rdi)
        movq    %r11, 0x8*6(%rdi)
        movq    %r12, 0x8*7(%rdi)

        leaq    64 (%rsi), %rsi
        leaq    64 (%rdi), %rdi

        jnz     .Loop64
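
        /* Copy the remaining 5 * 64 bytes without prefetching. */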
        movl    $5, %ecx
        .p2align 4
.Loop2:
        decl    %ecx

        movq    0x8*0(%rsi), %rax
        movq    0x8*1(%rsi), %rbx
        movq    0x8*2(%rsi), %rdx
        movq    0x8*3(%rsi), %r8
        movq    0x8*4(%rsi), %r9
        movq    0x8*5(%rsi), %r10
        movq    0x8*6(%rsi), %r11
        movq    0x8*7(%rsi), %r12

        movq    %rax, 0x8*0(%rdi)
        movq    %rbx, 0x8*1(%rdi)
        movq    %rdx, 0x8*2(%rdi)
        movq    %r8,  0x8*3(%rdi)
        movq    %r9,  0x8*4(%rdi)
        movq    %r10, 0x8*5(%rdi)
        movq    %r11, 0x8*6(%rdi)
        movq    %r12, 0x8*7(%rdi)

        leaq    64(%rdi), %rdi
        leaq    64(%rsi), %rsi

        jnz     .Loop2

        movq    (%rsp), %rbx
        movq    1*8(%rsp), %r12
        addq    $2*8, %rsp
        ret
ENDPROC(copy_page_regs)