x86/asm/32: Add ENDs to some functions and relabel with SYM_CODE_*
All these are functions which are invoked from elsewhere but they are not typical C functions. So annotate them using the new SYM_CODE_START. All these were not balanced with any END, so mark their ends by SYM_CODE_END, appropriately. Signed-off-by: Jiri Slaby <jslaby@suse.cz> Signed-off-by: Borislav Petkov <bp@suse.de> Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com> [xen bits] Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com> [hibernate] Cc: Andy Lutomirski <luto@kernel.org> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: Josh Poimboeuf <jpoimboe@redhat.com> Cc: Juergen Gross <jgross@suse.com> Cc: Len Brown <len.brown@intel.com> Cc: linux-arch@vger.kernel.org Cc: linux-pm@vger.kernel.org Cc: Pavel Machek <pavel@ucw.cz> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Pingfan Liu <kernelfans@gmail.com> Cc: Stefano Stabellini <sstabellini@kernel.org> Cc: "Steven Rostedt (VMware)" <rostedt@goodmis.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: x86-ml <x86@kernel.org> Cc: xen-devel@lists.xenproject.org Link: https://lkml.kernel.org/r/20191011115108.12392-26-jslaby@suse.cz
Branch: alistair/sunxi64-5.5-dsi
parent
6dcc5627f6
commit
78762b0e79
|
@@ -847,9 +847,10 @@ SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
|
||||||
* Xen doesn't set %esp to be precisely what the normal SYSENTER
|
* Xen doesn't set %esp to be precisely what the normal SYSENTER
|
||||||
* entry point expects, so fix it up before using the normal path.
|
* entry point expects, so fix it up before using the normal path.
|
||||||
*/
|
*/
|
||||||
ENTRY(xen_sysenter_target)
|
SYM_CODE_START(xen_sysenter_target)
|
||||||
addl $5*4, %esp /* remove xen-provided frame */
|
addl $5*4, %esp /* remove xen-provided frame */
|
||||||
jmp .Lsysenter_past_esp
|
jmp .Lsysenter_past_esp
|
||||||
|
SYM_CODE_END(xen_sysenter_target)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
|
@@ -9,8 +9,7 @@
|
||||||
.code32
|
.code32
|
||||||
ALIGN
|
ALIGN
|
||||||
|
|
||||||
ENTRY(wakeup_pmode_return)
|
SYM_CODE_START(wakeup_pmode_return)
|
||||||
wakeup_pmode_return:
|
|
||||||
movw $__KERNEL_DS, %ax
|
movw $__KERNEL_DS, %ax
|
||||||
movw %ax, %ss
|
movw %ax, %ss
|
||||||
movw %ax, %fs
|
movw %ax, %fs
|
||||||
|
@@ -39,6 +38,7 @@ wakeup_pmode_return:
|
||||||
# jump to place where we left off
|
# jump to place where we left off
|
||||||
movl saved_eip, %eax
|
movl saved_eip, %eax
|
||||||
jmp *%eax
|
jmp *%eax
|
||||||
|
SYM_CODE_END(wakeup_pmode_return)
|
||||||
|
|
||||||
bogus_magic:
|
bogus_magic:
|
||||||
jmp bogus_magic
|
jmp bogus_magic
|
||||||
|
@@ -72,7 +72,7 @@ restore_registers:
|
||||||
popfl
|
popfl
|
||||||
ret
|
ret
|
||||||
|
|
||||||
ENTRY(do_suspend_lowlevel)
|
SYM_CODE_START(do_suspend_lowlevel)
|
||||||
call save_processor_state
|
call save_processor_state
|
||||||
call save_registers
|
call save_registers
|
||||||
pushl $3
|
pushl $3
|
||||||
|
@@ -87,6 +87,7 @@ ret_point:
|
||||||
call restore_registers
|
call restore_registers
|
||||||
call restore_processor_state
|
call restore_processor_state
|
||||||
ret
|
ret
|
||||||
|
SYM_CODE_END(do_suspend_lowlevel)
|
||||||
|
|
||||||
.data
|
.data
|
||||||
ALIGN
|
ALIGN
|
||||||
|
|
|
@@ -89,7 +89,7 @@ WEAK(ftrace_stub)
|
||||||
ret
|
ret
|
||||||
END(ftrace_caller)
|
END(ftrace_caller)
|
||||||
|
|
||||||
ENTRY(ftrace_regs_caller)
|
SYM_CODE_START(ftrace_regs_caller)
|
||||||
/*
|
/*
|
||||||
* We're here from an mcount/fentry CALL, and the stack frame looks like:
|
* We're here from an mcount/fentry CALL, and the stack frame looks like:
|
||||||
*
|
*
|
||||||
|
@@ -163,6 +163,7 @@ SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
|
||||||
popl %eax
|
popl %eax
|
||||||
|
|
||||||
jmp .Lftrace_ret
|
jmp .Lftrace_ret
|
||||||
|
SYM_CODE_END(ftrace_regs_caller)
|
||||||
|
|
||||||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
||||||
ENTRY(ftrace_graph_caller)
|
ENTRY(ftrace_graph_caller)
|
||||||
|
|
|
@@ -64,7 +64,7 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
|
||||||
* can.
|
* can.
|
||||||
*/
|
*/
|
||||||
__HEAD
|
__HEAD
|
||||||
ENTRY(startup_32)
|
SYM_CODE_START(startup_32)
|
||||||
movl pa(initial_stack),%ecx
|
movl pa(initial_stack),%ecx
|
||||||
|
|
||||||
/* test KEEP_SEGMENTS flag to see if the bootloader is asking
|
/* test KEEP_SEGMENTS flag to see if the bootloader is asking
|
||||||
|
@@ -172,6 +172,7 @@ num_subarch_entries = (. - subarch_entries) / 4
|
||||||
#else
|
#else
|
||||||
jmp .Ldefault_entry
|
jmp .Ldefault_entry
|
||||||
#endif /* CONFIG_PARAVIRT */
|
#endif /* CONFIG_PARAVIRT */
|
||||||
|
SYM_CODE_END(startup_32)
|
||||||
|
|
||||||
#ifdef CONFIG_HOTPLUG_CPU
|
#ifdef CONFIG_HOTPLUG_CPU
|
||||||
/*
|
/*
|
||||||
|
|
|
@@ -35,7 +35,7 @@ ENTRY(swsusp_arch_suspend)
|
||||||
ret
|
ret
|
||||||
ENDPROC(swsusp_arch_suspend)
|
ENDPROC(swsusp_arch_suspend)
|
||||||
|
|
||||||
ENTRY(restore_image)
|
SYM_CODE_START(restore_image)
|
||||||
/* prepare to jump to the image kernel */
|
/* prepare to jump to the image kernel */
|
||||||
movl restore_jump_address, %ebx
|
movl restore_jump_address, %ebx
|
||||||
movl restore_cr3, %ebp
|
movl restore_cr3, %ebp
|
||||||
|
@@ -45,9 +45,10 @@ ENTRY(restore_image)
|
||||||
/* jump to relocated restore code */
|
/* jump to relocated restore code */
|
||||||
movl relocated_restore_code, %eax
|
movl relocated_restore_code, %eax
|
||||||
jmpl *%eax
|
jmpl *%eax
|
||||||
|
SYM_CODE_END(restore_image)
|
||||||
|
|
||||||
/* code below has been relocated to a safe page */
|
/* code below has been relocated to a safe page */
|
||||||
ENTRY(core_restore_code)
|
SYM_CODE_START(core_restore_code)
|
||||||
movl temp_pgt, %eax
|
movl temp_pgt, %eax
|
||||||
movl %eax, %cr3
|
movl %eax, %cr3
|
||||||
|
|
||||||
|
@@ -77,6 +78,7 @@ copy_loop:
|
||||||
|
|
||||||
done:
|
done:
|
||||||
jmpl *%ebx
|
jmpl *%ebx
|
||||||
|
SYM_CODE_END(core_restore_code)
|
||||||
|
|
||||||
/* code below belongs to the image kernel */
|
/* code below belongs to the image kernel */
|
||||||
.align PAGE_SIZE
|
.align PAGE_SIZE
|
||||||
|
|
|
@@ -29,7 +29,7 @@
|
||||||
.code16
|
.code16
|
||||||
|
|
||||||
.balign PAGE_SIZE
|
.balign PAGE_SIZE
|
||||||
ENTRY(trampoline_start)
|
SYM_CODE_START(trampoline_start)
|
||||||
wbinvd # Needed for NUMA-Q should be harmless for others
|
wbinvd # Needed for NUMA-Q should be harmless for others
|
||||||
|
|
||||||
LJMPW_RM(1f)
|
LJMPW_RM(1f)
|
||||||
|
@@ -54,11 +54,13 @@ ENTRY(trampoline_start)
|
||||||
lmsw %dx # into protected mode
|
lmsw %dx # into protected mode
|
||||||
|
|
||||||
ljmpl $__BOOT_CS, $pa_startup_32
|
ljmpl $__BOOT_CS, $pa_startup_32
|
||||||
|
SYM_CODE_END(trampoline_start)
|
||||||
|
|
||||||
.section ".text32","ax"
|
.section ".text32","ax"
|
||||||
.code32
|
.code32
|
||||||
ENTRY(startup_32) # note: also used from wakeup_asm.S
|
SYM_CODE_START(startup_32) # note: also used from wakeup_asm.S
|
||||||
jmp *%eax
|
jmp *%eax
|
||||||
|
SYM_CODE_END(startup_32)
|
||||||
|
|
||||||
.bss
|
.bss
|
||||||
.balign 8
|
.balign 8
|
||||||
|
|
|
@@ -56,7 +56,7 @@
|
||||||
_ASM_EXTABLE(1b,2b)
|
_ASM_EXTABLE(1b,2b)
|
||||||
.endm
|
.endm
|
||||||
|
|
||||||
ENTRY(xen_iret)
|
SYM_CODE_START(xen_iret)
|
||||||
/* test eflags for special cases */
|
/* test eflags for special cases */
|
||||||
testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp)
|
testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp)
|
||||||
jnz hyper_iret
|
jnz hyper_iret
|
||||||
|
@@ -122,6 +122,7 @@ xen_iret_end_crit:
|
||||||
hyper_iret:
|
hyper_iret:
|
||||||
/* put this out of line since its very rarely used */
|
/* put this out of line since its very rarely used */
|
||||||
jmp hypercall_page + __HYPERVISOR_iret * 32
|
jmp hypercall_page + __HYPERVISOR_iret * 32
|
||||||
|
SYM_CODE_END(xen_iret)
|
||||||
|
|
||||||
.globl xen_iret_start_crit, xen_iret_end_crit
|
.globl xen_iret_start_crit, xen_iret_end_crit
|
||||||
|
|
||||||
|
@@ -165,7 +166,7 @@ hyper_iret:
|
||||||
* SAVE_ALL state before going on, since it's usermode state which we
|
* SAVE_ALL state before going on, since it's usermode state which we
|
||||||
* eventually need to restore.
|
* eventually need to restore.
|
||||||
*/
|
*/
|
||||||
ENTRY(xen_iret_crit_fixup)
|
SYM_CODE_START(xen_iret_crit_fixup)
|
||||||
/*
|
/*
|
||||||
* Paranoia: Make sure we're really coming from kernel space.
|
* Paranoia: Make sure we're really coming from kernel space.
|
||||||
* One could imagine a case where userspace jumps into the
|
* One could imagine a case where userspace jumps into the
|
||||||
|
@@ -204,4 +205,4 @@ ENTRY(xen_iret_crit_fixup)
|
||||||
|
|
||||||
lea 4(%edi), %esp /* point esp to new frame */
|
lea 4(%edi), %esp /* point esp to new frame */
|
||||||
2: jmp xen_do_upcall
|
2: jmp xen_do_upcall
|
||||||
|
SYM_CODE_END(xen_iret_crit_fixup)
|
||||||
|
|
Loading…
Reference in New Issue