[PARISC] Beautify parisc vmlinux.lds.S

Introduce a consistent layout of vmlinux. The same layout has
already been introduced for most other architectures.
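
In outline, that layout runs text, read-only data, writable data,
BSS, then init sections, each bracketed by begin/end labels. A
simplified sketch using this file's symbols (not the complete script):

	SECTIONS
	{
		_text = .;		/* text and read-only data */
		.text ALIGN(16) : { TEXT_TEXT }
		_etext = .;
		RODATA
		.data : { DATA_DATA }
		_edata = .;		/* end of initialised data */
		__bss_start = .;
		.bss : { *(.bss) *(COMMON) }
		__bss_stop = .;
		__init_begin = .;	/* init sections, freed after boot */
		__init_end = .;
		_end = .;
	}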

At the same time, move a few label definitions inside the curly
brackets so they are assigned the correct starting address.
Before, alignment inserted by ld could cause a label to point
before the actual start of its section.
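
The exception table is one such label pair; as the hunk below shows,
the begin/end labels move inside the output section so that any
alignment padding ld emits falls before the labels rather than
between a label and the section contents:

	/* before: if ld aligns __ex_table, __start___ex_table points
	 * at the padding, before the real start of the section */
	__start___ex_table = .;
	__ex_table : { *(__ex_table) }
	__stop___ex_table = .;

	/* after: the labels are assigned inside the braces and so
	 * always match the true section boundaries */
	__ex_table : {
		__start___ex_table = .;
		*(__ex_table)
		__stop___ex_table = .;
	}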

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Kyle McMartin <kyle@mcmartin.ca>
commit be1b3d8cb1 (parent e9a03990d9)
Sam Ravnborg, 2007-10-18 00:04:25 -07:00; committed by Kyle McMartin

@@ -46,168 +46,219 @@ jiffies = jiffies_64;
#endif
SECTIONS
{
	. = KERNEL_BINARY_TEXT_START;

	_text = .;		/* Text and read-only data */
	.text ALIGN(16) : {
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		*(.text.do_softirq)
		*(.text.sys_exit)
		*(.text.do_sigaltstack)
		*(.text.do_fork)
		*(.text.*)
		*(.fixup)
		*(.lock.text)		/* out-of-line lock text */
		*(.gnu.warning)
	} = 0
	/* End of text section */
	_etext = .;

	RODATA
	BUG_TABLE

	/* writeable */
	/* Make sure this is page aligned so
	 * that we can properly leave these
	 * as writable
	 */
	. = ALIGN(ASM_PAGE_SIZE);
	data_start = .;

	. = ALIGN(16);
	/* Exception table */
	__ex_table : {
		__start___ex_table = .;
		*(__ex_table)
		__stop___ex_table = .;
	}

	NOTES

	/* unwind info */
	.PARISC.unwind : {
		__start___unwind = .;
		*(.PARISC.unwind)
		__stop___unwind = .;
	}

	/* rarely changed data like cpu maps */
	. = ALIGN(16);
	.data.read_mostly : {
		*(.data.read_mostly)
	}

	. = ALIGN(L1_CACHE_BYTES);
	/* Data */
	.data : {
		DATA_DATA
		CONSTRUCTORS
	}

	. = ALIGN(L1_CACHE_BYTES);
	.data.cacheline_aligned : {
		*(.data.cacheline_aligned)
	}

	/* PA-RISC locks requires 16-byte alignment */
	. = ALIGN(16);
	.data.lock_aligned : {
		*(.data.lock_aligned)
	}

	/* nosave data is really only used for software suspend...it's here
	 * just in case we ever implement it
	 */
	. = ALIGN(ASM_PAGE_SIZE);
	__nosave_begin = .;
	.data_nosave : {
		*(.data.nosave)
	}
	. = ALIGN(ASM_PAGE_SIZE);
	__nosave_end = .;

	/* End of data section */
	_edata = .;

	/* BSS */
	__bss_start = .;

	/* page table entries need to be PAGE_SIZE aligned */
	. = ALIGN(ASM_PAGE_SIZE);
	.data.vmpages : {
		*(.data.vm0.pmd)
		*(.data.vm0.pgd)
		*(.data.vm0.pte)
	}
	.bss : {
		*(.bss)
		*(COMMON)
	}
	__bss_stop = .;

	/* assembler code expects init_task to be 16k aligned */
	. = ALIGN(16384);
	/* init_task */
	.data.init_task : {
		*(.data.init_task)
	}

	/* The interrupt stack is currently partially coded, but not yet
	 * implemented
	 */
	. = ALIGN(16384);
	init_istack : {
		*(init_istack)
	}

#ifdef CONFIG_64BIT
	. = ALIGN(16);
	/* Linkage tables */
	.opd : {
		*(.opd)
	} PROVIDE (__gp = .);
	.plt : {
		*(.plt)
	}
	.dlt : {
		*(.dlt)
	}
#endif

	/* reserve space for interrupt stack by aligning __init* to 16k */
	. = ALIGN(16384);
	__init_begin = .;
	.init.text : {
		_sinittext = .;
		*(.init.text)
		_einittext = .;
	}
	.init.data : {
		*(.init.data)
	}
	. = ALIGN(16);
	.init.setup : {
		__setup_start = .;
		*(.init.setup)
		__setup_end = .;
	}
	.initcall.init : {
		__initcall_start = .;
		INITCALLS
		__initcall_end = .;
	}
	.con_initcall.init : {
		__con_initcall_start = .;
		*(.con_initcall.init)
		__con_initcall_end = .;
	}
	SECURITY_INIT

	/* alternate instruction replacement.  This is a mechanism x86 uses
	 * to detect the CPU type and replace generic instruction sequences
	 * with CPU specific ones.  We don't currently do this in PA, but
	 * it seems like a good idea...
	 */
	. = ALIGN(4);
	.altinstructions : {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}
	.altinstr_replacement : {
		*(.altinstr_replacement)
	}

	/* .exit.text is discard at runtime, not link time, to deal with references
	 * from .altinstructions and .eh_frame
	 */
	.exit.text : {
		*(.exit.text)
	}
	.exit.data : {
		*(.exit.data)
	}
#ifdef CONFIG_BLK_DEV_INITRD
	. = ALIGN(ASM_PAGE_SIZE);
	.init.ramfs : {
		__initramfs_start = .;
		*(.init.ramfs)
		__initramfs_end = .;
	}
#endif

	PERCPU(ASM_PAGE_SIZE)
	. = ALIGN(ASM_PAGE_SIZE);
	__init_end = .;
	/* freed after init ends here */

	_end = . ;

	/* Sections to be discarded */
	/DISCARD/ : {
		*(.exitcall.exit)
#ifdef CONFIG_64BIT
		/* temporary hack until binutils is fixed to not emit these
		 * for static binaries
		 */
		*(.interp)
		*(.dynsym)
		*(.dynstr)
		*(.dynamic)
		*(.hash)
		*(.gnu.hash)
#endif
	}

	STABS_DEBUG
	.note 0 : { *(.note) }
}