
Merge branch 'kexec/idmap' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into devel-stable

Russell King 2011-12-06 20:27:54 +00:00
commit 3ee0fc5ca1
32 changed files with 144 additions and 93 deletions

arch/arm/include/asm/idmap.h

@@ -0,0 +1,14 @@
#ifndef __ASM_IDMAP_H
#define __ASM_IDMAP_H
#include <linux/compiler.h>
#include <asm/pgtable.h>
/* Tag a function as requiring to be executed via an identity mapping. */
#define __idmap __section(.idmap.text) noinline notrace
extern pgd_t *idmap_pgd;
void setup_mm_for_reboot(void);
#endif /* __ASM_IDMAP_H */
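The `__idmap` annotation combines three attributes: placement in `.idmap.text` (collected by the vmlinux.lds.S change below), `noinline` so the tagged body cannot be inlined out of the section, and `notrace` so no tracing thunk outside the identity map is called. A minimal usage sketch, with a hypothetical function name:

#include <asm/idmap.h>

/* Hypothetical example: placed in .idmap.text, so it remains executable
 * at the same address while the MMU is being turned on or off. */
static void __idmap example_mmu_transition(unsigned long reset_addr)
{
	/* ... code that must survive the virtual->physical switch ... */
}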

arch/arm/include/asm/pgtable.h

@@ -347,9 +347,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pgtable_cache_init() do { } while (0)
void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
void identity_mapping_del(pgd_t *, unsigned long, unsigned long);
#endif /* !__ASSEMBLY__ */
#endif /* CONFIG_MMU */

arch/arm/kernel/head.S

@@ -170,11 +170,11 @@ __create_page_tables:
* Create identity mapping to cater for __enable_mmu.
* This identity mapping will be removed by paging_init().
*/
adr r0, __enable_mmu_loc
adr r0, __turn_mmu_on_loc
ldmia r0, {r3, r5, r6}
sub r0, r0, r3 @ virt->phys offset
add r5, r5, r0 @ phys __enable_mmu
add r6, r6, r0 @ phys __enable_mmu_end
add r5, r5, r0 @ phys __turn_mmu_on
add r6, r6, r0 @ phys __turn_mmu_on_end
mov r5, r5, lsr #SECTION_SHIFT
mov r6, r6, lsr #SECTION_SHIFT
@@ -287,10 +287,10 @@ __create_page_tables:
ENDPROC(__create_page_tables)
.ltorg
.align
__enable_mmu_loc:
__turn_mmu_on_loc:
.long .
.long __enable_mmu
.long __enable_mmu_end
.long __turn_mmu_on
.long __turn_mmu_on_end
#if defined(CONFIG_SMP)
__CPUINIT
@@ -398,15 +398,17 @@ ENDPROC(__enable_mmu)
* other registers depend on the function called upon completion
*/
.align 5
__turn_mmu_on:
.pushsection .idmap.text, "ax"
ENTRY(__turn_mmu_on)
mov r0, r0
mcr p15, 0, r0, c1, c0, 0 @ write control reg
mrc p15, 0, r3, c0, c0, 0 @ read id reg
mov r3, r3
mov r3, r13
mov pc, r3
__enable_mmu_end:
__turn_mmu_on_end:
ENDPROC(__turn_mmu_on)
.popsection
#ifdef CONFIG_SMP_ON_UP

arch/arm/kernel/sleep.S

@@ -54,6 +54,7 @@ ENDPROC(cpu_suspend_abort)
* r0 = control register value
*/
.align 5
.pushsection .idmap.text,"ax"
ENTRY(cpu_resume_mmu)
ldr r3, =cpu_resume_after_mmu
mcr p15, 0, r0, c1, c0, 0 @ turn on MMU, I-cache, etc
@@ -62,6 +63,7 @@ ENTRY(cpu_resume_mmu)
mov r0, r0
mov pc, r3 @ jump to virtual address
ENDPROC(cpu_resume_mmu)
.popsection
cpu_resume_after_mmu:
bl cpu_init @ restore the und/abt/irq banked regs
mov r0, #0 @ return zero on success

arch/arm/kernel/smp.c

@@ -31,6 +31,7 @@
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
@@ -61,7 +62,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
{
struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
struct task_struct *idle = ci->idle;
pgd_t *pgd;
int ret;
/*
@@ -83,30 +83,12 @@ int __cpuinit __cpu_up(unsigned int cpu)
init_idle(idle, cpu);
}
/*
* Allocate initial page tables to allow the new CPU to
* enable the MMU safely. This essentially means a set
* of our "standard" page tables, with the addition of
* a 1:1 mapping for the physical address of the kernel.
*/
pgd = pgd_alloc(&init_mm);
if (!pgd)
return -ENOMEM;
if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end));
#endif
identity_mapping_add(pgd, __pa(_stext), __pa(_etext));
identity_mapping_add(pgd, __pa(_sdata), __pa(_edata));
}
/*
* We need to tell the secondary core where to find
* its stack and the page tables.
*/
secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
secondary_data.pgdir = virt_to_phys(pgd);
secondary_data.pgdir = virt_to_phys(idmap_pgd);
secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
@@ -142,16 +124,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
secondary_data.stack = NULL;
secondary_data.pgdir = 0;
if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end));
#endif
identity_mapping_del(pgd, __pa(_stext), __pa(_etext));
identity_mapping_del(pgd, __pa(_sdata), __pa(_edata));
}
pgd_free(&init_mm, pgd);
return ret;
}
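With the per-boot `pgd` gone, the secondary core now always starts on `idmap_pgd` and then switches itself over to `swapper_pg_dir`. For reference, a sketch of the descriptor the incoming CPU reads while still on the identity map (field layout as in `arch/arm/include/asm/smp.h` of this series; illustrative, check the header):

struct secondary_data {
	unsigned long pgdir;		/* phys addr of idmap_pgd, used to enable the MMU */
	unsigned long swapper_pg_dir;	/* phys addr of the final kernel page tables */
	void *stack;			/* idle thread stack for the incoming CPU */
};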

arch/arm/kernel/suspend.c

@@ -1,13 +1,12 @@
#include <linux/init.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
static pgd_t *suspend_pgd;
extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
extern void cpu_resume_mmu(void);
@@ -21,7 +20,7 @@ void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
*save_ptr = virt_to_phys(ptr);
/* This must correspond to the LDM in cpu_resume() assembly */
*ptr++ = virt_to_phys(suspend_pgd);
*ptr++ = virt_to_phys(idmap_pgd);
*ptr++ = sp;
*ptr++ = virt_to_phys(cpu_do_resume);
@@ -42,7 +41,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
struct mm_struct *mm = current->active_mm;
int ret;
if (!suspend_pgd)
if (!idmap_pgd)
return -EINVAL;
/*
@@ -59,14 +58,3 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
return ret;
}
static int __init cpu_suspend_init(void)
{
suspend_pgd = pgd_alloc(&init_mm);
if (suspend_pgd) {
unsigned long addr = virt_to_phys(cpu_resume_mmu);
identity_mapping_add(suspend_pgd, addr, addr + SECTION_SIZE);
}
return suspend_pgd ? 0 : -ENOMEM;
}
core_initcall(cpu_suspend_init);
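`cpu_suspend()` no longer owns a private `suspend_pgd`; it simply fails with `-EINVAL` if the shared `idmap_pgd` was never set up (see `init_static_idmap()` in arch/arm/mm/idmap.c below). The three words stored by `__cpu_suspend_save()` must match the order in which the resume assembly reloads them; sketched as a plain array (illustrative only, names taken from the hunk above):

u32 resume_record[3] = {
	virt_to_phys(idmap_pgd),	/* page tables used while the MMU comes back on */
	sp,				/* stack pointer to restore */
	virt_to_phys(cpu_do_resume),	/* CPU-specific register-restore routine */
};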

arch/arm/kernel/vmlinux.lds.S

@@ -13,6 +13,12 @@
*(.proc.info.init) \
VMLINUX_SYMBOL(__proc_info_end) = .;
#define IDMAP_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__idmap_text_start) = .; \
*(.idmap.text) \
VMLINUX_SYMBOL(__idmap_text_end) = .;
#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
#define ARM_CPU_KEEP(x) x
@@ -92,6 +98,7 @@ SECTIONS
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
IDMAP_TEXT
#ifdef CONFIG_MMU
*(.fixup)
#endif
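`IDMAP_TEXT` gathers every `.idmap.text` input section between a pair of linker-provided symbols, which C code can then treat as array names. A minimal sketch of the consuming side (mirroring arch/arm/mm/idmap.c below; `idmap_text_size()` is a hypothetical helper):

extern char __idmap_text_start[], __idmap_text_end[];

/* Hypothetical helper: size in bytes of the code that must be 1:1 mapped. */
static unsigned long idmap_text_size(void)
{
	return (unsigned long)(__idmap_text_end - __idmap_text_start);
}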

arch/arm/mm/idmap.c

@@ -1,8 +1,12 @@
#include <linux/kernel.h>
#include <asm/cputype.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
pgd_t *idmap_pgd;
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
unsigned long prot)
@@ -28,7 +32,7 @@ static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
} while (pud++, addr = next, addr != end);
}
void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
{
unsigned long prot, next;
@@ -43,48 +47,41 @@ void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
} while (pgd++, addr = next, addr != end);
}
#ifdef CONFIG_SMP
static void idmap_del_pmd(pud_t *pud, unsigned long addr, unsigned long end)
extern char __idmap_text_start[], __idmap_text_end[];
static int __init init_static_idmap(void)
{
pmd_t *pmd = pmd_offset(pud, addr);
pmd_clear(pmd);
phys_addr_t idmap_start, idmap_end;
idmap_pgd = pgd_alloc(&init_mm);
if (!idmap_pgd)
return -ENOMEM;
/* Add an identity mapping for the physical address of the section. */
idmap_start = virt_to_phys((void *)__idmap_text_start);
idmap_end = virt_to_phys((void *)__idmap_text_end);
pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
(long long)idmap_start, (long long)idmap_end);
identity_mapping_add(idmap_pgd, idmap_start, idmap_end);
return 0;
}
static void idmap_del_pud(pgd_t *pgd, unsigned long addr, unsigned long end)
{
pud_t *pud = pud_offset(pgd, addr);
unsigned long next;
do {
next = pud_addr_end(addr, end);
idmap_del_pmd(pud, addr, next);
} while (pud++, addr = next, addr != end);
}
void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end)
{
unsigned long next;
pgd += pgd_index(addr);
do {
next = pgd_addr_end(addr, end);
idmap_del_pud(pgd, addr, next);
} while (pgd++, addr = next, addr != end);
}
#endif
early_initcall(init_static_idmap);
/*
* In order to soft-boot, we need to insert a 1:1 mapping in place of
* the user-mode pages. This will then ensure that we have predictable
* results when turning the mmu off
* In order to soft-boot, we need to switch to a 1:1 mapping for the
* cpu_reset functions. This will then ensure that we have predictable
* results when turning off the mmu.
*/
void setup_mm_for_reboot(void)
{
/*
* We need to access to user-mode page tables here. For kernel threads
* we don't have any user-mode mappings so we use the context that we
* "borrowed".
*/
identity_mapping_add(current->active_mm->pgd, 0, TASK_SIZE);
/* Clean and invalidate L1. */
flush_cache_all();
/* Switch to the identity mapping. */
cpu_switch_mm(idmap_pgd, &init_mm);
/* Flush the TLB. */
local_flush_tlb_all();
}
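For context, a hedged sketch of the soft-reboot sequence that relies on `setup_mm_for_reboot()` (modelled on `arm_machine_restart()` in arch/arm/kernel/process.c of this era; ordering and names may differ by platform):

void example_soft_restart(unsigned long addr)
{
	local_irq_disable();
	setup_mm_for_reboot();	/* from here on, idmap_pgd's 1:1 mapping is live */
	flush_cache_all();
	cpu_proc_fin();		/* shut the caches down cleanly */
	flush_cache_all();	/* push out any further dirty data */
	cpu_reset(addr);	/* executes from .idmap.text as the MMU goes off */
}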

arch/arm/mm/proc-arm1020.S

@@ -95,6 +95,7 @@ ENTRY(cpu_arm1020_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm1020_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -107,6 +108,8 @@ ENTRY(cpu_arm1020_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_arm1020_reset)
.popsection
/*
* cpu_arm1020_do_idle()
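This file and every proc-*.S file that follows receive the same treatment: the CPU's reset routine is bracketed by `.pushsection .idmap.text, "ax"`/`.popsection` and gains an `ENDPROC` marker. The point is that the reset entry is reached indirectly; on multi-CPU builds `asm/proc-fns.h` dispatches through a method table, roughly:

/* Sketch of the MULTI_CPU dispatch in asm/proc-fns.h; because
 * processor.reset now points into .idmap.text, the indirect call still
 * lands on a mapped address once idmap_pgd is live: */
#define cpu_reset(addr)	processor.reset(addr)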

arch/arm/mm/proc-arm1020e.S

@@ -95,6 +95,7 @@ ENTRY(cpu_arm1020e_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm1020e_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -107,6 +108,8 @@ ENTRY(cpu_arm1020e_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_arm1020e_reset)
.popsection
/*
* cpu_arm1020e_do_idle()

arch/arm/mm/proc-arm1022.S

@@ -84,6 +84,7 @@ ENTRY(cpu_arm1022_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm1022_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -96,6 +97,8 @@ ENTRY(cpu_arm1022_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_arm1022_reset)
.popsection
/*
* cpu_arm1022_do_idle()

arch/arm/mm/proc-arm1026.S

@@ -84,6 +84,7 @@ ENTRY(cpu_arm1026_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm1026_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -96,6 +97,8 @@ ENTRY(cpu_arm1026_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_arm1026_reset)
.popsection
/*
* cpu_arm1026_do_idle()

arch/arm/mm/proc-arm6_7.S

@@ -225,6 +225,7 @@ ENTRY(cpu_arm7_set_pte_ext)
* Params : r0 = address to jump to
* Notes : This sets up everything for a reset
*/
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm6_reset)
ENTRY(cpu_arm7_reset)
mov r1, #0
@@ -235,6 +236,9 @@ ENTRY(cpu_arm7_reset)
mov r1, #0x30
mcr p15, 0, r1, c1, c0, 0 @ turn off MMU etc
mov pc, r0
ENDPROC(cpu_arm6_reset)
ENDPROC(cpu_arm7_reset)
.popsection
__CPUINIT

arch/arm/mm/proc-arm720.S

@@ -101,6 +101,7 @@ ENTRY(cpu_arm720_set_pte_ext)
* Params : r0 = address to jump to
* Notes : This sets up everything for a reset
*/
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm720_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate cache
@@ -112,6 +113,8 @@ ENTRY(cpu_arm720_reset)
bic ip, ip, #0x2100 @ ..v....s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_arm720_reset)
.popsection
__CPUINIT

arch/arm/mm/proc-arm740.S

@@ -49,6 +49,7 @@ ENTRY(cpu_arm740_proc_fin)
* Params : r0 = address to jump to
* Notes : This sets up everything for a reset
*/
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm740_reset)
mov ip, #0
mcr p15, 0, ip, c7, c0, 0 @ invalidate cache
@@ -56,6 +57,8 @@ ENTRY(cpu_arm740_reset)
bic ip, ip, #0x0000000c @ ............wc..
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_arm740_reset)
.popsection
__CPUINIT

arch/arm/mm/proc-arm7tdmi.S

@@ -45,8 +45,11 @@ ENTRY(cpu_arm7tdmi_proc_fin)
* Params : loc(r0) address to jump to
* Purpose : Sets up everything for a reset and jump to the location for soft reset.
*/
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm7tdmi_reset)
mov pc, r0
ENDPROC(cpu_arm7tdmi_reset)
.popsection
__CPUINIT

arch/arm/mm/proc-arm920.S

@@ -85,6 +85,7 @@ ENTRY(cpu_arm920_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm920_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -97,6 +98,8 @@ ENTRY(cpu_arm920_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_arm920_reset)
.popsection
/*
* cpu_arm920_do_idle()

arch/arm/mm/proc-arm922.S

@@ -87,6 +87,7 @@ ENTRY(cpu_arm922_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm922_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -99,6 +100,8 @@ ENTRY(cpu_arm922_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_arm922_reset)
.popsection
/*
* cpu_arm922_do_idle()

arch/arm/mm/proc-arm925.S

@@ -108,6 +108,7 @@ ENTRY(cpu_arm925_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm925_reset)
/* Send software reset to MPU and DSP */
mov ip, #0xff000000
@@ -115,6 +116,8 @@ ENTRY(cpu_arm925_reset)
orr ip, ip, #0x0000ce00
mov r4, #1
strh r4, [ip, #0x10]
ENDPROC(cpu_arm925_reset)
.popsection
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches

arch/arm/mm/proc-arm926.S

@@ -77,6 +77,7 @@ ENTRY(cpu_arm926_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm926_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -89,6 +90,8 @@ ENTRY(cpu_arm926_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_arm926_reset)
.popsection
/*
* cpu_arm926_do_idle()

arch/arm/mm/proc-arm940.S

@@ -48,6 +48,7 @@ ENTRY(cpu_arm940_proc_fin)
* Params : r0 = address to jump to
* Notes : This sets up everything for a reset
*/
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm940_reset)
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ flush I cache
@@ -58,6 +59,8 @@ ENTRY(cpu_arm940_reset)
bic ip, ip, #0x00001000 @ i-cache
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_arm940_reset)
.popsection
/*
* cpu_arm940_do_idle()

arch/arm/mm/proc-arm946.S

@@ -55,6 +55,7 @@ ENTRY(cpu_arm946_proc_fin)
* Params : r0 = address to jump to
* Notes : This sets up everything for a reset
*/
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm946_reset)
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ flush I cache
@@ -65,6 +66,8 @@ ENTRY(cpu_arm946_reset)
bic ip, ip, #0x00001000 @ i-cache
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_arm946_reset)
.popsection
/*
* cpu_arm946_do_idle()

arch/arm/mm/proc-arm9tdmi.S

@@ -45,8 +45,11 @@ ENTRY(cpu_arm9tdmi_proc_fin)
* Params : loc(r0) address to jump to
* Purpose : Sets up everything for a reset and jump to the location for soft reset.
*/
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm9tdmi_reset)
mov pc, r0
ENDPROC(cpu_arm9tdmi_reset)
.popsection
__CPUINIT

arch/arm/mm/proc-fa526.S

@@ -57,6 +57,7 @@ ENTRY(cpu_fa526_proc_fin)
* loc: location to jump to for soft reset
*/
.align 4
.pushsection .idmap.text, "ax"
ENTRY(cpu_fa526_reset)
/* TODO: Use CP8 if possible... */
mov ip, #0
@@ -73,6 +74,8 @@ ENTRY(cpu_fa526_reset)
nop
nop
mov pc, r0
ENDPROC(cpu_fa526_reset)
.popsection
/*
* cpu_fa526_do_idle()

arch/arm/mm/proc-feroceon.S

@@ -98,6 +98,7 @@ ENTRY(cpu_feroceon_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_feroceon_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -110,6 +111,8 @@ ENTRY(cpu_feroceon_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_feroceon_reset)
.popsection
/*
* cpu_feroceon_do_idle()

arch/arm/mm/proc-mohawk.S

@@ -69,6 +69,7 @@ ENTRY(cpu_mohawk_proc_fin)
* (same as arm926)
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_mohawk_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -79,6 +80,8 @@ ENTRY(cpu_mohawk_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_mohawk_reset)
.popsection
/*
* cpu_mohawk_do_idle()

arch/arm/mm/proc-sa110.S

@@ -62,6 +62,7 @@ ENTRY(cpu_sa110_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_sa110_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -74,6 +75,8 @@ ENTRY(cpu_sa110_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_sa110_reset)
.popsection
/*
* cpu_sa110_do_idle(type)

arch/arm/mm/proc-sa1100.S

@@ -70,6 +70,7 @@ ENTRY(cpu_sa1100_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_sa1100_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -82,6 +83,8 @@ ENTRY(cpu_sa1100_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_sa1100_reset)
.popsection
/*
* cpu_sa1100_do_idle(type)

arch/arm/mm/proc-v6.S

@@ -55,6 +55,7 @@ ENTRY(cpu_v6_proc_fin)
* - loc - location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_v6_reset)
mrc p15, 0, r1, c1, c0, 0 @ ctrl register
bic r1, r1, #0x1 @ ...............m
@@ -62,6 +63,8 @@ ENTRY(cpu_v6_reset)
mov r1, #0
mcr p15, 0, r1, c7, c5, 4 @ ISB
mov pc, r0
ENDPROC(cpu_v6_reset)
.popsection
/*
* cpu_v6_do_idle()

arch/arm/mm/proc-v7.S

@@ -63,6 +63,7 @@ ENDPROC(cpu_v7_proc_fin)
* caches disabled.
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_v7_reset)
mrc p15, 0, r1, c1, c0, 0 @ ctrl register
bic r1, r1, #0x1 @ ...............m
@@ -71,6 +72,7 @@ ENTRY(cpu_v7_reset)
isb
mov pc, r0
ENDPROC(cpu_v7_reset)
.popsection
/*
* cpu_v7_do_idle()

arch/arm/mm/proc-xsc3.S

@@ -105,6 +105,7 @@ ENTRY(cpu_xsc3_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_xsc3_reset)
mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
msr cpsr_c, r1 @ reset CPSR
@@ -119,6 +120,8 @@ ENTRY(cpu_xsc3_reset)
@ already containing those two last instructions to survive.
mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs
mov pc, r0
ENDPROC(cpu_xsc3_reset)
.popsection
/*
* cpu_xsc3_do_idle()

arch/arm/mm/proc-xscale.S

@@ -142,6 +142,7 @@ ENTRY(cpu_xscale_proc_fin)
* Beware PXA270 erratum E7.
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_xscale_reset)
mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
msr cpsr_c, r1 @ reset CPSR
@@ -160,6 +161,8 @@ ENTRY(cpu_xscale_reset)
@ already containing those two last instructions to survive.
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mov pc, r0
ENDPROC(cpu_xscale_reset)
.popsection
/*
* cpu_xscale_do_idle()