alistair23-linux/arch/arm/mm/idmap.c
Will Deacon 8903826d0c ARM: idmap: populate identity map pgd at init time using .init.text
When disabling and re-enabling the MMU, it is necessary to have an
identity mapping in place for the code that manipulates the SCTLR, so
that the code does not disappear from under our feet. This is needed
when soft rebooting and when returning from CPU suspend.

This patch allocates a set of page tables during boot and populates them
with an identity mapping for the .idmap.text section. This means that
users of the identity map do not need to manage their own pgd and can
instead annotate their functions with __idmap or, in the case of assembly
code, place them in the correct section.
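
For illustration only (not part of this patch; the function name below is
hypothetical): assuming __idmap from asm/idmap.h, which places a function in
the .idmap.text section, a C function that must keep executing while the MMU
is toggled could be tagged like this:

	#include <asm/idmap.h>

	/* Hypothetical example: must stay mapped while the MMU is disabled. */
	static void __idmap example_mmu_off_hook(void)
	{
		/* code that manipulates the SCTLR would live here */
	}

Assembly code can achieve the same by placing itself in that section
directly, e.g. between .pushsection .idmap.text, "ax" and .popsection.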

Acked-by: Dave Martin <dave.martin@linaro.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: Lorenzo Pieralisi <Lorenzo.Pieralisi@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
2011-12-06 14:04:14 +00:00

#include <linux/kernel.h>

#include <asm/cputype.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>

pgd_t *idmap_pgd;
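
/*
 * Install a 1:1 section mapping for the 2MB pmd granule containing addr:
 * two adjacent 1MB section entries are written, one per hardware entry
 * backing the (classic 2-level) pmd.
 */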
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd = pmd_offset(pud, addr);

	addr = (addr & PMD_MASK) | prot;
	pmd[0] = __pmd(addr);
	addr += SECTION_SIZE;
	pmd[1] = __pmd(addr);
	flush_pmd_entry(pmd);
}
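
/*
 * Walk the pud range and hand each chunk to idmap_add_pmd(). With classic
 * 2-level page tables the pud is folded into the pgd, so this loop runs
 * once per call.
 */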
static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		idmap_add_pmd(pud, addr, next, prot);
	} while (pud++, addr = next, addr != end);
}
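
/*
 * Populate pgd with a 1:1 section mapping of [addr, end) using kernel
 * read/write permissions, setting PMD_BIT4 on pre-ARMv6 cores that
 * require it.
 */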
void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long prot, next;

	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		prot |= PMD_BIT4;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		idmap_add_pud(pgd, addr, next, prot);
	} while (pgd++, addr = next, addr != end);
}

#ifdef CONFIG_SMP
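/*
 * Teardown counterparts to the functions above: clear the section
 * mappings again once a temporary identity mapping is no longer needed.
 */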
static void idmap_del_pmd(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	pmd_clear(pmd);
}

static void idmap_del_pud(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		idmap_del_pmd(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long next;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		idmap_del_pud(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}
#endif

extern char __idmap_text_start[], __idmap_text_end[];
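
/*
 * Build the static identity map for the .idmap.text section at boot, so
 * that users such as soft reboot and CPU suspend can simply switch to
 * idmap_pgd instead of managing their own page tables.
 */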
static int __init init_static_idmap(void)
{
	phys_addr_t idmap_start, idmap_end;

	idmap_pgd = pgd_alloc(&init_mm);
	if (!idmap_pgd)
		return -ENOMEM;

	/* Add an identity mapping for the physical address of the section. */
	idmap_start = virt_to_phys((void *)__idmap_text_start);
	idmap_end = virt_to_phys((void *)__idmap_text_end);

	pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
		(long long)idmap_start, (long long)idmap_end);
	identity_mapping_add(idmap_pgd, idmap_start, idmap_end);

	return 0;
}
arch_initcall(init_static_idmap);

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages. This will then ensure that we have predictable
 * results when turning the MMU off.
 */
void setup_mm_for_reboot(void)
{
	/*
	 * We need access to the user-mode page tables here. For kernel
	 * threads we don't have any user-mode mappings, so we use the
	 * context that we "borrowed".
	 */
	identity_mapping_add(current->active_mm->pgd, 0, TASK_SIZE);
	local_flush_tlb_all();
}