
arm64: ptdump: Don't iterate kernel page tables using PTRS_PER_PXX

When 52-bit virtual addressing is enabled for userspace
(CONFIG_ARM64_USER_VA_BITS_52=y), the kernel continues to utilise 48-bit
virtual addressing in TTBR1. Consequently, PTRS_PER_PGD reflects the
larger page table size for userspace and the pgd pointer for kernel page
tables is offset before being written to TTBR1.

This means that we can't use PTRS_PER_PGD to iterate over kernel page
tables unless we apply the same offset, which is fiddly to get right and
leads to some non-idiomatic walking code. Instead, just follow the usual
pattern when walking page tables by using a while loop driven by
pXd_offset() and pXd_addr_end().

Reported-by: Qian Cai <cai@lca.pw>
Tested-by: Qian Cai <cai@lca.pw>
Acked-by: Steve Capper <steve.capper@arm.com>
Tested-by: Steve Capper <steve.capper@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Will Deacon 2019-02-04 14:37:38 +00:00
parent 8834f5600c
commit d23c808c6f
1 changed file with 29 additions and 30 deletions
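For context, here is a rough sketch of the arithmetic behind the first paragraph of the commit message. It assumes the 64K-granule, three-level configuration that 52-bit user VAs require; the constants (PGDIR_SHIFT, the 8-byte entry size) are restated for illustration rather than taken from the kernel headers:

#include <stdio.h>

/* Illustrative restatement of the arm64 64K-granule constants; the
 * real definitions live in the arm64 pgtable headers. */
#define PGDIR_SHIFT		42	/* 64K pages, three lookup levels */
#define PTRS_PER_PGD(va_bits)	(1UL << ((va_bits) - PGDIR_SHIFT))

int main(void)
{
	unsigned long user = PTRS_PER_PGD(52);	/* TTBR0 walks 52 VA bits */
	unsigned long kern = PTRS_PER_PGD(48);	/* TTBR1 walks 48 VA bits */

	printf("user pgd entries:   %lu\n", user);	/* 1024 */
	printf("kernel pgd entries: %lu\n", kern);	/*   64 */

	/* The kernel pgd is allocated at the larger (52-bit) size, so
	 * only its final 64 slots are indexed by a 48-bit walk, and
	 * TTBR1 is written with the base offset forward to that slice. */
	printf("ttbr1 pgd offset:   %lu bytes\n", (user - kern) * 8);
	return 0;
}

With those numbers, iterating PTRS_PER_PGD (1024) entries from the unadjusted pgd base visits sixteen times more slots than the kernel's 48-bit region actually spans, which is the mismatch the patch sidesteps.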


@@ -286,74 +286,73 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
 
 }
 
-static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start)
+static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start,
+		     unsigned long end)
 {
-	pte_t *ptep = pte_offset_kernel(pmdp, 0UL);
-	unsigned long addr;
-	unsigned i;
+	unsigned long addr = start;
+	pte_t *ptep = pte_offset_kernel(pmdp, start);
 
-	for (i = 0; i < PTRS_PER_PTE; i++, ptep++) {
-		addr = start + i * PAGE_SIZE;
+	do {
 		note_page(st, addr, 4, READ_ONCE(pte_val(*ptep)));
-	}
+	} while (ptep++, addr += PAGE_SIZE, addr != end);
 }
 
-static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start)
+static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start,
+		     unsigned long end)
 {
-	pmd_t *pmdp = pmd_offset(pudp, 0UL);
-	unsigned long addr;
-	unsigned i;
+	unsigned long next, addr = start;
+	pmd_t *pmdp = pmd_offset(pudp, start);
 
-	for (i = 0; i < PTRS_PER_PMD; i++, pmdp++) {
+	do {
 		pmd_t pmd = READ_ONCE(*pmdp);
+		next = pmd_addr_end(addr, end);
 
-		addr = start + i * PMD_SIZE;
 		if (pmd_none(pmd) || pmd_sect(pmd)) {
 			note_page(st, addr, 3, pmd_val(pmd));
 		} else {
 			BUG_ON(pmd_bad(pmd));
-			walk_pte(st, pmdp, addr);
+			walk_pte(st, pmdp, addr, next);
 		}
-	}
+	} while (pmdp++, addr = next, addr != end);
 }
 
-static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start)
+static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start,
+		     unsigned long end)
 {
-	pud_t *pudp = pud_offset(pgdp, 0UL);
-	unsigned long addr;
-	unsigned i;
+	unsigned long next, addr = start;
+	pud_t *pudp = pud_offset(pgdp, start);
 
-	for (i = 0; i < PTRS_PER_PUD; i++, pudp++) {
+	do {
 		pud_t pud = READ_ONCE(*pudp);
+		next = pud_addr_end(addr, end);
 
-		addr = start + i * PUD_SIZE;
 		if (pud_none(pud) || pud_sect(pud)) {
 			note_page(st, addr, 2, pud_val(pud));
 		} else {
 			BUG_ON(pud_bad(pud));
-			walk_pmd(st, pudp, addr);
+			walk_pmd(st, pudp, addr, next);
 		}
-	}
+	} while (pudp++, addr = next, addr != end);
 }
 
 static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
 		     unsigned long start)
 {
-	pgd_t *pgdp = pgd_offset(mm, 0UL);
-	unsigned i;
-	unsigned long addr;
+	unsigned long end = (start < TASK_SIZE_64) ? TASK_SIZE_64 : 0;
+	unsigned long next, addr = start;
+	pgd_t *pgdp = pgd_offset(mm, start);
 
-	for (i = 0; i < PTRS_PER_PGD; i++, pgdp++) {
+	do {
 		pgd_t pgd = READ_ONCE(*pgdp);
+		next = pgd_addr_end(addr, end);
 
-		addr = start + i * PGDIR_SIZE;
 		if (pgd_none(pgd)) {
 			note_page(st, addr, 1, pgd_val(pgd));
 		} else {
 			BUG_ON(pgd_bad(pgd));
-			walk_pud(st, pgdp, addr);
+			walk_pud(st, pgdp, addr, next);
 		}
-	}
+	} while (pgdp++, addr = next, addr != end);
 }
 
 void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)
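As an editorial aside, the pXd_addr_end() idiom the commit message describes can be sketched outside the kernel. The boundary helper below mirrors the kernel's generic pmd_addr_end() definition; the sample addresses are arbitrary:

#include <stdio.h>

#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)	/* 2MiB with 4K pages */
#define PMD_MASK	(~(PMD_SIZE - 1))

/* Mirrors the kernel's generic pmd_addr_end(): clamp the next PMD
 * boundary to 'end', using -1 arithmetic so that an 'end' of 0 means
 * "runs to the very top of the address space". */
static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;
	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	unsigned long addr = 0xffff000010000000UL;	/* PMD-aligned */
	unsigned long end  = addr + 5 * PMD_SIZE + 0x1000;
	unsigned long next;

	/* The walking idiom: no PTRS_PER_PMD, no index arithmetic. */
	do {
		next = pmd_addr_end(addr, end);
		printf("pmd entry spans [%#lx, %#lx)\n", addr, next);
	} while (addr = next, addr != end);
	return 0;
}

The -1 comparisons are what let the new walk_pgd() pass end = 0 for kernel addresses above TASK_SIZE_64: a zero end wraps to the top of the address space instead of terminating the walk immediately.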