
powerpc/mm/book3s/radix: Add mapping statistics

Add statistics that show how memory is mapped within the kernel linear mapping.
This is similar to commit 37cd944c8d ("s390/pgtable: add mapping statistics").

We don't do this for Hash translation mode. Hash uses a single size
(mmu_linear_psize) to map the kernel linear mapping, and we already print
that psize during boot, as below.

"Page orders: linear mapping = 24, virtual = 16, io = 16, vmemmap = 24"

A sample output looks like:

DirectMap4k:           0 kB
DirectMap64k:       18432 kB
DirectMap2M:     1030144 kB
DirectMap1G:    11534336 kB
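
Each DirectMap value is the number of pages of that size in the linear
mapping, scaled to kB; the sample above corresponds to 288 64k pages
(18432 kB), 503 2M pages (1030144 kB) and 11 1G pages (11534336 kB).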

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Aneesh Kumar K.V 2018-08-13 11:14:57 +05:30 committed by Michael Ellerman
parent 241b5f7ffc
commit a2dc009afa
4 changed files with 46 additions and 5 deletions

arch/powerpc/include/asm/book3s/64/pgalloc.h

@@ -227,4 +227,11 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 
 #define check_pgt_cache()	do { } while (0)
 
+extern atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
+static inline void update_page_count(int psize, long count)
+{
+	if (IS_ENABLED(CONFIG_PROC_FS))
+		atomic_long_add(count, &direct_pages_count[psize]);
+}
+
 #endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */
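
update_page_count() takes an MMU page size index and a signed count, so the
same helper can account for both mapping and unmapping. A minimal sketch of a
caller (hypothetical here; the real call site added by this commit is in
create_physical_mapping, further down):

	/* one 2M page was just added to the linear mapping */
	update_page_count(MMU_PAGE_2M, 1);

	/* a negative count decrements, e.g. when a mapping is torn down */
	update_page_count(MMU_PAGE_2M, -1);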

arch/powerpc/include/asm/book3s/64/radix.h

@@ -32,6 +32,9 @@
 #define RADIX_PUD_BAD_BITS		0x60000000000000e0UL
 #define RADIX_PGD_BAD_BITS		0x60000000000000e0UL
 
+#define RADIX_PMD_SHIFT		(PAGE_SHIFT + RADIX_PTE_INDEX_SIZE)
+#define RADIX_PUD_SHIFT		(RADIX_PMD_SHIFT + RADIX_PMD_INDEX_SIZE)
+#define RADIX_PGD_SHIFT		(RADIX_PUD_SHIFT + RADIX_PUD_INDEX_SIZE)
 /*
  * Size of EA range mapped by our pagetables.
  */
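
Assuming the usual radix page table geometry (with either a 4K or 64K base
page size), RADIX_PMD_SHIFT works out to 21 and RADIX_PUD_SHIFT to 30, i.e. a
PMD entry maps 2M and a PUD entry maps 1G, which is why the counters are named
DirectMap2M and DirectMap1G.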

arch/powerpc/mm/pgtable-book3s64.c

@@ -455,3 +455,25 @@ void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
 	return pgtable_free(table, index);
 }
 #endif
+
+#ifdef CONFIG_PROC_FS
+atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
+
+void arch_report_meminfo(struct seq_file *m)
+{
+	/*
+	 * Hash maps the memory with one size mmu_linear_psize.
+	 * So don't bother to print these on hash
+	 */
+	if (!radix_enabled())
+		return;
+	seq_printf(m, "DirectMap4k:    %8lu kB\n",
+		   atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
+	seq_printf(m, "DirectMap64k:   %8lu kB\n",
+		   atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
+	seq_printf(m, "DirectMap2M:    %8lu kB\n",
+		   atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
+	seq_printf(m, "DirectMap1G:    %8lu kB\n",
+		   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
+}
+#endif /* CONFIG_PROC_FS */
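
The shifts convert page counts into kB: << 2 multiplies by 4 (4K pages),
<< 6 by 64, << 11 by 2048 (2M) and << 20 by 1048576 (1G).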

arch/powerpc/mm/pgtable-radix.c

@@ -267,6 +267,7 @@ static int __meminit create_physical_mapping(unsigned long start,
 #else
 	int split_text_mapping = 0;
 #endif
+	int psize;
 
 	start = _ALIGN_UP(start, PAGE_SIZE);
 	for (addr = start; addr < end; addr += mapping_size) {
@@ -280,13 +281,17 @@
 retry:
 		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
 		    mmu_psize_defs[MMU_PAGE_1G].shift &&
-		    PUD_SIZE <= max_mapping_size)
+		    PUD_SIZE <= max_mapping_size) {
 			mapping_size = PUD_SIZE;
-		else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
-			 mmu_psize_defs[MMU_PAGE_2M].shift)
+			psize = MMU_PAGE_1G;
+		} else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
+			   mmu_psize_defs[MMU_PAGE_2M].shift) {
 			mapping_size = PMD_SIZE;
-		else
+			psize = MMU_PAGE_2M;
+		} else {
 			mapping_size = PAGE_SIZE;
+			psize = mmu_virtual_psize;
+		}
 
 		if (split_text_mapping && (mapping_size == PUD_SIZE) &&
 		    (addr <= __pa_symbol(__init_begin)) &&
@@ -297,8 +302,10 @@ retry:
 		if (split_text_mapping && (mapping_size == PMD_SIZE) &&
 		    (addr <= __pa_symbol(__init_begin)) &&
-		    (addr + mapping_size) >= __pa_symbol(_stext))
+		    (addr + mapping_size) >= __pa_symbol(_stext)) {
 			mapping_size = PAGE_SIZE;
+			psize = mmu_virtual_psize;
+		}
 
 		if (mapping_size != previous_size) {
 			print_mapping(start, addr, previous_size);
@@ -316,6 +323,8 @@ retry:
 		rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
 		if (rc)
 			return rc;
+
+		update_page_count(psize, 1);
 	}
 
 	print_mapping(start, addr, mapping_size);
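
With this, every pass of the mapping loop records the psize it actually used,
including when proximity to kernel text forces the fallback to PAGE_SIZE, so
the DirectMap counters stay in step with the linear mapping.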