
x86: do not PSE on CONFIG_DEBUG_PAGEALLOC=y

Get more testing of the c_p_a() code done by not turning off
PSE on DEBUG_PAGEALLOC.

This simplifies the early pagetable setup code, and it tests
the largepage split-up code quite heavily.

In the end, all the largepages will be split up pretty quickly,
so there is no difference from how DEBUG_PAGEALLOC worked before.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Ingo Molnar 2008-01-30 13:33:58 +01:00
parent 9a3dc7804e
commit 12d6f21eac
5 changed files with 29 additions and 14 deletions
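
As the changelog above notes, the point of leaving PSE enabled is that
DEBUG_PAGEALLOC then exercises the c_p_a() split-up path constantly: every
page the allocator hands out or takes back gets mapped or unmapped in the
kernel direct mapping, and whenever that mapping is still a 2M/4M large page
it must first be split. A rough sketch of the path (simplified for
illustration, not verbatim kernel source; the real code is in the pageattr.c
hunks below):

	/* called by the page allocator for every alloc/free with DEBUG_PAGEALLOC */
	void kernel_map_pages(struct page *page, int numpages, int enable)
	{
		/* map with PAGE_KERNEL, or unmap by clearing all protections */
		change_page_attr(page, numpages,
				 enable ? PAGE_KERNEL : __pgprot(0));
	}

change_page_attr() operates on 4k pages, so when it finds the address still
covered by a large page it calls split_large_page() to replace the 2M/4M
mapping with a freshly allocated page of 4k PTEs - which is exactly the code
this patch wants hammered.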


@@ -641,13 +641,6 @@ void __init early_cpu_init(void)
nexgen_init_cpu();
umc_init_cpu();
early_cpu_detect();
#ifdef CONFIG_DEBUG_PAGEALLOC
/* pse is not compatible with on-the-fly unmapping,
* disable it even if the cpus claim to support it.
*/
setup_clear_cpu_cap(X86_FEATURE_PSE);
#endif
}
/* Make sure %fs is initialized properly in idle threads */


@@ -61,13 +61,17 @@ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
static int split_large_page(pte_t *kpte, unsigned long address)
{
pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
gfp_t gfp_flags = GFP_KERNEL;
unsigned long flags;
unsigned long addr;
pte_t *pbase, *tmp;
struct page *base;
int i, level;
base = alloc_pages(GFP_KERNEL, 0);
#ifdef CONFIG_DEBUG_PAGEALLOC
gfp_flags = GFP_ATOMIC;
#endif
base = alloc_pages(gfp_flags, 0);
if (!base)
return -ENOMEM;
@@ -218,6 +222,12 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
numpages * PAGE_SIZE);
}
/*
* If page allocator is not up yet then do not call c_p_a():
*/
if (!debug_pagealloc_enabled)
return;
/*
* the return value is ignored - the calls cannot fail,
* large pages are disabled at boot time.
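
Two details in this hunk are worth spelling out. split_large_page() now
allocates its new pagetable page with GFP_ATOMIC under DEBUG_PAGEALLOC,
because it can be reached from inside the page allocator itself (via
kernel_map_pages()), where sleeping in a GFP_KERNEL allocation is not
allowed. And kernel_map_pages() bails out while debug_pagealloc_enabled is
still 0, since c_p_a() cannot split anything before the page allocator is
up; the flag is only raised right after page_alloc_init() in start_kernel()
(last hunk below). Put together, the function ends up with roughly this
shape (a sketch, not the verbatim source):

	void kernel_map_pages(struct page *page, int numpages, int enable)
	{
		/* highmem pages are not in the kernel direct mapping */
		if (PageHighMem(page))
			return;

		/* If page allocator is not up yet then do not call c_p_a() */
		if (!debug_pagealloc_enabled)
			return;

		/* the return value is ignored - the calls cannot fail */
		change_page_attr(page, numpages,
				 enable ? PAGE_KERNEL : __pgprot(0));

		/* flush only the local TLB; a cross-CPU flush could deadlock here */
		__flush_tlb_all();
	}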


@@ -29,11 +29,6 @@ int change_page_attr(struct page *page, int numpages, pgprot_t prot);
int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot);
void clflush_cache_range(void *addr, int size);
#ifdef CONFIG_DEBUG_PAGEALLOC
/* internal debugging function */
void kernel_map_pages(struct page *page, int numpages, int enable);
#endif
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
#endif


@@ -1118,9 +1118,21 @@ static inline void vm_stat_account(struct mm_struct *mm,
}
#endif /* CONFIG_PROC_FS */
#ifndef CONFIG_DEBUG_PAGEALLOC
#ifdef CONFIG_DEBUG_PAGEALLOC
extern int debug_pagealloc_enabled;
extern void kernel_map_pages(struct page *page, int numpages, int enable);
static inline void enable_debug_pagealloc(void)
{
debug_pagealloc_enabled = 1;
}
#else
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
static inline void enable_debug_pagealloc(void)
{
}
#endif
extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk);


@@ -318,6 +318,10 @@ static int __init unknown_bootoption(char *param, char *val)
return 0;
}
#ifdef CONFIG_DEBUG_PAGEALLOC
int __read_mostly debug_pagealloc_enabled = 0;
#endif
static int __init init_setup(char *str)
{
unsigned int i;
@@ -552,6 +556,7 @@ asmlinkage void __init start_kernel(void)
preempt_disable();
build_all_zonelists();
page_alloc_init();
enable_debug_pagealloc();
printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
parse_early_param();
parse_args("Booting kernel", static_command_line, __start___param,