From ff14c1d01576fb839a925a42596582f6c68a1a1a Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Tue, 1 Nov 2011 15:58:16 +0200
Subject: [PATCH 01/13] x86, mm: Use MAX_DMA_PFN for ZONE_DMA on 32-bit

Use MAX_DMA_PFN, which represents the 16 MB ISA DMA limit, on 32-bit
x86 just like we do on 64-bit.

Acked-by: Tejun Heo
Acked-by: Yinghai Lu
Acked-by: David Rientjes
Signed-off-by: Pekka Enberg
Link: http://lkml.kernel.org/r/1320155902-10424-1-git-send-email-penberg@kernel.org
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/init_32.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 29f7c6d98179..434c97d620c2 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -679,8 +679,7 @@ static void __init zone_sizes_init(void)
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 #ifdef CONFIG_ZONE_DMA
-	max_zone_pfns[ZONE_DMA] =
-		virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
 #endif
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 #ifdef CONFIG_HIGHMEM

From 4c0b2e5f8940fec7cbeafcf641fecd5e746329c5 Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Tue, 1 Nov 2011 15:58:17 +0200
Subject: [PATCH 02/13] x86, mm: Move zone init from paging_init() on 64-bit

This patch introduces a zone_sizes_init() helper function on 64-bit
to make it more similar to 32-bit init.

Acked-by: Tejun Heo
Acked-by: Yinghai Lu
Acked-by: David Rientjes
Signed-off-by: Pekka Enberg
Link: http://lkml.kernel.org/r/1320155902-10424-2-git-send-email-penberg@kernel.org
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/init_64.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index bbaaa005bf0e..3ddda59f7087 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -612,7 +612,7 @@ void __init initmem_init(void)
 }
 #endif
 
-void __init paging_init(void)
+static void __init zone_sizes_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
@@ -623,6 +623,11 @@ void __init paging_init(void)
 	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
 	max_zone_pfns[ZONE_NORMAL] = max_pfn;
 
+	free_area_init_nodes(max_zone_pfns);
+}
+
+void __init paging_init(void)
+{
 	sparse_memory_present_with_active_regions(MAX_NUMNODES);
 	sparse_init();
 
@@ -634,7 +639,7 @@ void __init paging_init(void)
 	 */
 	node_clear_state(0, N_NORMAL_MEMORY);
 
-	free_area_init_nodes(max_zone_pfns);
+	zone_sizes_init();
 }
 
 /*
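[For context on patch 01: a minimal user-space sketch, not kernel code, of why
the old and new expressions yield the same PFN. The constant values below are
assumptions based on the usual 32-bit x86 configuration (3G/1G split, 4 KB
pages), not definitions taken from the tree.]

	#include <stdio.h>

	#define PAGE_SHIFT      12
	#define PAGE_OFFSET     0xC0000000UL              /* assumed 3G/1G split  */
	#define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x1000000) /* 16 MB ISA DMA limit  */
	#define MAX_DMA_PFN     (0x1000000UL >> PAGE_SHIFT)

	/* virt_to_phys() for x86-32 lowmem is just "subtract PAGE_OFFSET" */
	static unsigned long virt_to_phys(unsigned long vaddr)
	{
		return vaddr - PAGE_OFFSET;
	}

	int main(void)
	{
		unsigned long old_way = virt_to_phys(MAX_DMA_ADDRESS) >> PAGE_SHIFT;

		/* both print 4096: the first PFN above the 16 MB ISA DMA window */
		printf("old: %lu new: %lu\n", old_way, MAX_DMA_PFN);
		return 0;
	}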
From e4794640ca408acda18eb31b126f58a58803b9c9 Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Tue, 1 Nov 2011 15:58:18 +0200
Subject: [PATCH 03/13] x86, mm: Use max_pfn instead of highend_pfn

The 'highend_pfn' variable is always set to 'max_pfn', so just use the
latter directly.

Acked-by: Tejun Heo
Acked-by: Yinghai Lu
Signed-off-by: Pekka Enberg
Link: http://lkml.kernel.org/r/1320155902-10424-3-git-send-email-penberg@kernel.org
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/init_32.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 434c97d620c2..5ac0118b7610 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -683,7 +683,7 @@ static void __init zone_sizes_init(void)
 #endif
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 #ifdef CONFIG_HIGHMEM
-	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
+	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
 #endif
 
 	free_area_init_nodes(max_zone_pfns);

From 80b3cac97bc14fdf839d967602e599cbf82ea336 Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Tue, 1 Nov 2011 15:58:19 +0200
Subject: [PATCH 04/13] x86, mm: Wrap ZONE_DMA32 with CONFIG_ZONE_DMA32

In preparation for unifying the 32-bit and 64-bit zone_sizes_init()
implementations, make sure ZONE_DMA32 is wrapped in CONFIG_ZONE_DMA32.

Acked-by: Tejun Heo
Acked-by: Yinghai Lu
Acked-by: David Rientjes
Acked-by: Arun Sharma
Signed-off-by: Pekka Enberg
Link: http://lkml.kernel.org/r/1320155902-10424-4-git-send-email-penberg@kernel.org
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/init_64.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 3ddda59f7087..a9214e6e721a 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -620,7 +620,9 @@ static void __init zone_sizes_init(void)
 #ifdef CONFIG_ZONE_DMA
 	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
 #endif
+#ifdef CONFIG_ZONE_DMA32
 	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
+#endif
 	max_zone_pfns[ZONE_NORMAL] = max_pfn;
 
 	free_area_init_nodes(max_zone_pfns);

From ece838b6257412647197c072fe59dfc6615df144 Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Tue, 1 Nov 2011 15:58:20 +0200
Subject: [PATCH 05/13] x86, mm: Use max_low_pfn for ZONE_NORMAL on 64-bit

64-bit has no highmem, so 'max_low_pfn' is always the same as 'max_pfn'.

Acked-by: Tejun Heo
Acked-by: Yinghai Lu
Acked-by: David Rientjes
Signed-off-by: Pekka Enberg
Link: http://lkml.kernel.org/r/1320155902-10424-5-git-send-email-penberg@kernel.org
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/init_64.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index a9214e6e721a..f6b1f087cced 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -623,7 +623,7 @@ static void __init zone_sizes_init(void)
 #ifdef CONFIG_ZONE_DMA32
 	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
 #endif
-	max_zone_pfns[ZONE_NORMAL] = max_pfn;
+	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
 	free_area_init_nodes(max_zone_pfns);
 }
From 248b52b97da7a712d2263a51d8d84c959f38ef75 Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Tue, 1 Nov 2011 15:58:21 +0200
Subject: [PATCH 06/13] x86, mm: Prepare zone_sizes_init() for unification

Make the 32-bit and 64-bit versions of zone_sizes_init() identical in
preparation for unification.

Acked-by: Tejun Heo
Acked-by: Yinghai Lu
Signed-off-by: Pekka Enberg
Link: http://lkml.kernel.org/r/1320155902-10424-6-git-send-email-penberg@kernel.org
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/init_32.c | 4 ++++
 arch/x86/mm/init_64.c | 3 +++
 2 files changed, 7 insertions(+)

diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 5ac0118b7610..27455b958b8d 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -677,9 +677,13 @@ void __init initmem_init(void)
 static void __init zone_sizes_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
+
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 #ifdef CONFIG_ZONE_DMA
 	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
+#endif
+#ifdef CONFIG_ZONE_DMA32
+	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
 #endif
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 #ifdef CONFIG_HIGHMEM
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index f6b1f087cced..06c4360cf796 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -624,6 +624,9 @@ static void __init zone_sizes_init(void)
 	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
 #endif
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+#ifdef CONFIG_HIGHMEM
+	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
+#endif
 
 	free_area_init_nodes(max_zone_pfns);
 }

From 176239153049a023d060ce95b05f7ef31667e362 Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Tue, 1 Nov 2011 15:58:22 +0200
Subject: [PATCH 07/13] x86, mm: Unify zone_sizes_init()

Now that zone_sizes_init() is identical on 32-bit and 64-bit, move the
code to arch/x86/mm/init.c and use it for both architectures.

Acked-by: Tejun Heo
Acked-by: Yinghai Lu
Signed-off-by: Pekka Enberg
Link: http://lkml.kernel.org/r/1320155902-10424-7-git-send-email-penberg@kernel.org
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/init.h |  2 ++
 arch/x86/mm/init.c          | 23 +++++++++++++++++++++++
 arch/x86/mm/init_32.c       | 19 -------------------
 arch/x86/mm/init_64.c       | 19 -------------------
 4 files changed, 25 insertions(+), 38 deletions(-)

diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
index 8dbe353e41e1..adcc0ae73d09 100644
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -5,6 +5,8 @@
 extern void __init early_ioremap_page_table_range_init(void);
 #endif
 
+extern void __init zone_sizes_init(void);
+
 extern unsigned long __init
 kernel_physical_mapping_init(unsigned long start,
 			     unsigned long end,
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 87488b93a65c..2426b60bb409 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -3,6 +3,7 @@
 #include 
 #include 
 #include 
+#include <linux/bootmem.h>	/* for max_low_pfn */
 
 #include 
 #include 
@@ -15,6 +16,7 @@
 #include 
 #include 
 #include 
+#include <asm/dma.h>		/* for MAX_DMA_PFN */
 
 unsigned long __initdata pgt_buf_start;
 unsigned long __meminitdata pgt_buf_end;
@@ -392,3 +394,24 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 	free_init_pages("initrd memory", start, PAGE_ALIGN(end));
 }
 #endif
+
+void __init zone_sizes_init(void)
+{
+	unsigned long max_zone_pfns[MAX_NR_ZONES];
+
+	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+
+#ifdef CONFIG_ZONE_DMA
+	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
+#endif
+#ifdef CONFIG_ZONE_DMA32
+	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
+#endif
+	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+#ifdef CONFIG_HIGHMEM
+	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
+#endif
+
+	free_area_init_nodes(max_zone_pfns);
+}
+
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 27455b958b8d..3bebaed5021c 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -674,25 +674,6 @@ void __init initmem_init(void)
 }
 #endif /* !CONFIG_NEED_MULTIPLE_NODES */
 
-static void __init zone_sizes_init(void)
-{
-	unsigned long max_zone_pfns[MAX_NR_ZONES];
-
-	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-#ifdef CONFIG_ZONE_DMA
-	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
-#endif
-#ifdef CONFIG_ZONE_DMA32
-	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
-#endif
-	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
-#ifdef CONFIG_HIGHMEM
-	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
-#endif
-
-	free_area_init_nodes(max_zone_pfns);
-}
-
 void __init setup_bootmem_allocator(void)
 {
 	printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 06c4360cf796..6fcce7d34555 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -612,25 +612,6 @@ void __init initmem_init(void)
 }
 #endif
 
-static void __init zone_sizes_init(void)
-{
-	unsigned long max_zone_pfns[MAX_NR_ZONES];
-
-	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-#ifdef CONFIG_ZONE_DMA
-	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
-#endif
-#ifdef CONFIG_ZONE_DMA32
-	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
-#endif
-	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
-#ifdef CONFIG_HIGHMEM
-	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
-#endif
-
-	free_area_init_nodes(max_zone_pfns);
-}
-
 void __init paging_init(void)
 {
 	sparse_memory_present_with_active_regions(MAX_NUMNODES);

From 9af0c7a6fa860698d080481f24a342ba74b68982 Mon Sep 17 00:00:00 2001
From: Ludwig Nussel
Date: Tue, 15 Nov 2011 14:46:46 -0800
Subject: [PATCH 08/13] x86: Fix mmap random address range

On x86_32, casting the unsigned int result of get_random_int() to long
may result in a negative value, so the range of mmap_rnd() on x86_32
was -255 to 255. 32-bit mode on x86_64 used 0 to 255 as intended.

The bug was introduced by commit 675a081 ("x86: unify mmap_{32|64}.c")
in January 2008.

Signed-off-by: Ludwig Nussel
Cc: Linus Torvalds
Cc: harvey.harrison@gmail.com
Cc: "H. Peter Anvin"
Cc: Harvey Harrison
Signed-off-by: Andrew Morton
Link: http://lkml.kernel.org/r/201111152246.pAFMklOB028527@wpaz5.hot.corp.google.com
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/mmap.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 4b5ba85eb5c9..845df6835f9f 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -75,9 +75,9 @@ static unsigned long mmap_rnd(void)
 	 */
 	if (current->flags & PF_RANDOMIZE) {
 		if (mmap_is_ia32())
-			rnd = (long)get_random_int() % (1<<8);
+			rnd = get_random_int() % (1<<8);
 		else
-			rnd = (long)(get_random_int() % (1<<28));
+			rnd = get_random_int() % (1<<28);
 	}
 	return rnd << PAGE_SHIFT;
 }
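[To see the bug patch 08 fixes in isolation, here is a small stand-alone
demo; a sketch, not kernel code. A fixed value stands in for
get_random_int(), and a 32-bit "long" is mimicked with int so the demo
runs on any common two's-complement host.]

	#include <stdio.h>

	int main(void)
	{
		unsigned int r = 0xffffff01u;	/* stand-in for get_random_int() */

		/* mimic a 32-bit "long": the top bit makes the value negative */
		int buggy = (int)r % (1 << 8);		/* prints -255 */

		/* unsigned arithmetic first, as in the fixed code: 0..255 */
		unsigned int fixed = r % (1 << 8);	/* prints 1 */

		printf("buggy=%d fixed=%u\n", buggy, fixed);
		return 0;
	}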
Peter Anvin" Signed-off-by: Andrew Morton Signed-off-by: Ingo Molnar --- arch/x86/kernel/e820.c | 59 +++++++++++++++++------------------------- 1 file changed, 24 insertions(+), 35 deletions(-) diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 303a0e48f076..f655f802260d 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -227,22 +228,38 @@ void __init e820_print_map(char *who) * ____________________33__ * ______________________4_ */ +struct change_member { + struct e820entry *pbios; /* pointer to original bios entry */ + unsigned long long addr; /* address for this change point */ +}; + +static int __init cpcompare(const void *a, const void *b) +{ + struct change_member * const *app = a, * const *bpp = b; + const struct change_member *ap = *app, *bp = *bpp; + + /* + * Inputs are pointers to two elements of change_point[]. If their + * addresses are unequal, their difference dominates. If the addresses + * are equal, then consider one that represents the end of its region + * to be greater than one that does not. + */ + if (ap->addr != bp->addr) + return ap->addr > bp->addr ? 1 : -1; + + return (ap->addr != ap->pbios->addr) - (bp->addr != bp->pbios->addr); +} int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, u32 *pnr_map) { - struct change_member { - struct e820entry *pbios; /* pointer to original bios entry */ - unsigned long long addr; /* address for this change point */ - }; static struct change_member change_point_list[2*E820_X_MAX] __initdata; static struct change_member *change_point[2*E820_X_MAX] __initdata; static struct e820entry *overlap_list[E820_X_MAX] __initdata; static struct e820entry new_bios[E820_X_MAX] __initdata; - struct change_member *change_tmp; unsigned long current_type, last_type; unsigned long long last_addr; - int chgidx, still_changing; + int chgidx; int overlap_entries; int new_bios_entry; int old_nr, new_nr, chg_nr; @@ -279,35 +296,7 @@ int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, chg_nr = chgidx; /* sort change-point list by memory addresses (low -> high) */ - still_changing = 1; - while (still_changing) { - still_changing = 0; - for (i = 1; i < chg_nr; i++) { - unsigned long long curaddr, lastaddr; - unsigned long long curpbaddr, lastpbaddr; - - curaddr = change_point[i]->addr; - lastaddr = change_point[i - 1]->addr; - curpbaddr = change_point[i]->pbios->addr; - lastpbaddr = change_point[i - 1]->pbios->addr; - - /* - * swap entries, when: - * - * curaddr > lastaddr or - * curaddr == lastaddr and curaddr == curpbaddr and - * lastaddr != lastpbaddr - */ - if (curaddr < lastaddr || - (curaddr == lastaddr && curaddr == curpbaddr && - lastaddr != lastpbaddr)) { - change_tmp = change_point[i]; - change_point[i] = change_point[i-1]; - change_point[i-1] = change_tmp; - still_changing = 1; - } - } - } + sort(change_point, chg_nr, sizeof *change_point, cpcompare, 0); /* create a new bios memory map, removing overlaps */ overlap_entries = 0; /* number of entries in the overlap table */ From 706d9a9c8b5758390036b9980a2b12d809599777 Mon Sep 17 00:00:00 2001 From: H Hartley Sweeten Date: Tue, 15 Nov 2011 14:48:56 -0800 Subject: [PATCH 10/13] arch/x86/kernel/e820.c: quiet sparse noise about plain integer as NULL pointer The last parameter to sort() is a pointer to the function used to swap items. This parameter should be NULL, not 0, when not used. 
From 706d9a9c8b5758390036b9980a2b12d809599777 Mon Sep 17 00:00:00 2001
From: H Hartley Sweeten
Date: Tue, 15 Nov 2011 14:48:56 -0800
Subject: [PATCH 10/13] arch/x86/kernel/e820.c: quiet sparse noise about plain integer as NULL pointer

The last parameter to sort() is a pointer to the function used to swap
items. This parameter should be NULL, not 0, when not used.

This quiets the following sparse warning:

  warning: Using plain integer as NULL pointer

Signed-off-by: H Hartley Sweeten
Signed-off-by: Andrew Morton
Cc: hartleys@visionengravers.com
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/e820.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index f655f802260d..d6bd85352c81 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -296,7 +296,7 @@ int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
 	chg_nr = chgidx;
 
 	/* sort change-point list by memory addresses (low -> high) */
-	sort(change_point, chg_nr, sizeof *change_point, cpcompare, 0);
+	sort(change_point, chg_nr, sizeof *change_point, cpcompare, NULL);
 
 	/* create a new bios memory map, removing overlaps */
 	overlap_entries = 0;	/* number of entries in the overlap table */

From 855c743a27bb58a9a521bdc485ef5acfdb69badc Mon Sep 17 00:00:00 2001
From: Stanislaw Gruszka
Date: Tue, 6 Dec 2011 09:08:34 +0100
Subject: [PATCH 11/13] x86/mm: Initialize high mem before free_all_bootmem()

This patch fixes a boot crash with pagealloc debugging enabled:

  Initializing HighMem for node 0 (000377fe:0003fff0)
  BUG: unable to handle kernel paging request at f6fefe80
  IP: [] find_range_array+0x5e/0x69
  [...]
  Call Trace:
  [] __get_free_all_memory_range+0x39/0xb4
  [] add_highpages_with_active_regions+0x18/0x9b
  [] set_highmem_pages_init+0x70/0x90
  [] mem_init+0x50/0x21b
  [] start_kernel+0x1bf/0x31c
  [] i386_start_kernel+0x65/0x67

The crash happens when memblock wants to allocate a big area for a
temporary "struct range" array and reuses pages from the top of low
memory, which were already passed to the buddy allocator.

Reported-by: Ingo Molnar
Signed-off-by: Stanislaw Gruszka
Cc: linux-mm@kvack.org
Cc: Mel Gorman
Link: http://lkml.kernel.org/r/20111206080833.GB3105@redhat.com
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/init_32.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 3bebaed5021c..a2fecb1611cc 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -744,6 +744,17 @@ void __init mem_init(void)
 #ifdef CONFIG_FLATMEM
 	BUG_ON(!mem_map);
 #endif
+	/*
+	 * With CONFIG_DEBUG_PAGEALLOC initialization of highmem pages has to
+	 * be done before free_all_bootmem(). Memblock uses free low memory
+	 * for temporary data (see find_range_array()) and for this purpose
+	 * can use pages that were already passed to the buddy allocator,
+	 * hence marked as not accessible in the page tables when compiled
+	 * with CONFIG_DEBUG_PAGEALLOC. Otherwise the order of initialization
+	 * is not important here.
+	 */
+	set_highmem_pages_init();
+
 	/* this will put all low memory onto the freelists */
 	totalram_pages += free_all_bootmem();
 
@@ -755,8 +766,6 @@ void __init mem_init(void)
 		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
 			reservedpages++;
 
-	set_highmem_pages_init();
-
 	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
 	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
 	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
From 54c29c635ae91f5d75ced7bffeaa77ba37ca02bb Mon Sep 17 00:00:00 2001
From: Stanislaw Gruszka
Date: Tue, 29 Nov 2011 17:05:11 +0100
Subject: [PATCH 12/13] mm, x86: Remove debug_pagealloc_enabled

When (no)bootmem finishes its work, it passes pages to the buddy
allocator. Since debug_pagealloc_enabled is not set at that point,
those pages do not get protected, which is not what we want with
CONFIG_DEBUG_PAGEALLOC=y.

Fix this by removing debug_pagealloc_enabled. The variable was
introduced by commit 12d6f21e ("x86: do not PSE on
CONFIG_DEBUG_PAGEALLOC=y") to get more CPA (change page attribute)
code testing, but we now have CONFIG_CPA_DEBUG, which tests CPA.

Signed-off-by: Stanislaw Gruszka
Acked-by: Mel Gorman
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1322582711-14571-1-git-send-email-sgruszka@redhat.com
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/pageattr.c |  6 ------
 include/linux/mm.h     | 10 ----------
 init/main.c            |  5 -----
 mm/debug-pagealloc.c   |  3 ---
 4 files changed, 24 deletions(-)

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index f9e526742fa1..5031eefa051f 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1333,12 +1333,6 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 				numpages * PAGE_SIZE);
 	}
 
-	/*
-	 * If page allocator is not up yet then do not call c_p_a():
-	 */
-	if (!debug_pagealloc_enabled)
-		return;
-
 	/*
 	 * The return value is ignored as the calls cannot fail.
 	 * Large pages for identity mappings are not used at boot time
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3dc3a8c2c485..0a22db144753 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1537,23 +1537,13 @@ static inline void vm_stat_account(struct mm_struct *mm,
 #endif /* CONFIG_PROC_FS */
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-extern int debug_pagealloc_enabled;
-
 extern void kernel_map_pages(struct page *page, int numpages, int enable);
-
-static inline void enable_debug_pagealloc(void)
-{
-	debug_pagealloc_enabled = 1;
-}
 #ifdef CONFIG_HIBERNATION
 extern bool kernel_page_present(struct page *page);
 #endif /* CONFIG_HIBERNATION */
 #else
 static inline void
 kernel_map_pages(struct page *page, int numpages, int enable) {}
-static inline void enable_debug_pagealloc(void)
-{
-}
 #ifdef CONFIG_HIBERNATION
 static inline bool kernel_page_present(struct page *page) { return true; }
 #endif /* CONFIG_HIBERNATION */
diff --git a/init/main.c b/init/main.c
index 217ed23e9487..99c4ba30ba7e 100644
--- a/init/main.c
+++ b/init/main.c
@@ -282,10 +282,6 @@ static int __init unknown_bootoption(char *param, char *val)
 	return 0;
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
-int __read_mostly debug_pagealloc_enabled = 0;
-#endif
-
 static int __init init_setup(char *str)
 {
 	unsigned int i;
@@ -597,7 +593,6 @@ asmlinkage void __init start_kernel(void)
 	}
 #endif
 	page_cgroup_init();
-	enable_debug_pagealloc();
 	debug_objects_mem_init();
 	kmemleak_init();
 	setup_per_cpu_pageset();
diff --git a/mm/debug-pagealloc.c b/mm/debug-pagealloc.c
index 7cea557407f4..789ff70c8a4a 100644
--- a/mm/debug-pagealloc.c
+++ b/mm/debug-pagealloc.c
@@ -95,9 +95,6 @@ static void unpoison_pages(struct page *page, int n)
 
 void kernel_map_pages(struct page *page, int numpages, int enable)
 {
-	if (!debug_pagealloc_enabled)
-		return;
-
 	if (enable)
 		unpoison_pages(page, numpages);
 	else
From 54eed6cb16ec315565aaaf8e34252ca253a68b7b Mon Sep 17 00:00:00 2001
From: Petr Holasek
Date: Thu, 8 Dec 2011 13:16:41 +0100
Subject: [PATCH 13/13] x86/numa: Add constraints check for nid parameters

This patch adds constraint checks to the numa_set_distance() function.
When a check triggers (this should not happen normally), it emits a
warning and avoids a store to a negative index in the numa_distance[]
array - i.e. it avoids memory corruption.

Negative node ids can be passed when the pxm-to-node mapping is not
properly filled in while parsing the SRAT.

Signed-off-by: Petr Holasek
Acked-by: David Rientjes
Cc: Anton Arapov
Link: http://lkml.kernel.org/r/20111208121640.GA2229@dhcp-27-244.brq.redhat.com
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/numa.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index fbeaaf416610..cdc00543d375 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -430,8 +430,9 @@ static int __init numa_alloc_distance(void)
  * calls are ignored until the distance table is reset with
  * numa_reset_distance().
  *
- * If @from or @to is higher than the highest known node at the time of
- * table creation or @distance doesn't make sense, the call is ignored.
+ * If @from or @to is higher than the highest known node or lower than zero
+ * at the time of table creation or @distance doesn't make sense, the call
+ * is ignored.
  * This is to allow simplification of specific NUMA config implementations.
  */
 void __init numa_set_distance(int from, int to, int distance)
@@ -439,8 +440,9 @@ void __init numa_set_distance(int from, int to, int distance)
 	if (!numa_distance && numa_alloc_distance() < 0)
 		return;
 
-	if (from >= numa_distance_cnt || to >= numa_distance_cnt) {
-		printk_once(KERN_DEBUG "NUMA: Debug: distance out of bound, from=%d to=%d distance=%d\n",
+	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
+	    from < 0 || to < 0) {
+		pr_warn_once("NUMA: Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
 			from, to, distance);
 		return;
 	}
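[To illustrate the guard added in patch 13, a simplified user-space
sketch with a made-up table size and types; not the kernel
implementation. Without the "< 0" checks, a negative node id coming
from a bad SRAT pxm-to-node mapping would index the table before the
start of the array and silently corrupt adjacent memory.]

	#include <stdio.h>

	#define NUMA_DISTANCE_CNT 4
	static unsigned char numa_distance[NUMA_DISTANCE_CNT * NUMA_DISTANCE_CNT];

	static void numa_set_distance(int from, int to, int distance)
	{
		if (from >= NUMA_DISTANCE_CNT || to >= NUMA_DISTANCE_CNT ||
		    from < 0 || to < 0) {
			fprintf(stderr,
				"NUMA: Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
				from, to, distance);
			return;
		}
		numa_distance[from * NUMA_DISTANCE_CNT + to] = distance;
	}

	int main(void)
	{
		numa_set_distance(0, 1, 20);	/* fine */
		numa_set_distance(-1, 1, 20);	/* rejected instead of corrupting */
		return 0;
	}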