
mm/debug-pagealloc: cleanup page guard code

Page guard is used by the debug-pagealloc feature.  Currently it is
open-coded, but I think abstracting it makes the core page allocator
code more readable.

There is no functional difference.
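As a rough before/after sketch of the cleanup (condensed from the diff
below; the variable names are those used at the expand() call site),
each open-coded guard-page sequence collapses into one helper call:

	/* before: bookkeeping open-coded at the call site in expand() */
	INIT_LIST_HEAD(&page[size].lru);
	set_page_guard_flag(&page[size]);
	set_page_private(&page[size], high);
	/* Guard pages are not available for any usage */
	__mod_zone_freepage_state(zone, -(1 << high), migratetype);

	/* after: the same bookkeeping behind a single helper */
	set_page_guard(zone, &page[size], high, migratetype);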

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Gioh Kim <gioh.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Joonsoo Kim 2014-12-12 16:55:01 -08:00 committed by Linus Torvalds
parent 4308ce17f6
commit 2847cf95c6
1 changed file with 19 additions and 19 deletions

mm/page_alloc.c

@@ -439,18 +439,29 @@ static int __init debug_guardpage_minorder_setup(char *buf)
 }
 __setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
 
-static inline void set_page_guard_flag(struct page *page)
+static inline void set_page_guard(struct zone *zone, struct page *page,
+				unsigned int order, int migratetype)
 {
 	__set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+	INIT_LIST_HEAD(&page->lru);
+	set_page_private(page, order);
+	/* Guard pages are not available for any usage */
+	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
 }
 
-static inline void clear_page_guard_flag(struct page *page)
+static inline void clear_page_guard(struct zone *zone, struct page *page,
+				unsigned int order, int migratetype)
 {
 	__clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+	set_page_private(page, 0);
+	if (!is_migrate_isolate(migratetype))
+		__mod_zone_freepage_state(zone, (1 << order), migratetype);
 }
 #else
-static inline void set_page_guard_flag(struct page *page) { }
-static inline void clear_page_guard_flag(struct page *page) { }
+static inline void set_page_guard(struct zone *zone, struct page *page,
+				unsigned int order, int migratetype) {}
+static inline void clear_page_guard(struct zone *zone, struct page *page,
+				unsigned int order, int migratetype) {}
 #endif
 
 static inline void set_page_order(struct page *page, unsigned int order)
@@ -581,12 +592,7 @@ static inline void __free_one_page(struct page *page,
 		 * merge with it and move up one order.
 		 */
 		if (page_is_guard(buddy)) {
-			clear_page_guard_flag(buddy);
-			set_page_private(buddy, 0);
-			if (!is_migrate_isolate(migratetype)) {
-				__mod_zone_freepage_state(zone, 1 << order,
-							  migratetype);
-			}
+			clear_page_guard(zone, buddy, order, migratetype);
 		} else {
 			list_del(&buddy->lru);
 			zone->free_area[order].nr_free--;
@@ -861,23 +867,17 @@ static inline void expand(struct zone *zone, struct page *page,
 		size >>= 1;
 		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
-		if (high < debug_guardpage_minorder()) {
+		if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
+			high < debug_guardpage_minorder()) {
 			/*
 			 * Mark as guard pages (or page), that will allow to
 			 * merge back to allocator when buddy will be freed.
 			 * Corresponding page table entries will not be touched,
 			 * pages will stay not present in virtual address space
 			 */
-			INIT_LIST_HEAD(&page[size].lru);
-			set_page_guard_flag(&page[size]);
-			set_page_private(&page[size], high);
-			/* Guard pages are not available for any usage */
-			__mod_zone_freepage_state(zone, -(1 << high),
-						  migratetype);
+			set_page_guard(zone, &page[size], high, migratetype);
 			continue;
 		}
-#endif
 		list_add(&page[size].lru, &area->free_list[migratetype]);
 		area->nr_free++;
 		set_page_order(&page[size], high);
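
The last hunk also swaps the #ifdef block in expand() for an
IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) check.  Because the
!CONFIG_DEBUG_PAGEALLOC branch above provides empty set_page_guard() /
clear_page_guard() stubs, the call site always compiles and is
type-checked, and the compiler elides the branch as dead code when the
option is off.  A minimal standalone sketch of the same idiom (the
MY_FEATURE_ENABLED macro and do_guard() are hypothetical, not kernel
code):

	#include <stdio.h>

	/* Stand-in for the kernel's IS_ENABLED(CONFIG_...): expands to 1 or 0 */
	#define MY_FEATURE_ENABLED 1

	#if MY_FEATURE_ENABLED
	static inline void do_guard(int order)
	{
		printf("guarding order %d\n", order);
	}
	#else
	/* Empty stub: the call site below still compiles when the feature is off */
	static inline void do_guard(int order) { }
	#endif

	int main(void)
	{
		int high = 4;

		/*
		 * Unlike an #ifdef block, this branch is always parsed and
		 * type-checked; when the macro is 0 the compiler drops it
		 * as dead code, so there is no runtime cost either way.
		 */
		if (MY_FEATURE_ENABLED && high < 8)
			do_guard(high);
		return 0;
	}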