From 5556cfe8d994d5e7b4d50fd91597b8dc0b3a82fd Mon Sep 17 00:00:00 2001
From: Vlastimil Babka
Date: Mon, 14 Oct 2019 14:11:40 -0700
Subject: [PATCH] mm, page_owner: fix off-by-one error in __set_page_owner_handle()

Patch series "followups to debug_pagealloc improvements through
page_owner", v3.

These are followups to [1] which made it to Linus meanwhile.  Patches 1
and 3 are based on Kirill's review, patch 2 on KASAN request [2].  It
would be nice if all of this made it to 5.4 with [1] already there (or
at least patch 1).

This patch (of 3):

As noted by Kirill, commit 7e2f2a0cd17c ("mm, page_owner: record page
owner for each subpage") has introduced an off-by-one error in
__set_page_owner_handle() when looking up page_ext for subpages.  As a
result, the head page page_owner info is set twice, while for the last
tail page, it's not set at all.

Fix this and also make the code more efficient by advancing the page_ext
pointer we already have, instead of calling lookup_page_ext() for each
subpage.  Since the full size of struct page_ext is not known at compile
time, we can't use a simple page_ext++ statement, so introduce a
page_ext_next() inline function for that.

Link: http://lkml.kernel.org/r/20190930122916.14969-2-vbabka@suse.cz
Fixes: 7e2f2a0cd17c ("mm, page_owner: record page owner for each subpage")
Signed-off-by: Vlastimil Babka
Reported-by: Kirill A. Shutemov
Reported-by: Miles Chen
Acked-by: Kirill A. Shutemov
Cc: Andrey Ryabinin
Cc: Dmitry Vyukov
Cc: Walter Wu
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/page_ext.h |  8 ++++++++
 mm/page_ext.c            | 23 +++++++++--------------
 mm/page_owner.c          | 15 +++++++--------
 3 files changed, 24 insertions(+), 22 deletions(-)

diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 682fd465df06..5e856512bafb 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -36,6 +36,7 @@ struct page_ext {
 	unsigned long flags;
 };
 
+extern unsigned long page_ext_size;
 extern void pgdat_page_ext_init(struct pglist_data *pgdat);
 
 #ifdef CONFIG_SPARSEMEM
@@ -52,6 +53,13 @@ static inline void page_ext_init(void)
 
 struct page_ext *lookup_page_ext(const struct page *page);
 
+static inline struct page_ext *page_ext_next(struct page_ext *curr)
+{
+	void *next = curr;
+	next += page_ext_size;
+	return next;
+}
+
 #else /* !CONFIG_PAGE_EXTENSION */
 
 struct page_ext;
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 5f5769c7db3b..4ade843ff588 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -67,8 +67,9 @@ static struct page_ext_operations *page_ext_ops[] = {
 #endif
 };
 
+unsigned long page_ext_size = sizeof(struct page_ext);
+
 static unsigned long total_usage;
-static unsigned long extra_mem;
 
 static bool __init invoke_need_callbacks(void)
 {
@@ -78,9 +79,8 @@ static bool __init invoke_need_callbacks(void)
 
 	for (i = 0; i < entries; i++) {
 		if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
-			page_ext_ops[i]->offset = sizeof(struct page_ext) +
-						extra_mem;
-			extra_mem += page_ext_ops[i]->size;
+			page_ext_ops[i]->offset = page_ext_size;
+			page_ext_size += page_ext_ops[i]->size;
 			need = true;
 		}
 	}
@@ -99,14 +99,9 @@ static void __init invoke_init_callbacks(void)
 	}
 }
 
-static unsigned long get_entry_size(void)
-{
-	return sizeof(struct page_ext) + extra_mem;
-}
-
 static inline struct page_ext *get_entry(void *base, unsigned long index)
 {
-	return base + get_entry_size() * index;
+	return base + page_ext_size * index;
 }
 
 #if !defined(CONFIG_SPARSEMEM)
@@ -156,7 +151,7 @@ static int __init alloc_node_page_ext(int nid)
 		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
 		nr_pages += MAX_ORDER_NR_PAGES;
 
-	table_size = get_entry_size() * nr_pages;
+	table_size = page_ext_size * nr_pages;
 
 	base = memblock_alloc_try_nid(
 			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
@@ -234,7 +229,7 @@ static int __meminit init_section_page_ext(unsigned long pfn, int nid)
 	if (section->page_ext)
 		return 0;
 
-	table_size = get_entry_size() * PAGES_PER_SECTION;
+	table_size = page_ext_size * PAGES_PER_SECTION;
 	base = alloc_page_ext(table_size, nid);
 
 	/*
@@ -254,7 +249,7 @@ static int __meminit init_section_page_ext(unsigned long pfn, int nid)
 	 * we need to apply a mask.
 	 */
 	pfn &= PAGE_SECTION_MASK;
-	section->page_ext = (void *)base - get_entry_size() * pfn;
+	section->page_ext = (void *)base - page_ext_size * pfn;
 	total_usage += table_size;
 	return 0;
 }
@@ -267,7 +262,7 @@ static void free_page_ext(void *addr)
 		struct page *page = virt_to_page(addr);
 		size_t table_size;
 
-		table_size = get_entry_size() * PAGES_PER_SECTION;
+		table_size = page_ext_size * PAGES_PER_SECTION;
 
 		BUG_ON(PageReserved(page));
 		kmemleak_free(addr);
diff --git a/mm/page_owner.c b/mm/page_owner.c
index dee931184788..d3cf5d336ccf 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -156,10 +156,10 @@ void __reset_page_owner(struct page *page, unsigned int order)
 	handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
 #endif
 
+	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
 	for (i = 0; i < (1 << order); i++) {
-		page_ext = lookup_page_ext(page + i);
-		if (unlikely(!page_ext))
-			continue;
 		__clear_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
 #ifdef CONFIG_DEBUG_PAGEALLOC
 		if (debug_pagealloc_enabled()) {
@@ -167,6 +167,7 @@ void __reset_page_owner(struct page *page, unsigned int order)
 			page_owner->free_handle = handle;
 		}
 #endif
+		page_ext = page_ext_next(page_ext);
 	}
 }
 
@@ -186,7 +187,7 @@ static inline void __set_page_owner_handle(struct page *page,
 		__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
 		__set_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
 
-		page_ext = lookup_page_ext(page + i);
+		page_ext = page_ext_next(page_ext);
 	}
 }
 
@@ -224,12 +225,10 @@ void __split_page_owner(struct page *page, unsigned int order)
 	if (unlikely(!page_ext))
 		return;
 
-	page_owner = get_page_owner(page_ext);
-	page_owner->order = 0;
-
-	for (i = 1; i < (1 << order); i++) {
-		page_ext = lookup_page_ext(page + i);
+	for (i = 0; i < (1 << order); i++) {
 		page_owner = get_page_owner(page_ext);
 		page_owner->order = 0;
+		page_ext = page_ext_next(page_ext);
 	}
 }
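
For illustration, below is a minimal userspace sketch of the iteration bug and
of the fix's pattern of advancing a runtime-sized entry pointer.  It is not
part of the patch: struct ext, ext_size, lookup_ext() and ext_next() are
simplified stand-ins for struct page_ext, page_ext_size, lookup_page_ext()
and page_ext_next(), assuming a plain array of runtime-sized entries rather
than the kernel's page_ext machinery.

/*
 * Standalone sketch of the off-by-one fixed above, using stand-in types.
 * "ext_size" plays the role of page_ext_size, lookup_ext() that of
 * lookup_page_ext(), and ext_next() that of page_ext_next().
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ext { unsigned long owner_set; };

/* entry stride is only known at runtime, like page_ext_size */
static unsigned long ext_size = sizeof(struct ext) + 8;
static void *ext_base;

static struct ext *lookup_ext(unsigned long index)
{
	return (struct ext *)((char *)ext_base + ext_size * index);
}

/* counterpart of page_ext_next(): advance by the runtime stride */
static struct ext *ext_next(struct ext *curr)
{
	return (struct ext *)((char *)curr + ext_size);
}

static void mark_buggy(unsigned long nr)
{
	/* old pattern: re-look-up index i at the end of iteration i,
	 * so entry 0 is marked twice and entry nr - 1 never */
	struct ext *e = lookup_ext(0);
	unsigned long i;

	for (i = 0; i < nr; i++) {
		e->owner_set++;
		e = lookup_ext(i);	/* off by one: should be i + 1 */
	}
}

static void mark_fixed(unsigned long nr)
{
	/* new pattern: advance the pointer we already have */
	struct ext *e = lookup_ext(0);
	unsigned long i;

	for (i = 0; i < nr; i++) {
		e->owner_set++;
		e = ext_next(e);
	}
}

int main(void)
{
	unsigned long nr = 4, i;

	ext_base = calloc(nr, ext_size);
	if (!ext_base)
		return 1;

	mark_buggy(nr);
	for (i = 0; i < nr; i++)
		printf("buggy: entry %lu marked %lu time(s)\n",
		       i, lookup_ext(i)->owner_set);

	memset(ext_base, 0, nr * ext_size);
	mark_fixed(nr);
	for (i = 0; i < nr; i++)
		printf("fixed: entry %lu marked %lu time(s)\n",
		       i, lookup_ext(i)->owner_set);

	free(ext_base);
	return 0;
}

With nr = 4, the buggy loop reports entry 0 marked twice and entry 3 not at
all, mirroring the head page being set twice and the last tail page never;
the fixed loop marks each entry exactly once.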