1
0
Fork 0

mm, page_owner: decouple freeing stack trace from debug_pagealloc

Commit 8974558f49 ("mm, page_owner, debug_pagealloc: save and dump
freeing stack trace") enhanced page_owner to also store freeing stack
trace, when debug_pagealloc is also enabled.  KASAN would also like to
do this [1] to improve error reports to debug e.g. UAF issues.

Kirill has suggested that the freeing stack trace saving should be also
possible to be enabled separately from KASAN or debug_pagealloc, i.e.
with an extra boot option.  Qian argued that we have enough options
already, and avoiding the extra overhead is not worth the complications
in the case of a debugging option.  Kirill noted that the extra stack
handle in struct page_owner requires 0.1% of memory.

This patch therefore enables free stack saving whenever page_owner is
enabled, regardless of whether debug_pagealloc or KASAN is also enabled.
KASAN kernels booted with page_owner=on will thus benefit from the
improved error reports.

[1] https://bugzilla.kernel.org/show_bug.cgi?id=203967

[vbabka@suse.cz: v3]
  Link: http://lkml.kernel.org/r/20191007091808.7096-3-vbabka@suse.cz
Link: http://lkml.kernel.org/r/20190930122916.14969-3-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Qian Cai <cai@lca.pw>
Suggested-by: Dmitry Vyukov <dvyukov@google.com>
Suggested-by: Walter Wu <walter-zh.wu@mediatek.com>
Suggested-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Suggested-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Suggested-by: Qian Cai <cai@lca.pw>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
alistair/sunxi64-5.4-dsi
Vlastimil Babka 2019-10-14 14:11:44 -07:00 committed by Linus Torvalds
parent 5556cfe8d9
commit 0fe9a448a0
2 changed files with 10 additions and 21 deletions

View File

@@ -41,6 +41,9 @@ smaller binary while the latter is 1.1 - 2 times faster.
 Both KASAN modes work with both SLUB and SLAB memory allocators.
 
 For better bug detection and nicer reporting, enable CONFIG_STACKTRACE.
+To augment reports with last allocation and freeing stack of the physical page,
+it is recommended to enable also CONFIG_PAGE_OWNER and boot with page_owner=on.
 
 To disable instrumentation for specific files or directories, add a line
 similar to the following to the respective kernel Makefile:

View File

@@ -24,12 +24,10 @@ struct page_owner {
 	short last_migrate_reason;
 	gfp_t gfp_mask;
 	depot_stack_handle_t handle;
-#ifdef CONFIG_DEBUG_PAGEALLOC
 	depot_stack_handle_t free_handle;
-#endif
 };
 
-static bool page_owner_disabled = true;
+static bool page_owner_enabled = false;
 DEFINE_STATIC_KEY_FALSE(page_owner_inited);
 
 static depot_stack_handle_t dummy_handle;
@@ -44,7 +42,7 @@ static int __init early_page_owner_param(char *buf)
 		return -EINVAL;
 
 	if (strcmp(buf, "on") == 0)
-		page_owner_disabled = false;
+		page_owner_enabled = true;
 
 	return 0;
 }
@@ -52,10 +50,7 @@ early_param("page_owner", early_page_owner_param);
 
 static bool need_page_owner(void)
 {
-	if (page_owner_disabled)
-		return false;
-
-	return true;
+	return page_owner_enabled;
 }
 
 static __always_inline depot_stack_handle_t create_dummy_stack(void)
@@ -84,7 +79,7 @@ static noinline void register_early_stack(void)
 
 static void init_page_owner(void)
 {
-	if (page_owner_disabled)
+	if (!page_owner_enabled)
 		return;
 
 	register_dummy_stack();
@@ -148,25 +143,18 @@ void __reset_page_owner(struct page *page, unsigned int order)
 {
 	int i;
 	struct page_ext *page_ext;
-#ifdef CONFIG_DEBUG_PAGEALLOC
 	depot_stack_handle_t handle = 0;
 	struct page_owner *page_owner;
 
-	if (debug_pagealloc_enabled())
-		handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
-#endif
+	handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
 
 	page_ext = lookup_page_ext(page);
 	if (unlikely(!page_ext))
 		return;
 
 	for (i = 0; i < (1 << order); i++) {
 		__clear_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
-#ifdef CONFIG_DEBUG_PAGEALLOC
-		if (debug_pagealloc_enabled()) {
-			page_owner = get_page_owner(page_ext);
-			page_owner->free_handle = handle;
-		}
-#endif
+		page_owner = get_page_owner(page_ext);
+		page_owner->free_handle = handle;
 		page_ext = page_ext_next(page_ext);
 	}
 }
@@ -450,7 +438,6 @@ void __dump_page_owner(struct page *page)
 		stack_trace_print(entries, nr_entries, 0);
 	}
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
 	handle = READ_ONCE(page_owner->free_handle);
 	if (!handle) {
 		pr_alert("page_owner free stack trace missing\n");
@@ -459,7 +446,6 @@ void __dump_page_owner(struct page *page)
 		pr_alert("page last free stack trace:\n");
 		stack_trace_print(entries, nr_entries, 0);
 	}
-#endif
 
 	if (page_owner->last_migrate_reason != -1)
 		pr_alert("page has been migrated, last migrate reason: %s\n",