From 52219aeaf2dc6f7607704af2c40e3866fb04aed2 Mon Sep 17 00:00:00 2001
From: David Hildenbrand
Date: Thu, 4 Jun 2020 16:48:38 -0700
Subject: [PATCH] mm/memory_hotplug: handle memblocks only with
 CONFIG_ARCH_KEEP_MEMBLOCK

The comment in add_memory_resource() is stale: hotadd_new_pgdat() will no
longer call get_pfn_range_for_nid(), as a hotadded pgdat will simply span
no pages at all, until memory is moved to the zone/node via
move_pfn_range_to_zone() - e.g., when onlining memory blocks.

The only archs that care about memblocks for hotplugged memory (either
for iterating over all system RAM or testing for memory validity) are
arm64, s390x, and powerpc - due to CONFIG_ARCH_KEEP_MEMBLOCK.

Without CONFIG_ARCH_KEEP_MEMBLOCK, we can simply stop messing with
memblocks.

Signed-off-by: David Hildenbrand
Signed-off-by: Andrew Morton
Acked-by: Mike Rapoport
Acked-by: Michal Hocko
Cc: Michal Hocko
Cc: Baoquan He
Cc: Oscar Salvador
Cc: Pankaj Gupta
Cc: Mike Rapoport
Cc: Anshuman Khandual
Link: http://lkml.kernel.org/r/20200422155353.25381-3-david@redhat.com
Signed-off-by: Linus Torvalds
---
 mm/Kconfig          |  3 +++
 mm/memory_hotplug.c | 20 ++++++++++----------
 2 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/mm/Kconfig b/mm/Kconfig
index e3490ecac839..5b28240d2af8 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -133,6 +133,9 @@ config HAVE_FAST_GUP
 	depends on MMU
 	bool
 
+# Don't discard allocated memory used to track "memory" and "reserved" memblocks
+# after early boot, so it can still be used to test for validity of memory.
+# Also, memblocks are updated with memory hot(un)plug.
 config ARCH_KEEP_MEMBLOCK
 	bool
 
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index ee3dcb5ed945..21bc3363a829 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1021,13 +1021,8 @@ int __ref add_memory_resource(int nid, struct resource *res)
 
 	mem_hotplug_begin();
 
-	/*
-	 * Add new range to memblock so that when hotadd_new_pgdat() is called
-	 * to allocate new pgdat, get_pfn_range_for_nid() will be able to find
-	 * this new range and calculate total pages correctly. The range will
-	 * be removed at hot-remove time.
-	 */
-	memblock_add_node(start, size, nid);
+	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
+		memblock_add_node(start, size, nid);
 
 	ret = __try_online_node(nid, false);
 	if (ret < 0)
@@ -1076,7 +1071,8 @@ error:
 	/* rollback pgdat allocation and others */
 	if (new_node)
 		rollback_node_hotadd(nid);
-	memblock_remove(start, size);
+	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
+		memblock_remove(start, size);
 	mem_hotplug_done();
 	return ret;
 }
@@ -1673,8 +1669,12 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
 	mem_hotplug_begin();
 
 	arch_remove_memory(nid, start, size, NULL);
-	memblock_free(start, size);
-	memblock_remove(start, size);
+
+	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
+		memblock_free(start, size);
+		memblock_remove(start, size);
+	}
+
 	__release_memory_resource(start, size);
 
 	try_offline_node(nid);
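
For readers outside the kernel tree, the snippet below is a minimal, standalone sketch of the pattern the patch adopts: guarding the memblock bookkeeping behind a compile-time boolean so the calls vanish when the architecture does not keep memblock after boot. The ARCH_KEEP_MEMBLOCK macro and the *_stub/*_sketch helpers are made-up stand-ins for illustration only; the patch itself uses the kernel's IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) from <linux/kconfig.h> together with the real memblock_add_node()/memblock_remove() helpers.

/*
 * Not kernel code: a hedged userspace sketch of the compile-time guard used
 * in the patch above.  Build with: gcc -O2 -o sketch sketch.c
 */
#include <stdio.h>

/* Mimic CONFIG_ARCH_KEEP_MEMBLOCK=y; set to 0 to mimic =n. */
#define ARCH_KEEP_MEMBLOCK 1

/* Stand-in for the kernel's memblock_add_node(). */
static void memblock_add_node_stub(unsigned long start, unsigned long size,
				   int nid)
{
	printf("memblock: add [%#lx-%#lx) to node %d\n",
	       start, start + size, nid);
}

/* Rough shape of the patched add_memory_resource() hot-add path. */
static int add_memory_resource_sketch(int nid, unsigned long start,
				      unsigned long size)
{
	/*
	 * With the option off, the condition is a constant 0 and the compiler
	 * discards the call; unlike #ifdef, the guarded code is still parsed
	 * and type-checked in every configuration.
	 */
	if (ARCH_KEEP_MEMBLOCK)
		memblock_add_node_stub(start, size, nid);

	/* ... node onlining, memory block creation, etc. elided ... */
	return 0;
}

int main(void)
{
	/* Pretend to hot-add 128 MiB at the 1 GiB mark on node 0. */
	return add_memory_resource_sketch(0, 0x40000000UL, 0x8000000UL);
}

The same reasoning explains why the patch prefers IS_ENABLED() over an #ifdef block: the memblock calls stay visible to the compiler in all configurations, so they keep getting type-checked even on architectures where they are optimized away.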