diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index bc6f45421c98..fa1bfce10370 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -23,6 +23,7 @@ struct cpu_topology_s390 {
 };
 
 extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
+extern cpumask_t cpus_with_topology;
 
 #define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
 #define topology_thread_id(cpu) (cpu_topology[cpu].thread_id)
@@ -36,6 +37,7 @@ extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
 
 #define mc_capable() 1
 
+void topology_init_early(void);
 int topology_cpu_init(struct cpu *);
 int topology_set_cpu_management(int fc);
 void topology_schedule_update(void);
@@ -45,6 +47,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
 
 #else /* CONFIG_SCHED_TOPOLOGY */
 
+static inline void topology_init_early(void) { }
 static inline void topology_schedule_update(void) { }
 static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
 static inline void topology_expect_change(void) { }
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index aba3c5ce1559..adfac9f0a89f 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -924,6 +924,7 @@ void __init setup_arch(char **cmdline_p)
 	cpu_init();
 	numa_setup();
 	smp_detect_cpus();
+	topology_init_early();
 
 	/*
 	 * Create kernel page tables and switch to virtual addressing.
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 7169d112c91a..93dcbae1e98d 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -7,6 +7,7 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/workqueue.h>
+#include <linux/bootmem.h>
 #include <linux/cpuset.h>
 #include <linux/device.h>
 #include <linux/export.h>
@@ -51,6 +52,8 @@ static struct mask_info drawer_info;
 struct cpu_topology_s390 cpu_topology[NR_CPUS];
 EXPORT_SYMBOL_GPL(cpu_topology);
 
+cpumask_t cpus_with_topology;
+
 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
 	cpumask_t mask;
@@ -106,6 +109,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
 			cpumask_set_cpu(lcpu + i, &drawer->mask);
 			cpumask_set_cpu(lcpu + i, &book->mask);
 			cpumask_set_cpu(lcpu + i, &socket->mask);
+			cpumask_set_cpu(lcpu + i, &cpus_with_topology);
 			smp_cpu_set_polarization(lcpu + i, tl_core->pp);
 		}
 	}
@@ -231,6 +235,8 @@ static void update_cpu_masks(void)
 			topo->socket_id = cpu;
 			topo->book_id = cpu;
 			topo->drawer_id = cpu;
+			if (cpu_present(cpu))
+				cpumask_set_cpu(cpu, &cpus_with_topology);
 		}
 	}
 	numa_update_cpu_topology();
@@ -241,12 +247,12 @@ void store_topology(struct sysinfo_15_1_x *info)
 	stsi(info, 15, 1, min(topology_max_mnest, 4));
 }
 
-int arch_update_cpu_topology(void)
+static int __arch_update_cpu_topology(void)
 {
 	struct sysinfo_15_1_x *info = tl_info;
-	struct device *dev;
-	int cpu, rc = 0;
+	int rc = 0;
 
+	cpumask_clear(&cpus_with_topology);
 	if (MACHINE_HAS_TOPOLOGY) {
 		rc = 1;
 		store_topology(info);
@@ -255,6 +261,15 @@ int arch_update_cpu_topology(void)
 	update_cpu_masks();
 	if (!MACHINE_HAS_TOPOLOGY)
 		topology_update_polarization_simple();
+	return rc;
+}
+
+int arch_update_cpu_topology(void)
+{
+	struct device *dev;
+	int cpu, rc;
+
+	rc = __arch_update_cpu_topology();
 	for_each_online_cpu(cpu) {
 		dev = get_cpu_device(cpu);
 		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
@@ -438,20 +453,20 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info,
 		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
 	nr_masks = max(nr_masks, 1);
 	for (i = 0; i < nr_masks; i++) {
-		mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL);
+		mask->next = memblock_virt_alloc(sizeof(*mask->next), 8);
 		mask = mask->next;
 	}
 }
 
-static int __init s390_topology_init(void)
+void __init topology_init_early(void)
 {
 	struct sysinfo_15_1_x *info;
 	int i;
 
 	set_sched_topology(s390_topology);
 	if (!MACHINE_HAS_TOPOLOGY)
-		return 0;
-	tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL);
+		goto out;
+	tl_info = memblock_virt_alloc(sizeof(*tl_info), PAGE_SIZE);
 	info = tl_info;
 	store_topology(info);
 	pr_info("The CPU configuration topology of the machine is:");
@@ -461,9 +476,9 @@ static int __init s390_topology_init(void)
 	alloc_masks(info, &socket_info, 1);
 	alloc_masks(info, &book_info, 2);
 	alloc_masks(info, &drawer_info, 3);
-	return 0;
+out:
+	__arch_update_cpu_topology();
 }
-early_initcall(s390_topology_init);
 
 static int __init topology_init(void)
 {
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c
index 2ed27e8eb4d4..02b840d8f9af 100644
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -21,6 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/cpumask.h>
 #include <linux/memblock.h>
+#include <linux/bootmem.h>
 #include <linux/node.h>
 #include <linux/memory.h>
 #include <linux/slab.h>
@@ -307,13 +308,11 @@ fail:
 /*
  * Allocate and initialize core to node mapping
  */
-static void create_core_to_node_map(void)
+static void __ref create_core_to_node_map(void)
 {
 	int i;
 
-	emu_cores = kzalloc(sizeof(*emu_cores), GFP_KERNEL);
-	if (emu_cores == NULL)
-		panic("Could not allocate cores to node memory");
+	emu_cores = memblock_virt_alloc(sizeof(*emu_cores), 8);
 	for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++)
 		emu_cores->to_node_id[i] = NODE_ID_FREE;
 }
@@ -354,7 +353,7 @@ static struct toptree *toptree_from_topology(void)
 
 	phys = toptree_new(TOPTREE_ID_PHYS, 1);
 
-	for_each_online_cpu(cpu) {
+	for_each_cpu(cpu, &cpus_with_topology) {
 		top = &cpu_topology[cpu];
 		node = toptree_get_child(phys, 0);
 		drawer = toptree_get_child(node, top->drawer_id);
diff --git a/arch/s390/numa/toptree.c b/arch/s390/numa/toptree.c
index 902d350d859a..26f622b1cd11 100644
--- a/arch/s390/numa/toptree.c
+++ b/arch/s390/numa/toptree.c
@@ -7,6 +7,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/bootmem.h>
 #include <linux/cpumask.h>
 #include <linux/list.h>
 #include <linux/list_sort.h>
@@ -25,10 +26,14 @@
  * RETURNS:
  * Pointer to the new tree node or NULL on error
  */
-struct toptree *toptree_alloc(int level, int id)
+struct toptree __ref *toptree_alloc(int level, int id)
 {
-	struct toptree *res = kzalloc(sizeof(struct toptree), GFP_KERNEL);
+	struct toptree *res;
 
+	if (slab_is_available())
+		res = kzalloc(sizeof(*res), GFP_KERNEL);
+	else
+		res = memblock_virt_alloc(sizeof(*res), 8);
 	if (!res)
 		return res;
 
@@ -65,7 +70,7 @@ static void toptree_remove(struct toptree *cand)
  * cleanly using toptree_remove. Possible children are freed
  * recursively. In the end @cand itself is freed.
  */
-void toptree_free(struct toptree *cand)
+void __ref toptree_free(struct toptree *cand)
 {
 	struct toptree *child, *tmp;
 
@@ -73,7 +78,10 @@ void toptree_free(struct toptree *cand)
 		toptree_remove(cand);
 	toptree_for_each_child_safe(child, tmp, cand)
 		toptree_free(child);
-	kfree(cand);
+	if (slab_is_available())
+		kfree(cand);
+	else
+		memblock_free_early((unsigned long)cand, sizeof(*cand));
 }
 
 /**
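---

Note on the allocation changes above (commentary, not part of the patch): because topology_init_early() now runs from setup_arch(), before the slab allocator is initialized, the toptree code can be entered both at early boot and later at runtime, so it selects its allocator with slab_is_available(). The sketch below shows that pattern in isolation. struct early_obj and the two helper names are made up for illustration; slab_is_available(), kzalloc()/kfree(), memblock_virt_alloc() and memblock_free_early() are the real APIs of this kernel generation (later kernels renamed the memblock helpers). The __ref annotation tells modpost that referencing boot-time memblock code from regular text is intentional.

#include <linux/bootmem.h>	/* memblock_virt_alloc(), memblock_free_early() */
#include <linux/slab.h>		/* kzalloc(), kfree(), slab_is_available() */
#include <linux/init.h>		/* __ref */

struct early_obj {		/* hypothetical payload */
	int data;
};

/* Allocate zeroed memory, working both before and after slab is up. */
static struct early_obj * __ref early_obj_alloc(void)
{
	if (slab_is_available())
		return kzalloc(sizeof(struct early_obj), GFP_KERNEL);
	/* memblock_virt_alloc() returns zeroed memory and panics on failure */
	return memblock_virt_alloc(sizeof(struct early_obj), 8);
}

/* Free with the allocator matching the current boot stage. */
static void __ref early_obj_free(struct early_obj *obj)
{
	if (slab_is_available())
		kfree(obj);
	else
		memblock_free_early((unsigned long)obj, sizeof(*obj));
}

The pattern assumes an object never crosses the slab-initialization boundary between allocation and free; as far as one can tell from the surrounding code, that holds for the patch, since each toptree is built and torn down within a single topology update, and emu_cores/tl_info are never freed.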