sched: Separate out build of MC sched groups from __build_sched_domains

... to further strip down __build_sched_domains().

Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20090818105838.GI29515@alberich.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Andreas Herrmann authored 2009-08-18 12:58:38 +02:00, committed by Ingo Molnar
parent 0e8e85c941
commit a2af04cdbb
1 changed file with 10 additions and 13 deletions

@@ -8576,6 +8576,15 @@ static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
 					&cpu_to_cpu_group,
 					d->send_covered, d->tmpmask);
 		break;
 #endif
+#ifdef CONFIG_SCHED_MC
+	case SD_LV_MC: /* set up multi-core groups */
+		cpumask_and(d->this_core_map, cpu_map, cpu_coregroup_mask(cpu));
+		if (cpu == cpumask_first(d->this_core_map))
+			init_sched_build_groups(d->this_core_map, cpu_map,
+						&cpu_to_core_group,
+						d->send_covered, d->tmpmask);
+		break;
+#endif
 	default:
 		break;
@@ -8618,21 +8627,9 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 	for_each_cpu(i, cpu_map) {
 		build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
+		build_sched_groups(&d, SD_LV_MC, cpu_map, i);
 	}
-#ifdef CONFIG_SCHED_MC
-	/* Set up multi-core groups */
-	for_each_cpu(i, cpu_map) {
-		cpumask_and(d.this_core_map, cpu_coregroup_mask(i), cpu_map);
-		if (i != cpumask_first(d.this_core_map))
-			continue;
-		init_sched_build_groups(d.this_core_map, cpu_map,
-					&cpu_to_core_group,
-					d.send_covered, d.tmpmask);
-	}
-#endif
 	/* Set up physical groups */
 	for (i = 0; i < nr_node_ids; i++) {
 		cpumask_and(d.nodemask, cpumask_of_node(i), cpu_map);
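
Taken together, the two hunks move the CONFIG_SCHED_MC group construction behind the same per-CPU, per-level switch already used for the SMT siblings, so __build_sched_domains() is left with one loop that calls build_sched_groups() for each level. Below is a minimal user-space sketch of that dispatch pattern under an assumed toy topology; the function and enum names echo the kernel's, but first_sibling(), first_core_cpu() and the plain integer CPU ids are hypothetical stand-ins for the kernel's cpumask machinery, not the actual implementation.

/*
 * Sketch only: a per-level dispatcher plus a single caller loop,
 * mirroring the shape __build_sched_domains() ends up with here.
 */
#include <stdio.h>

#define NR_CPUS 8

enum sched_domain_level { SD_LV_SIBLING, SD_LV_MC };

/* Toy topology (assumption): CPUs {2k, 2k+1} are SMT siblings,
 * and groups of four CPUs share one MC (multi-core) domain. */
static int first_sibling(int cpu)  { return cpu & ~1; }
static int first_core_cpu(int cpu) { return cpu & ~3; }

/* Build groups for one level; act only for the first CPU of each
 * group, mirroring the cpumask_first() checks in the real code. */
static void build_sched_groups(enum sched_domain_level level, int cpu)
{
	switch (level) {
	case SD_LV_SIBLING:
		if (cpu == first_sibling(cpu))
			printf("SMT group starting at CPU %d\n", cpu);
		break;
	case SD_LV_MC:
		if (cpu == first_core_cpu(cpu))
			printf("MC group starting at CPU %d\n", cpu);
		break;
	default:
		break;
	}
}

/* After the commit, the caller is one loop over CPUs and levels. */
int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		build_sched_groups(SD_LV_SIBLING, cpu);
		build_sched_groups(SD_LV_MC, cpu);
	}
	return 0;
}

The "act only for the first CPU of the group" guard in the sketch plays the role of the cpu == cpumask_first(d->this_core_map) test added in the SD_LV_MC case; reusing the same shape for every level is what lets the open-coded #ifdef CONFIG_SCHED_MC loop disappear from __build_sched_domains().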