sched: Avoid SMT siblings in select_idle_sibling() if possible

Keep select_idle_sibling() from picking a sibling thread if there is
an idle core that shares cache.

This fixes SMT balancing in the increasingly common case where an idle,
cache-sharing core is available to balance to.

Tested-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Link: http://lkml.kernel.org/r/1321350377.1421.55.camel@twins
Signed-off-by: Ingo Molnar <mingo@elte.hu>

@@ -2326,7 +2326,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
 	int cpu = smp_processor_id();
 	int prev_cpu = task_cpu(p);
 	struct sched_domain *sd;
-	int i;
+	struct sched_group *sg;
+	int i, smt = 0;
 
 	/*
 	 * If the task is going to be woken-up on this cpu and if it is
@@ -2346,25 +2347,38 @@ static int select_idle_sibling(struct task_struct *p, int target)
 	 * Otherwise, iterate the domains and find an elegible idle cpu.
 	 */
 	rcu_read_lock();
+again:
 	for_each_domain(target, sd) {
-		if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
-			break;
+		if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
+			continue;
 
-		for_each_cpu_and(i, sched_domain_span(sd), tsk_cpus_allowed(p)) {
-			if (idle_cpu(i)) {
-				target = i;
-				break;
+		if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) {
+			if (!smt) {
+				smt = 1;
+				goto again;
 			}
+			break;
 		}
 
-		/*
-		 * Lets stop looking for an idle sibling when we reached
-		 * the domain that spans the current cpu and prev_cpu.
-		 */
-		if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
-		    cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
-			break;
+		sg = sd->groups;
+		do {
+			if (!cpumask_intersects(sched_group_cpus(sg),
+					tsk_cpus_allowed(p)))
+				goto next;
+
+			for_each_cpu(i, sched_group_cpus(sg)) {
+				if (!idle_cpu(i))
+					goto next;
+			}
+
+			target = cpumask_first_and(sched_group_cpus(sg),
+					tsk_cpus_allowed(p));
+			goto done;
+next:
+			sg = sg->next;
+		} while (sg != sd->groups);
 	}
+done:
 	rcu_read_unlock();
 
 	return target;
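
For readers who want the resulting policy in isolation, here is a small, self-contained userspace C sketch of the rule the new group loop implements: prefer a CPU whose entire SMT core is idle, and only settle for a lone idle sibling when no fully idle core exists. Everything in it (NR_CPUS, core_of[], cpu_idle[], pick_target()) is a hypothetical stand-in for illustration, not the kernel's data structures or API.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS	8

/* Hypothetical topology: core_of[cpu] maps each CPU to its physical core. */
static const int core_of[NR_CPUS] = { 0, 0, 1, 1, 2, 2, 3, 3 };

/* Hypothetical idle state, filled in by whatever drives the sketch. */
static bool cpu_idle[NR_CPUS];

/* True if every SMT sibling on 'core' is idle. */
static bool core_fully_idle(int core)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (core_of[cpu] == core && !cpu_idle[cpu])
			return false;
	return true;
}

/* Pick a wakeup target, preferring a CPU whose whole SMT core is idle. */
static int pick_target(int fallback)
{
	/* First pass: a CPU on a completely idle core. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_idle[cpu] && core_fully_idle(core_of[cpu]))
			return cpu;

	/* Second pass: settle for an idle SMT sibling. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_idle[cpu])
			return cpu;

	return fallback;	/* Nothing idle: keep the original target. */
}

int main(void)
{
	/* CPU 1 (sibling of busy CPU 0) and both threads of core 2 are idle. */
	cpu_idle[1] = true;
	cpu_idle[4] = cpu_idle[5] = true;

	printf("target = %d\n", pick_target(0));	/* prints "target = 4" */
	return 0;
}

With the sample state in main(), the sketch picks CPU 4 (a thread on the fully idle core 2) rather than CPU 1, the idle sibling of busy CPU 0, which mirrors the preference this patch gives select_idle_sibling().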