1
0
Fork 0

sched/numa: Favor placing a task on the preferred node

A task's preferred node is selected based on the number of faults
recorded for a node but the actual task_numa_migrate() conducts a global
search regardless of the preferred nid. This patch checks if the
preferred nid has capacity and if so, searches for a CPU within that
node. This avoids a global search when the preferred node is not
overloaded.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1381141781-10992-41-git-send-email-mgorman@suse.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
hifive-unleashed-5.1
Mel Gorman 2013-10-07 11:29:18 +01:00 committed by Ingo Molnar
parent fb13c7ee0e
commit 2c8a50aa87
1 changed files with 36 additions and 20 deletions

View File

@ -1052,6 +1052,20 @@ unlock:
rcu_read_unlock();
}
/*
 * Scan every CPU on the destination node (env->dst_nid) and evaluate it
 * as a migration target for env->p via task_numa_compare().  CPUs the
 * task is not allowed to run on are ignored.  @imp is the fault-based
 * improvement score passed through to the comparison.
 */
static void task_numa_find_cpu(struct task_numa_env *env, long imp)
{
	int c;

	for_each_cpu(c, cpumask_of_node(env->dst_nid)) {
		/* Only consider CPUs the source task may migrate to */
		if (cpumask_test_cpu(c, tsk_cpus_allowed(env->p))) {
			env->dst_cpu = c;
			task_numa_compare(env, imp);
		}
	}
}
static int task_numa_migrate(struct task_struct *p)
{
struct task_numa_env env = {
@ -1068,7 +1082,8 @@ static int task_numa_migrate(struct task_struct *p)
};
struct sched_domain *sd;
unsigned long faults;
int nid, cpu, ret;
int nid, ret;
long imp;
/*
* Pick the lowest SD_NUMA domain, as that would have the smallest
@ -1085,28 +1100,29 @@ static int task_numa_migrate(struct task_struct *p)
faults = task_faults(p, env.src_nid);
update_numa_stats(&env.src_stats, env.src_nid);
env.dst_nid = p->numa_preferred_nid;
imp = task_faults(env.p, env.dst_nid) - faults;
update_numa_stats(&env.dst_stats, env.dst_nid);
/* Find an alternative node with relatively better statistics */
for_each_online_node(nid) {
long imp;
if (nid == env.src_nid)
continue;
/* Only consider nodes that recorded more faults */
imp = task_faults(p, nid) - faults;
if (imp < 0)
continue;
env.dst_nid = nid;
update_numa_stats(&env.dst_stats, env.dst_nid);
for_each_cpu(cpu, cpumask_of_node(nid)) {
/* Skip this CPU if the source task cannot migrate */
if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
/*
* If the preferred nid has capacity then use it. Otherwise find an
* alternative node with relatively better statistics.
*/
if (env.dst_stats.has_capacity) {
task_numa_find_cpu(&env, imp);
} else {
for_each_online_node(nid) {
if (nid == env.src_nid || nid == p->numa_preferred_nid)
continue;
env.dst_cpu = cpu;
task_numa_compare(&env, imp);
/* Only consider nodes that recorded more faults */
imp = task_faults(env.p, nid) - faults;
if (imp < 0)
continue;
env.dst_nid = nid;
update_numa_stats(&env.dst_stats, env.dst_nid);
task_numa_find_cpu(&env, imp);
}
}