diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 312961ef3ccc..0b361a0b58f6 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1487,7 +1487,6 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
 {
 	/* static buf protected by cpuset_mutex */
 	static nodemask_t cpuset_attach_nodemask_to;
-	struct mm_struct *mm;
 	struct task_struct *task;
 	struct task_struct *leader = cgroup_taskset_first(tset);
 	struct cpuset *cs = css_cs(css);
@@ -1515,26 +1514,31 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
 	}
 
 	/*
-	 * Change mm, possibly for multiple threads in a threadgroup. This is
-	 * expensive and may sleep.
+	 * Change mm, possibly for multiple threads in a threadgroup. This
+	 * is expensive and may sleep and should be moved outside migration
+	 * path proper.
 	 */
 	cpuset_attach_nodemask_to = cs->effective_mems;
-	mm = get_task_mm(leader);
-	if (mm) {
-		mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
+	if (thread_group_leader(leader)) {
+		struct mm_struct *mm = get_task_mm(leader);
 
-		/*
-		 * old_mems_allowed is the same with mems_allowed here, except
-		 * if this task is being moved automatically due to hotplug.
-		 * In that case @mems_allowed has been updated and is empty,
-		 * so @old_mems_allowed is the right nodesets that we migrate
-		 * mm from.
-		 */
-		if (is_memory_migrate(cs)) {
-			cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
-					  &cpuset_attach_nodemask_to);
+		if (mm) {
+			mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
+
+			/*
+			 * old_mems_allowed is the same with mems_allowed
+			 * here, except if this task is being moved
+			 * automatically due to hotplug. In that case
+			 * @mems_allowed has been updated and is empty, so
+			 * @old_mems_allowed is the right nodesets that we
+			 * migrate mm from.
+			 */
+			if (is_memory_migrate(cs)) {
+				cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
+						  &cpuset_attach_nodemask_to);
+			}
+			mmput(mm);
 		}
-		mmput(mm);
 	}
 
 	cs->old_mems_allowed = cpuset_attach_nodemask_to;
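
The net effect of the second hunk is that the mm rebind and the optional memory
migration now run only when the thread group leader itself is part of the
attached taskset; moving an individual non-leader thread no longer touches the
shared mm. Below is a rough userspace sketch of that gating, not kernel code:
fake_task, fake_thread_group_leader() and rebind_and_migrate_mm() are
hypothetical stand-ins for task_struct, thread_group_leader() and the
get_task_mm()/mpol_rebind_mm()/cpuset_migrate_mm() sequence.

/*
 * Userspace sketch of the gating added above; not kernel code.  The names
 * below are hypothetical stand-ins for the kernel structures and helpers.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_task {
	int pid;
	int tgid;	/* thread group id; equals pid for the group leader */
};

static bool fake_thread_group_leader(const struct fake_task *t)
{
	return t->pid == t->tgid;
}

/* Stand-in for the mpol_rebind_mm() + cpuset_migrate_mm() sequence. */
static void rebind_and_migrate_mm(const struct fake_task *leader)
{
	printf("rebinding/migrating mm of process %d\n", leader->tgid);
}

/* Mirrors the tail of cpuset_attach(): @leader is the first task in the set. */
static void attach_mm_work(const struct fake_task *leader)
{
	if (fake_thread_group_leader(leader))
		rebind_and_migrate_mm(leader);
	/* An individual non-leader thread no longer triggers mm work. */
}

int main(void)
{
	struct fake_task whole_process = { .pid = 100, .tgid = 100 };
	struct fake_task lone_thread   = { .pid = 101, .tgid = 100 };

	attach_mm_work(&whole_process);	/* mm work runs for process 100 */
	attach_mm_work(&lone_thread);	/* skipped: not the group leader */
	return 0;
}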