mempolicy: sanitize the usage of get_task_policy()

Cleanup + preparation. Every user of get_task_policy() calls it
unconditionally, even if it is not going to use the result.

get_task_policy() is cheap, but this still does not look clean, and
the code becomes simpler if get_task_policy() is called only when the
result is really needed.
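
In short, each lookup goes from eager to lazy. A minimal sketch of the
shape of the change (lookup_vma_policy() is a hypothetical stand-in
for the vma-specific lookup; see the diff below for the real code):

	/* before: task policy fetched eagerly, possibly overwritten */
	struct mempolicy *pol = get_task_policy(task);
	struct mempolicy *vpol = lookup_vma_policy(vma, addr);
	if (vpol)
		pol = vpol;

	/* after: task policy fetched only as the fallback */
	struct mempolicy *pol = lookup_vma_policy(vma, addr);
	if (!pol)
		pol = get_task_policy(task);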

Note: I hope this is correct, but it is not clear why vma_policy_mof()
doesn't fall back to get_task_policy() if ->get_policy() returns NULL.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Oleg Nesterov 2014-10-09 15:27:45 -07:00 committed by Linus Torvalds
parent f15ca78e33
commit 8d90274b3b
1 changed file with 14 additions and 11 deletions

mm/mempolicy.c

@@ -1621,14 +1621,11 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
 struct mempolicy *get_vma_policy(struct task_struct *task,
 		struct vm_area_struct *vma, unsigned long addr)
 {
-	struct mempolicy *pol = get_task_policy(task);
+	struct mempolicy *pol = NULL;
 
 	if (vma) {
 		if (vma->vm_ops && vma->vm_ops->get_policy) {
-			struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
-									addr);
-			if (vpol)
-				pol = vpol;
+			pol = vma->vm_ops->get_policy(vma, addr);
 		} else if (vma->vm_policy) {
 			pol = vma->vm_policy;
 
@@ -1643,12 +1640,15 @@ struct mempolicy *get_vma_policy(struct task_struct *task,
 		}
 	}
 
+	if (!pol)
+		pol = get_task_policy(task);
+
 	return pol;
 }
 
 bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma)
 {
-	struct mempolicy *pol = get_task_policy(task);
+	struct mempolicy *pol = NULL;
 
 	if (vma) {
 		if (vma->vm_ops && vma->vm_ops->get_policy) {
@@ -1660,11 +1660,14 @@ bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma)
 			mpol_cond_put(pol);
 
 			return ret;
+		} else if (vma->vm_policy) {
+			pol = vma->vm_policy;
 		}
-
-		pol = vma->vm_policy;
 	}
 
+	if (!pol)
+		pol = get_task_policy(task);
+
 	return pol->flags & MPOL_F_MOF;
 }
 
@@ -2068,12 +2071,12 @@ retry_cpuset:
  */
 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 {
-	struct mempolicy *pol = get_task_policy(current);
+	struct mempolicy *pol = &default_policy;
 	struct page *page;
 	unsigned int cpuset_mems_cookie;
 
-	if (in_interrupt() || (gfp & __GFP_THISNODE))
-		pol = &default_policy;
+	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
+		pol = get_task_policy(current);
 
 retry_cpuset:
 	cpuset_mems_cookie = read_mems_allowed_begin();
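
For reference, with this patch applied get_vma_policy() reads roughly
as follows (reconstructed from the hunks above; the shared-policy
refcounting context between the first two hunks is elided):

	struct mempolicy *get_vma_policy(struct task_struct *task,
			struct vm_area_struct *vma, unsigned long addr)
	{
		struct mempolicy *pol = NULL;

		if (vma) {
			if (vma->vm_ops && vma->vm_ops->get_policy) {
				pol = vma->vm_ops->get_policy(vma, addr);
			} else if (vma->vm_policy) {
				pol = vma->vm_policy;
				/* ... shared-policy ref handling elided ... */
			}
		}

		if (!pol)
			pol = get_task_policy(task);

		return pol;
	}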