mm: memcontrol: inline memcg->move_lock locking

The wrappers around taking and dropping the memcg->move_lock spinlock add
nothing of value.  Inline the spinlock calls into the callsites.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Authored by Johannes Weiner on 2014-12-10 15:44:05 -08:00, committed by Linus Torvalds
parent 2983331575
commit 354a4783a2
1 changed file with 6 additions and 22 deletions
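
In short, each deleted helper only forwarded its arguments to the corresponding spinlock primitive, so the callsites can take memcg->move_lock directly. Condensed from the diff below (the identifiers are the patch's own, not new code):

	/* Removed: a wrapper that adds nothing over the primitive */
	static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
					 unsigned long *flags)
	{
		spin_lock_irqsave(&memcg->move_lock, *flags);
	}

	/* Its callsites now take and drop the lock themselves */
	spin_lock_irqsave(&memcg->move_lock, *flags);
	...
	spin_unlock_irqrestore(&memcg->move_lock, *flags);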

mm/memcontrol.c

@@ -1522,23 +1522,6 @@ static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
 	return false;
 }
 
-/*
- * Take this lock when
- * - a code tries to modify page's memcg while it's USED.
- * - a code tries to modify page state accounting in a memcg.
- */
-static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
-				 unsigned long *flags)
-{
-	spin_lock_irqsave(&memcg->move_lock, *flags);
-}
-
-static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
-				 unsigned long *flags)
-{
-	spin_unlock_irqrestore(&memcg->move_lock, *flags);
-}
-
 #define K(x) ((x) << (PAGE_SHIFT-10))
 /**
  * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
@@ -2156,9 +2139,9 @@ again:
 	if (atomic_read(&memcg->moving_account) <= 0)
 		return memcg;
 
-	move_lock_mem_cgroup(memcg, flags);
+	spin_lock_irqsave(&memcg->move_lock, *flags);
 	if (memcg != pc->mem_cgroup) {
-		move_unlock_mem_cgroup(memcg, flags);
+		spin_unlock_irqrestore(&memcg->move_lock, *flags);
 		goto again;
 	}
 	*locked = true;
@@ -2176,7 +2159,7 @@ void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked,
 			      unsigned long flags)
 {
 	if (memcg && locked)
-		move_unlock_mem_cgroup(memcg, &flags);
+		spin_unlock_irqrestore(&memcg->move_lock, flags);
 
 	rcu_read_unlock();
 }
@@ -3219,7 +3202,7 @@ static int mem_cgroup_move_account(struct page *page,
 	if (pc->mem_cgroup != from)
 		goto out_unlock;
 
-	move_lock_mem_cgroup(from, &flags);
+	spin_lock_irqsave(&from->move_lock, flags);
 
 	if (!PageAnon(page) && page_mapped(page)) {
 		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
@@ -3243,7 +3226,8 @@ static int mem_cgroup_move_account(struct page *page,
 
 	/* caller should have done css_get */
 	pc->mem_cgroup = to;
-	move_unlock_mem_cgroup(from, &flags);
+	spin_unlock_irqrestore(&from->move_lock, flags);
+
 	ret = 0;
 
 	local_irq_disable();
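
A note on the flags handling that the inlining makes visible: spin_lock_irqsave() is a macro that writes the saved IRQ state into the variable named as its second argument. In mem_cgroup_begin_page_stat() that variable belongs to the caller and arrives as unsigned long *flags, hence the *flags dereference, while mem_cgroup_end_page_stat() later receives the value back and hands it straight to spin_unlock_irqrestore(). A minimal sketch of the same begin/end shape, with hypothetical names (struct demo, demo_begin and demo_end are illustrations, not code from the patch):

	#include <linux/spinlock.h>

	struct demo {
		spinlock_t lock;
	};

	/* The caller owns the flags variable; the macro stores the saved
	 * IRQ state into it through the pointer. */
	static void demo_begin(struct demo *d, unsigned long *flags)
	{
		spin_lock_irqsave(&d->lock, *flags);
	}

	/* The saved state comes back by value and is restored here. */
	static void demo_end(struct demo *d, unsigned long flags)
	{
		spin_unlock_irqrestore(&d->lock, flags);
	}

A caller declares unsigned long flags, passes &flags to demo_begin() and the same flags value to demo_end(), mirroring how mem_cgroup_begin_page_stat() and mem_cgroup_end_page_stat() pair up after this patch.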