
Merge branch 'akpm' (fixes from Andrew Morton)

Merge four more fixes from Andrew Morton.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  lib/scatterlist.c: don't flush_kernel_dcache_page on slab page
  mm: memcg: fix test for child groups
  mm: memcg: lockdep annotation for memcg OOM lock
  mm: memcg: use proper memcg in limit bypass
Linus Torvalds 2013-10-31 16:58:23 -07:00
commit 4f794ee8c4
2 changed files with 27 additions and 30 deletions

lib/scatterlist.c

@@ -577,7 +577,8 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
 		miter->__offset += miter->consumed;
 		miter->__remaining -= miter->consumed;
 
-		if (miter->__flags & SG_MITER_TO_SG)
+		if ((miter->__flags & SG_MITER_TO_SG) &&
+		    !PageSlab(miter->page))
 			flush_kernel_dcache_page(miter->page);
 
 		if (miter->__flags & SG_MITER_ATOMIC) {
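
Why the PageSlab() check: flush_kernel_dcache_page() is unsafe on slab pages, since implementations such as ARM's go through page_mapping(), and slab reuses page->mapping for its own bookkeeping, so the call can trip a VM_BUG_ON or dereference garbage. Slab memory reaches the miter through helpers like sg_copy_from_buffer() on kmalloc'd buffers. A minimal hypothetical trigger sketch, assuming the 3.12-era scatterlist API (demo_fill_slab_sg is illustrative, not from the commit):

#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Hypothetical illustration: kmalloc'd (slab-backed) memory wrapped in
 * a scatterlist and filled via sg_copy_from_buffer(), which iterates
 * with an sg miter and ends in sg_miter_stop() on the slab page. */
static int demo_fill_slab_sg(void *src, size_t len)
{
	struct scatterlist sg;
	void *buf = kmalloc(len, GFP_KERNEL);	/* slab page: PageSlab() is true */

	if (!buf)
		return -ENOMEM;
	sg_init_one(&sg, buf, len);
	/* Before this fix, sg_miter_stop() would call
	 * flush_kernel_dcache_page() on the slab page here. */
	sg_copy_from_buffer(&sg, 1, src, len);
	kfree(buf);
	return 0;
}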

mm/memcontrol.c

@@ -54,6 +54,7 @@
 #include <linux/page_cgroup.h>
 #include <linux/cpu.h>
 #include <linux/oom.h>
+#include <linux/lockdep.h>
 #include "internal.h"
 #include <net/sock.h>
 #include <net/ip.h>
@@ -2046,6 +2047,12 @@ static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
 	return total;
 }
 
+#ifdef CONFIG_LOCKDEP
+static struct lockdep_map memcg_oom_lock_dep_map = {
+	.name = "memcg_oom_lock",
+};
+#endif
+
 static DEFINE_SPINLOCK(memcg_oom_lock);
 
 /*
@@ -2083,7 +2090,8 @@ static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
 			}
 			iter->oom_lock = false;
 		}
-	}
+	} else
+		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
 
 	spin_unlock(&memcg_oom_lock);
 
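
The lockdep hunks above and below annotate the memcg OOM lock, a hand-rolled lock built from per-group flags under a spinlock, which the validator cannot see by itself: a static struct lockdep_map stands in for the lock, mutex_acquire() marks a successful (try)lock, and mutex_release() marks the unlock. A generic sketch of the same pattern, assuming the 3.12-era annotation macros; all my_* names are illustrative, not from the patch:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

#ifdef CONFIG_LOCKDEP
/* Stand-in object that lockdep tracks for the custom lock. */
static struct lockdep_map my_lock_dep_map = {
	.name = "my_custom_lock",
};
#endif

static DEFINE_SPINLOCK(my_state_lock);
static bool my_lock_taken;

static bool my_custom_trylock(void)
{
	bool locked = false;

	spin_lock(&my_state_lock);
	if (!my_lock_taken) {
		my_lock_taken = true;
		locked = true;
		/* args: map, subclass, trylock=1, caller ip */
		mutex_acquire(&my_lock_dep_map, 0, 1, _RET_IP_);
	}
	spin_unlock(&my_state_lock);
	return locked;
}

static void my_custom_unlock(void)
{
	spin_lock(&my_state_lock);
	/* args: map, nested=1, caller ip (3.12-era signature) */
	mutex_release(&my_lock_dep_map, 1, _RET_IP_);
	my_lock_taken = false;
	spin_unlock(&my_state_lock);
}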
@@ -2095,6 +2103,7 @@ static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
 	struct mem_cgroup *iter;
 
 	spin_lock(&memcg_oom_lock);
+	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
 	for_each_mem_cgroup_tree(iter, memcg)
 		iter->oom_lock = false;
 	spin_unlock(&memcg_oom_lock);
@@ -2765,10 +2774,10 @@ done:
 	*ptr = memcg;
 	return 0;
 nomem:
-	*ptr = NULL;
-	if (gfp_mask & __GFP_NOFAIL)
-		return 0;
-	return -ENOMEM;
+	if (!(gfp_mask & __GFP_NOFAIL)) {
+		*ptr = NULL;
+		return -ENOMEM;
+	}
 bypass:
 	*ptr = root_mem_cgroup;
 	return -EINTR;
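
The hunk above fixes the limit-bypass path: a failing __GFP_NOFAIL charge used to return success with *ptr left NULL, so callers would commit the charge to a NULL group; it now falls through to bypass and hands back root_mem_cgroup with -EINTR. A sketch of the caller contract, modeled on mem_cgroup_charge_common() and assuming the 3.12-era internal signatures (demo_charge is illustrative, not from the patch):

static int demo_charge(struct mm_struct *mm, struct page *page, gfp_t gfp_mask)
{
	struct mem_cgroup *memcg = NULL;
	int ret;

	ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &memcg, true);
	if (ret == -ENOMEM)
		return ret;		/* hard failure, memcg stays NULL */
	/*
	 * ret == 0 (charged) or ret == -EINTR (charge bypassed): memcg
	 * is now always a valid group -- root_mem_cgroup in the bypass
	 * case -- so the commit step below never sees NULL.
	 */
	__mem_cgroup_commit_charge(memcg, page, 1,
				   MEM_CGROUP_CHARGE_TYPE_ANON, false);
	return 0;
}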
@@ -4950,31 +4959,18 @@ static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
 	} while (usage > 0);
 }
 
-/*
- * This mainly exists for tests during the setting of set of use_hierarchy.
- * Since this is the very setting we are changing, the current hierarchy value
- * is meaningless
- */
-static inline bool __memcg_has_children(struct mem_cgroup *memcg)
-{
-	struct cgroup_subsys_state *pos;
-
-	/* bounce at first found */
-	css_for_each_child(pos, &memcg->css)
-		return true;
-	return false;
-}
-
-/*
- * Must be called with memcg_create_mutex held, unless the cgroup is guaranteed
- * to be already dead (as in mem_cgroup_force_empty, for instance). This is
- * from mem_cgroup_count_children(), in the sense that we don't really care how
- * many children we have; we only need to know if we have any. It also counts
- * any memcg without hierarchy as infertile.
- */
 static inline bool memcg_has_children(struct mem_cgroup *memcg)
 {
-	return memcg->use_hierarchy && __memcg_has_children(memcg);
+	lockdep_assert_held(&memcg_create_mutex);
+	/*
+	 * The lock does not prevent addition or deletion to the list
+	 * of children, but it prevents a new child from being
+	 * initialized based on this parent in css_online(), so it's
+	 * enough to decide whether hierarchically inherited
+	 * attributes can still be changed or not.
+	 */
+	return memcg->use_hierarchy &&
+		!list_empty(&memcg->css.cgroup->children);
 }
 
 /*
@@ -5054,7 +5050,7 @@ static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
 	 */
 	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
 	    (val == 1 || val == 0)) {
-		if (!__memcg_has_children(memcg))
+		if (list_empty(&memcg->css.cgroup->children))
 			memcg->use_hierarchy = val;
 		else
 			retval = -EBUSY;