memcg: flatten task_struct->memcg_oom

task_struct->memcg_oom is a sub-struct containing fields which are used
for async memcg oom handling.  Most task_struct fields aren't packaged
this way and it can lead to unnecessary alignment paddings.  This patch
flattens it.

* task.memcg_oom.memcg          -> task.memcg_in_oom
* task.memcg_oom.gfp_mask       -> task.memcg_oom_gfp_mask
* task.memcg_oom.order          -> task.memcg_oom_order
* task.memcg_oom.may_oom        -> task.memcg_may_oom

In addition, task.memcg_may_oom is relocated to where other bitfields are
which reduces the size of task_struct.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Tejun Heo 2015-11-05 18:46:09 -08:00 committed by Linus Torvalds
parent 55e1ceaf25
commit 626ebc4100
3 changed files with 19 additions and 20 deletions

View file

@@ -406,19 +406,19 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 static inline void mem_cgroup_oom_enable(void)
 {
-	WARN_ON(current->memcg_oom.may_oom);
-	current->memcg_oom.may_oom = 1;
+	WARN_ON(current->memcg_may_oom);
+	current->memcg_may_oom = 1;
 }
 
 static inline void mem_cgroup_oom_disable(void)
 {
-	WARN_ON(!current->memcg_oom.may_oom);
-	current->memcg_oom.may_oom = 0;
+	WARN_ON(!current->memcg_may_oom);
+	current->memcg_may_oom = 0;
 }
 
 static inline bool task_in_memcg_oom(struct task_struct *p)
 {
-	return p->memcg_oom.memcg;
+	return p->memcg_in_oom;
 }
 
 bool mem_cgroup_oom_synchronize(bool wait);

View file

@@ -1473,7 +1473,9 @@ struct task_struct {
 	unsigned sched_reset_on_fork:1;
 	unsigned sched_contributes_to_load:1;
 	unsigned sched_migrated:1;
+#ifdef CONFIG_MEMCG
+	unsigned memcg_may_oom:1;
+#endif
 #ifdef CONFIG_MEMCG_KMEM
 	unsigned memcg_kmem_skip_account:1;
 #endif
@@ -1804,12 +1806,9 @@ struct task_struct {
 	unsigned long trace_recursion;
 #endif /* CONFIG_TRACING */
 #ifdef CONFIG_MEMCG
-	struct memcg_oom_info {
-		struct mem_cgroup *memcg;
-		gfp_t gfp_mask;
-		int order;
-		unsigned int may_oom:1;
-	} memcg_oom;
+	struct mem_cgroup *memcg_in_oom;
+	gfp_t memcg_oom_gfp_mask;
+	int memcg_oom_order;
 #endif
 #ifdef CONFIG_UPROBES
 	struct uprobe_task *utask;

View file

@@ -1661,7 +1661,7 @@ static void memcg_oom_recover(struct mem_cgroup *memcg)
 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
 {
-	if (!current->memcg_oom.may_oom)
+	if (!current->memcg_may_oom)
 		return;
 
 	/*
 	 * We are in the middle of the charge context here, so we
@@ -1678,9 +1678,9 @@ static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
 	 * and when we know whether the fault was overall successful.
 	 */
 	css_get(&memcg->css);
-	current->memcg_oom.memcg = memcg;
-	current->memcg_oom.gfp_mask = mask;
-	current->memcg_oom.order = order;
+	current->memcg_in_oom = memcg;
+	current->memcg_oom_gfp_mask = mask;
+	current->memcg_oom_order = order;
 }
/** /**
@@ -1702,7 +1702,7 @@ static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
  */
 bool mem_cgroup_oom_synchronize(bool handle)
 {
-	struct mem_cgroup *memcg = current->memcg_oom.memcg;
+	struct mem_cgroup *memcg = current->memcg_in_oom;
 	struct oom_wait_info owait;
 	bool locked;
@@ -1730,8 +1730,8 @@ bool mem_cgroup_oom_synchronize(bool handle)
 	if (locked && !memcg->oom_kill_disable) {
 		mem_cgroup_unmark_under_oom(memcg);
 		finish_wait(&memcg_oom_waitq, &owait.wait);
-		mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask,
-					 current->memcg_oom.order);
+		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
+					 current->memcg_oom_order);
 	} else {
 		schedule();
 		mem_cgroup_unmark_under_oom(memcg);
@@ -1748,7 +1748,7 @@ bool mem_cgroup_oom_synchronize(bool handle)
 		memcg_oom_recover(memcg);
 	}
 
 cleanup:
-	current->memcg_in_oom = NULL;
+	current->memcg_in_oom = NULL;
 	css_put(&memcg->css);
 	return true;
 }