diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 254698856b8f..9fcece9be85d 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -253,6 +253,7 @@ struct mem_cgroup {
 	/* Index in the kmem_cache->memcg_params.memcg_caches array */
 	int kmemcg_id;
 	enum memcg_kmem_state kmem_state;
+	struct list_head kmem_caches;
 #endif
 
 	int last_scanned_node;
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 1f611ba00f1d..a0cc7a77cda2 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -565,6 +565,8 @@ struct memcg_cache_array {
  * @memcg: Pointer to the memcg this cache belongs to.
  *
  * @children_node: List node for @root_cache->children list.
+ *
+ * @kmem_caches_node: List node for @memcg->kmem_caches list.
  */
 struct memcg_cache_params {
 	struct kmem_cache *root_cache;
@@ -576,6 +578,7 @@ struct memcg_cache_params {
 		struct {
 			struct mem_cgroup *memcg;
 			struct list_head children_node;
+			struct list_head kmem_caches_node;
 		};
 	};
 };
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b822e158b319..834d641dfa8c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2837,6 +2837,7 @@ static int memcg_online_kmem(struct mem_cgroup *memcg)
 	 */
 	memcg->kmemcg_id = memcg_id;
 	memcg->kmem_state = KMEM_ONLINE;
+	INIT_LIST_HEAD(&memcg->kmem_caches);
 
 	return 0;
 }
@@ -4002,9 +4003,9 @@ static struct cftype mem_cgroup_legacy_files[] = {
 #ifdef CONFIG_SLABINFO
 	{
 		.name = "kmem.slabinfo",
-		.seq_start = slab_start,
-		.seq_next = slab_next,
-		.seq_stop = slab_stop,
+		.seq_start = memcg_slab_start,
+		.seq_next = memcg_slab_next,
+		.seq_stop = memcg_slab_stop,
 		.seq_show = memcg_slab_show,
 	},
 #endif
diff --git a/mm/slab.h b/mm/slab.h
index 3ed3336883ed..a08f01016a3f 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -494,6 +494,9 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
 void *slab_start(struct seq_file *m, loff_t *pos);
 void *slab_next(struct seq_file *m, void *p, loff_t *pos);
 void slab_stop(struct seq_file *m, void *p);
+void *memcg_slab_start(struct seq_file *m, loff_t *pos);
+void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
+void memcg_slab_stop(struct seq_file *m, void *p);
 int memcg_slab_show(struct seq_file *m, void *p);
 
 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index c3885032dbce..c3bbeddaeaaf 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -154,6 +154,7 @@ static int init_memcg_params(struct kmem_cache *s,
 		s->memcg_params.root_cache = root_cache;
 		s->memcg_params.memcg = memcg;
 		INIT_LIST_HEAD(&s->memcg_params.children_node);
+		INIT_LIST_HEAD(&s->memcg_params.kmem_caches_node);
 		return 0;
 	}
 
@@ -224,6 +225,7 @@ int memcg_update_all_caches(int num_memcgs)
 static void unlink_memcg_cache(struct kmem_cache *s)
 {
 	list_del(&s->memcg_params.children_node);
+	list_del(&s->memcg_params.kmem_caches_node);
 }
 #else
 static inline int init_memcg_params(struct kmem_cache *s,
@@ -596,6 +598,7 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
 
 	list_add(&s->memcg_params.children_node,
 		 &root_cache->memcg_params.children);
+	list_add(&s->memcg_params.kmem_caches_node, &memcg->kmem_caches);
 
 	/*
 	 * Since readers won't lock (see cache_from_memcg_idx()), we need a
@@ -651,9 +654,8 @@ void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
 
 	get_online_mems();
 	mutex_lock(&slab_mutex);
-	list_for_each_entry_safe(s, s2, &slab_caches, list) {
-		if (is_root_cache(s) || s->memcg_params.memcg != memcg)
-			continue;
+	list_for_each_entry_safe(s, s2, &memcg->kmem_caches,
+				 memcg_params.kmem_caches_node) {
 		/*
 		 * The cgroup is about to be freed and therefore has no charges
 		 * left. Hence, all its caches must be empty by now.
@@ -1201,15 +1203,35 @@ static int slab_show(struct seq_file *m, void *p)
 }
 
 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
-int memcg_slab_show(struct seq_file *m, void *p)
+void *memcg_slab_start(struct seq_file *m, loff_t *pos)
 {
-	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
 
-	if (p == slab_caches.next)
+	mutex_lock(&slab_mutex);
+	return seq_list_start(&memcg->kmem_caches, *pos);
+}
+
+void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos)
+{
+	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+
+	return seq_list_next(p, &memcg->kmem_caches, pos);
+}
+
+void memcg_slab_stop(struct seq_file *m, void *p)
+{
+	mutex_unlock(&slab_mutex);
+}
+
+int memcg_slab_show(struct seq_file *m, void *p)
+{
+	struct kmem_cache *s = list_entry(p, struct kmem_cache,
+					  memcg_params.kmem_caches_node);
+	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+
+	if (p == memcg->kmem_caches.next)
 		print_slabinfo_header(m);
-	if (!is_root_cache(s) && s->memcg_params.memcg == memcg)
-		cache_show(s, m);
+	cache_show(s, m);
 	return 0;
 }
 #endif
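
Illustration (not part of the patch): the change boils down to giving each per-memcg kmem_cache an embedded list node and chaining it on its owning memcg, so that dumping one cgroup's kmem.slabinfo walks a short per-memcg list instead of scanning the global slab_caches list and filtering by owner. The userspace C sketch below mirrors that structure with simplified stand-ins for the kernel's list helpers; the struct and field names are only meant to echo the patch, and the cache names are arbitrary examples.

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

/* simplified stand-ins for the kernel's <linux/list.h> helpers */
static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct mem_cgroup { struct list_head kmem_caches; };

struct kmem_cache {
	const char *name;
	struct list_head kmem_caches_node;	/* echoes memcg_params.kmem_caches_node */
};

int main(void)
{
	struct mem_cgroup memcg;
	struct kmem_cache a = { .name = "dentry" };
	struct kmem_cache b = { .name = "inode_cache" };
	struct list_head *p;

	INIT_LIST_HEAD(&memcg.kmem_caches);
	/* linking a child cache on its memcg, as memcg_create_kmem_cache() now does */
	list_add(&a.kmem_caches_node, &memcg.kmem_caches);
	list_add(&b.kmem_caches_node, &memcg.kmem_caches);

	/* per-memcg walk, the pattern the new seq_file callbacks expose via seq_list_*() */
	for (p = memcg.kmem_caches.next; p != &memcg.kmem_caches; p = p->next)
		printf("%s\n", container_of(p, struct kmem_cache, kmem_caches_node)->name);

	return 0;
}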