mm/slab_common.c: do not warn that cache is busy on destroy more than once

Currently, when kmem_cache_destroy() is called for a global cache, we
print a warning for each per memcg cache attached to it that has active
objects (see shutdown_cache).  This is redundant, because it gives no new
information and only clutters the log.  If a cache being destroyed has
active objects, there must be a memory leak in the module that created the
cache, and it does not matter if the cache was used by users in memory
cgroups or not.

This patch moves the warning from shutdown_cache(), which is called for
shutting down both global and per memcg caches, to kmem_cache_destroy(),
so that the warning is only printed once if there are objects left in the
cache being destroyed.
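
As an illustration of the failure mode described above, here is a minimal, hypothetical out-of-tree module sketch (module, cache, and symbol names are made up) that leaks one object from its cache; unloading it makes __kmem_cache_shutdown() fail and, with this patch, the "Slab cache still has objects" warning is printed exactly once:

#include <linux/module.h>
#include <linux/slab.h>

static struct kmem_cache *leaky_cachep;
static void *leaked_obj;

static int __init leaky_init(void)
{
	leaky_cachep = kmem_cache_create("leaky_cache", 64, 0, 0, NULL);
	if (!leaky_cachep)
		return -ENOMEM;

	/* Allocate one object and deliberately never free it. */
	leaked_obj = kmem_cache_alloc(leaky_cachep, GFP_KERNEL);
	if (!leaked_obj) {
		kmem_cache_destroy(leaky_cachep);
		return -ENOMEM;
	}
	return 0;
}

static void __exit leaky_exit(void)
{
	/*
	 * The leaked object keeps the cache busy, so the shutdown fails
	 * internally and kmem_cache_destroy() reports it once.
	 */
	kmem_cache_destroy(leaky_cachep);
}

module_init(leaky_init);
module_exit(leaky_exit);
MODULE_LICENSE("GPL");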

Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Vladimir Davydov 2015-11-05 18:45:14 -08:00 committed by Linus Torvalds
parent d60fdcc9e3
commit cd918c5574
1 changed file with 7 additions and 6 deletions

@@ -451,12 +451,8 @@ EXPORT_SYMBOL(kmem_cache_create);
 static int shutdown_cache(struct kmem_cache *s,
 		struct list_head *release, bool *need_rcu_barrier)
 {
-	if (__kmem_cache_shutdown(s) != 0) {
-		printk(KERN_ERR "kmem_cache_destroy %s: "
-		       "Slab cache still has objects\n", s->name);
-		dump_stack();
+	if (__kmem_cache_shutdown(s) != 0)
 		return -EBUSY;
-	}
 
 	if (s->flags & SLAB_DESTROY_BY_RCU)
 		*need_rcu_barrier = true;
@@ -722,8 +718,13 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	err = shutdown_memcg_caches(s, &release, &need_rcu_barrier);
 	if (!err)
-		shutdown_cache(s, &release, &need_rcu_barrier);
+		err = shutdown_cache(s, &release, &need_rcu_barrier);
 
+	if (err) {
+		pr_err("kmem_cache_destroy %s: "
+		       "Slab cache still has objects\n", s->name);
+		dump_stack();
+	}
 out_unlock:
 	mutex_unlock(&slab_mutex);