1
0
Fork 0

SLUB: add CONFIG_SLUB_DEBUG

CONFIG_SLUB_DEBUG can be used to switch off the debugging and sysfs components
of SLUB.  Thus SLUB will be able to replace SLOB.  SLUB can arrange objects in
a denser way than SLOB and the code size should be minimal without debugging
and sysfs support.

Note that CONFIG_SLUB_DEBUG is materially different from CONFIG_SLAB_DEBUG.
CONFIG_SLAB_DEBUG enables slab debugging in SLAB at compile time, whereas SLUB
enables debugging at run time via the slub_debug boot parameter — so the SLUB
debug code should normally always be compiled in.

CONFIG_SLUB_DEBUG can be switched off in the embedded (CONFIG_EMBEDDED)
configuration section.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
hifive-unleashed-5.1
Christoph Lameter 2007-05-09 02:32:44 -07:00 committed by Linus Torvalds
parent 02cbc87446
commit 41ecc55b8a
2 changed files with 123 additions and 75 deletions

View File

@ -504,6 +504,15 @@ config VM_EVENT_COUNTERS
on EMBEDDED systems. /proc/vmstat will only show page counts
if VM event counters are disabled.
config SLUB_DEBUG
	bool "Enable SLUB debugging support" if EMBEDDED
	default y
	help
	  SLUB has extensive debug support features. Disabling these can
	  result in significant savings in code size. This also disables
	  SLUB sysfs support. /sys/slab will not exist and there will be
	  no support for cache validation etc.
choice
prompt "Choose SLAB allocator"
default SLAB

189
mm/slub.c
View File

@ -89,17 +89,25 @@
/*
 * SlabDebug - report whether debugging is active for this slab page.
 * SLUB reuses the page Error flag to mark slabs carrying debug metadata;
 * with CONFIG_SLUB_DEBUG compiled out this constant-folds to 0 so all
 * debug-only paths become dead code.
 */
static inline int SlabDebug(struct page *page)
{
#ifdef CONFIG_SLUB_DEBUG
return PageError(page);
#else
return 0;
#endif
}
/*
 * SetSlabDebug - mark a slab page as carrying debug metadata (sets the
 * page Error flag).  No-op when CONFIG_SLUB_DEBUG is compiled out.
 */
static inline void SetSlabDebug(struct page *page)
{
#ifdef CONFIG_SLUB_DEBUG
SetPageError(page);
#endif
}
/*
 * ClearSlabDebug - remove the debug marking from a slab page (clears the
 * page Error flag).  No-op when CONFIG_SLUB_DEBUG is compiled out.
 */
static inline void ClearSlabDebug(struct page *page)
{
#ifdef CONFIG_SLUB_DEBUG
ClearPageError(page);
#endif
}
/*
@ -207,7 +215,7 @@ struct track {
enum track_item { TRACK_ALLOC, TRACK_FREE };
#ifdef CONFIG_SYSFS
#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void sysfs_slab_remove(struct kmem_cache *);
@ -284,6 +292,14 @@ static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
return (p - addr) / s->size;
}
#ifdef CONFIG_SLUB_DEBUG
/*
 * Debug settings:
 * slub_debug holds the SLAB_* debug flags selected on the kernel command
 * line; slub_debug_slabs, if set, is a name prefix restricting those
 * flags to matching caches (see setup_slub_debug()).
 */
static int slub_debug;
static char *slub_debug_slabs;
/*
* Object debugging
*/
@ -821,6 +837,97 @@ static void trace(struct kmem_cache *s, struct page *page, void *object, int all
}
}
/*
 * Parse the slub_debug kernel command line option.
 *
 * Accepted forms:
 *   slub_debug                   enable the default debug options
 *   slub_debug=<flags>           enable only the listed options
 *   slub_debug=<flags>,<slabs>   restrict the options to caches whose
 *                                name starts with <slabs>
 *
 * Fix: the original code checked !str at the top but still dereferenced
 * str in the trailing "*str == ','" test, a NULL pointer dereference when
 * no argument string is supplied.  Handle the NULL case with an early
 * return instead.
 */
static int __init setup_slub_debug(char *str)
{
	if (!str) {
		/* No argument at all: turn on the default debug options. */
		slub_debug = DEBUG_DEFAULT_FLAGS;
		return 1;
	}
	if (*str != '=')
		slub_debug = DEBUG_DEFAULT_FLAGS;
	else {
		str++;
		if (*str == 0 || *str == ',')
			slub_debug = DEBUG_DEFAULT_FLAGS;
		else
			for ( ; *str && *str != ','; str++)
				switch (*str) {
				case 'f' : case 'F' :
					slub_debug |= SLAB_DEBUG_FREE;
					break;
				case 'z' : case 'Z' :
					slub_debug |= SLAB_RED_ZONE;
					break;
				case 'p' : case 'P' :
					slub_debug |= SLAB_POISON;
					break;
				case 'u' : case 'U' :
					slub_debug |= SLAB_STORE_USER;
					break;
				case 't' : case 'T' :
					slub_debug |= SLAB_TRACE;
					break;
				default:
					printk(KERN_ERR "slub_debug option '%c' "
						"unknown. skipped\n",*str);
				}
	}

	/* Anything after a comma restricts the flags to matching caches. */
	if (*str == ',')
		slub_debug_slabs = str + 1;
	return 1;
}
__setup("slub_debug", setup_slub_debug);
/*
 * Validate and finalize the debug flags of a cache that is being opened.
 *
 * The page->offset field is only 16 bits wide and counts in units of
 * words from the start of an object, so once the slab size reaches
 * 65535 * sizeof(void *) (256k on 32 bit, 512k on 64 bit) the free
 * pointer can no longer be relocated behind the object.  Debugging and
 * ctor/dtor support both require that relocation, so they must be
 * rejected for such caches.
 */
static void kmem_cache_open_debug_check(struct kmem_cache *s)
{
	if (s->size >= 65535 * sizeof(void *)) {
		/* Free pointer cannot move: refuse anything needing it. */
		BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON |
				SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
		BUG_ON(s->ctor || s->dtor);
		return;
	}

	/* Enable debugging if selected on the kernel command line. */
	if (!slub_debug)
		return;
	if (slub_debug_slabs &&
			strncmp(slub_debug_slabs, s->name,
					strlen(slub_debug_slabs)) != 0)
		return;
	s->flags |= slub_debug;
}
#else
/*
 * !CONFIG_SLUB_DEBUG: empty stand-ins for the debug hooks so that the
 * callers need no #ifdefs.  slub_debug collapses to a compile-time 0,
 * allowing the compiler to eliminate the debug-only paths entirely.
 */
static inline int alloc_object_checks(struct kmem_cache *s,
struct page *page, void *object) { return 0; }
static inline int free_object_checks(struct kmem_cache *s,
struct page *page, void *object) { return 0; }
static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
static inline void remove_full(struct kmem_cache *s, struct page *page) {}
static inline void trace(struct kmem_cache *s, struct page *page,
void *object, int alloc) {}
static inline void init_object(struct kmem_cache *s,
void *object, int active) {}
static inline void init_tracking(struct kmem_cache *s, void *object) {}
static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
{ return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
void *object, int active) { return 1; }
static inline void set_track(struct kmem_cache *s, void *object,
enum track_item alloc, void *addr) {}
static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {}
#define slub_debug 0
#endif
/*
* Slab allocation and freeing
*/
@ -1445,13 +1552,6 @@ static int slub_min_objects = DEFAULT_MIN_OBJECTS;
*/
static int slub_nomerge;
/*
* Debug settings:
*/
static int slub_debug;
static char *slub_debug_slabs;
/*
* Calculate the order of allocation given an slab object size.
*
@ -1660,6 +1760,7 @@ static int calculate_sizes(struct kmem_cache *s)
*/
size = ALIGN(size, sizeof(void *));
#ifdef CONFIG_SLUB_DEBUG
/*
* If we are Redzoning then check if there is some space between the
* end of the object and the free pointer. If not then add an
@ -1667,6 +1768,7 @@ static int calculate_sizes(struct kmem_cache *s)
*/
if ((flags & SLAB_RED_ZONE) && size == s->objsize)
size += sizeof(void *);
#endif
/*
* With that we have determined the number of bytes in actual use
@ -1674,6 +1776,7 @@ static int calculate_sizes(struct kmem_cache *s)
*/
s->inuse = size;
#ifdef CONFIG_SLUB_DEBUG
if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
s->ctor || s->dtor)) {
/*
@ -1704,6 +1807,7 @@ static int calculate_sizes(struct kmem_cache *s)
* of the object.
*/
size += sizeof(void *);
#endif
/*
* Determine the alignment based on various parameters that the
@ -1753,32 +1857,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
s->objsize = size;
s->flags = flags;
s->align = align;
/*
* The page->offset field is only 16 bit wide. This is an offset
* in units of words from the beginning of an object. If the slab
* size is bigger then we cannot move the free pointer behind the
* object anymore.
*
* On 32 bit platforms the limit is 256k. On 64bit platforms
* the limit is 512k.
*
* Debugging or ctor/dtors may create a need to move the free
* pointer. Fail if this happens.
*/
if (s->size >= 65535 * sizeof(void *)) {
BUG_ON(flags & (SLAB_RED_ZONE | SLAB_POISON |
SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
BUG_ON(ctor || dtor);
}
else
/*
* Enable debugging if selected on the kernel commandline.
*/
if (slub_debug && (!slub_debug_slabs ||
strncmp(slub_debug_slabs, name,
strlen(slub_debug_slabs)) == 0))
s->flags |= slub_debug;
kmem_cache_open_debug_check(s);
if (!calculate_sizes(s))
goto error;
@ -1949,45 +2028,6 @@ static int __init setup_slub_nomerge(char *str)
__setup("slub_nomerge", setup_slub_nomerge);
/*
 * Parse the slub_debug kernel command line option (flags, optionally
 * followed by a comma and a cache-name prefix to restrict them to).
 *
 * NOTE(review): this appears to be a second copy of setup_slub_debug in
 * the same file — presumably one of the two is being removed; verify
 * against the full file.  Also, when str is NULL the trailing
 * "*str == ','" test below dereferences a NULL pointer despite the !str
 * guard at the top.
 */
static int __init setup_slub_debug(char *str)
{
if (!str || *str != '=')
slub_debug = DEBUG_DEFAULT_FLAGS;
else {
str++;
if (*str == 0 || *str == ',')
slub_debug = DEBUG_DEFAULT_FLAGS;
else
for( ;*str && *str != ','; str++)
switch (*str) {
case 'f' : case 'F' :
slub_debug |= SLAB_DEBUG_FREE;
break;
case 'z' : case 'Z' :
slub_debug |= SLAB_RED_ZONE;
break;
case 'p' : case 'P' :
slub_debug |= SLAB_POISON;
break;
case 'u' : case 'U' :
slub_debug |= SLAB_STORE_USER;
break;
case 't' : case 'T' :
slub_debug |= SLAB_TRACE;
break;
default:
printk(KERN_ERR "slub_debug option '%c' "
"unknown. skipped\n",*str);
}
}
/* Anything after a comma restricts the flags to matching caches. */
if (*str == ',')
slub_debug_slabs = str + 1;
return 1;
}
__setup("slub_debug", setup_slub_debug);
static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
const char *name, int size, gfp_t gfp_flags)
{
@ -2554,8 +2594,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
return slab_alloc(s, gfpflags, node, caller);
}
#ifdef CONFIG_SYSFS
#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
static int validate_slab(struct kmem_cache *s, struct page *page)
{
void *p;