
SLUB: use list_for_each_entry for loops over all slabs

Use list_for_each_entry() instead of list_for_each().

Get rid of for_all_slabs(): it had only one user, so fold it into the
callback. This also gets rid of cpu_slab_flush().

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Christoph Lameter 2007-07-17 04:03:19 -07:00 committed by Linus Torvalds
parent 2492268472
commit 5b95a4acf1
1 changed file with 13 additions and 38 deletions
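
For context, the sketch below shows the two patterns this commit trades between, in plain userspace C. The list_head, container_of() and iteration macros are simplified stand-ins for the kernel's <linux/list.h> definitions, and for_all() mirrors the single-use for_all_slabs() helper the commit removes; none of this is the kernel code itself.

/*
 * Minimal userspace sketch; build with: gcc -std=gnu99 sketch.c
 * (typeof is a GNU extension, as in the kernel proper).
 */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Old style: iterate raw list_head pointers, convert by hand. */
#define list_for_each(pos, head) \
	for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

/* New style: the macro performs the container_of() conversion itself. */
#define list_for_each_entry(pos, head, member)				\
	for ((pos) = container_of((head)->next, typeof(*(pos)), member); \
	     &(pos)->member != (head);					\
	     (pos) = container_of((pos)->member.next, typeof(*(pos)), member))

struct kmem_cache { const char *name; struct list_head list; };

static void list_add_tail(struct list_head *item, struct list_head *head)
{
	item->prev = head->prev;
	item->next = head;
	head->prev->next = item;
	head->prev = item;
}

/* Mirrors for_all_slabs(): a generic iterator around a callback. */
static void for_all(struct list_head *head,
		    void (*func)(struct kmem_cache *, int), int arg)
{
	struct list_head *h;

	list_for_each(h, head) {
		struct kmem_cache *s =
			container_of(h, struct kmem_cache, list);

		func(s, arg);
	}
}

static void print_cache(struct kmem_cache *s, int cpu)
{
	printf("via callback (cpu %d): %s\n", cpu, s->name);
}

int main(void)
{
	struct list_head slab_caches = { &slab_caches, &slab_caches };
	struct kmem_cache a = { .name = "cache-a" };
	struct kmem_cache b = { .name = "cache-b" };
	struct kmem_cache *s;

	list_add_tail(&a.list, &slab_caches);
	list_add_tail(&b.list, &slab_caches);

	/* Before: indirection through a helper and a function pointer. */
	for_all(&slab_caches, print_cache, 0);

	/* After: a direct, typed loop at the single call site. */
	list_for_each_entry(s, &slab_caches, list)
		printf("direct loop: %s\n", s->name);

	return 0;
}

The payoff shows in every hunk below: list_for_each_entry() folds the container_of() conversion into the loop macro, so each call site drops the raw cursor variable and the manual conversion, and the single-use helper disappears entirely.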


@@ -2573,7 +2573,7 @@ static struct kmem_cache *find_mergeable(size_t size,
 		size_t align, unsigned long flags,
 		void (*ctor)(void *, struct kmem_cache *, unsigned long))
 {
-	struct list_head *h;
+	struct kmem_cache *s;
 
 	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
 		return NULL;
@@ -2585,10 +2585,7 @@ static struct kmem_cache *find_mergeable(size_t size,
 	align = calculate_alignment(flags, align, size);
 	size = ALIGN(size, align);
 
-	list_for_each(h, &slab_caches) {
-		struct kmem_cache *s =
-			container_of(h, struct kmem_cache, list);
-
+	list_for_each_entry(s, &slab_caches, list) {
 		if (slab_unmergeable(s))
 			continue;
 
@@ -2670,33 +2667,6 @@ void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_zalloc);
 
 #ifdef CONFIG_SMP
-static void for_all_slabs(void (*func)(struct kmem_cache *, int), int cpu)
-{
-	struct list_head *h;
-
-	down_read(&slub_lock);
-	list_for_each(h, &slab_caches) {
-		struct kmem_cache *s =
-			container_of(h, struct kmem_cache, list);
-
-		func(s, cpu);
-	}
-	up_read(&slub_lock);
-}
-
-/*
- * Version of __flush_cpu_slab for the case that interrupts
- * are enabled.
- */
-static void cpu_slab_flush(struct kmem_cache *s, int cpu)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__flush_cpu_slab(s, cpu);
-	local_irq_restore(flags);
-}
-
 /*
  * Use the cpu notifier to insure that the cpu slabs are flushed when
  * necessary.
@@ -2705,13 +2675,21 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
 		unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
+	struct kmem_cache *s;
+	unsigned long flags;
 
 	switch (action) {
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		for_all_slabs(cpu_slab_flush, cpu);
+		down_read(&slub_lock);
+		list_for_each_entry(s, &slab_caches, list) {
+			local_irq_save(flags);
+			__flush_cpu_slab(s, cpu);
+			local_irq_restore(flags);
+		}
+		up_read(&slub_lock);
 		break;
 	default:
 		break;
@@ -3736,7 +3714,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
 
 static int __init slab_sysfs_init(void)
 {
-	struct list_head *h;
+	struct kmem_cache *s;
 	int err;
 
 	err = subsystem_register(&slab_subsys);
@@ -3747,10 +3725,7 @@ static int __init slab_sysfs_init(void)
 
 	slab_state = SYSFS;
 
-	list_for_each(h, &slab_caches) {
-		struct kmem_cache *s =
-			container_of(h, struct kmem_cache, list);
-
+	list_for_each_entry(s, &slab_caches, list) {
 		err = sysfs_slab_add(s);
 		BUG_ON(err);
 	}