
slub: Convert to hotplug state machine

Install the callbacks via the state machine.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: linux-mm@kvack.org
Cc: rt@linutronix.de
Cc: David Rientjes <rientjes@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Link: http://lkml.kernel.org/r/20160818125731.27256-5-bigeasy@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Sebastian Andrzej Siewior 2016-08-18 14:57:19 +02:00 committed by Thomas Gleixner
parent 6731d4f123
commit a96a87bf94
2 changed files with 22 additions and 44 deletions
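
For orientation, a minimal sketch of the conversion pattern this patch applies, assuming only the cpuhp API visible in the diff below; example_dead() and example_init() are hypothetical names, not code from the patch:

#include <linux/cpuhotplug.h>

/*
 * Hedged illustration: one hotplug state with no startup callback and
 * a single teardown ("dead") callback.  The state machine runs the
 * teardown both when a CPU goes offline and on the rollback of a
 * failed bringup, so the old notifier's CPU_UP_CANCELED/CPU_DEAD
 * (and _FROZEN) cases collapse into one function.
 */
static int example_dead(unsigned int cpu)
{
	/* Flush or release the per-cpu resources of @cpu here. */
	return 0;
}

static int __init example_init(void)
{
	/*
	 * NULL startup callback: nothing to do when a CPU comes up.
	 * The _nocalls variant registers the state without invoking
	 * callbacks for CPUs that are already online.
	 */
	return cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead",
					 NULL, example_dead);
}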

include/linux/cpuhotplug.h

@@ -15,6 +15,7 @@ enum cpuhp_state {
 	CPUHP_X86_HPET_DEAD,
 	CPUHP_X86_APB_DEAD,
 	CPUHP_VIRT_NET_DEAD,
+	CPUHP_SLUB_DEAD,
 	CPUHP_WORKQUEUE_PREP,
 	CPUHP_POWER_NUMA_PREPARE,
 	CPUHP_HRTIMERS_PREPARE,

mm/slub.c

@@ -194,10 +194,6 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 #define __OBJECT_POISON		0x80000000UL /* Poison object */
 #define __CMPXCHG_DOUBLE	0x40000000UL /* Use cmpxchg_double */
 
-#ifdef CONFIG_SMP
-static struct notifier_block slab_notifier;
-#endif
-
 /*
  * Tracking user of a slab.
  */
@@ -2304,6 +2300,25 @@ static void flush_all(struct kmem_cache *s)
 	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
 }
 
+/*
+ * Use the cpu notifier to insure that the cpu slabs are flushed when
+ * necessary.
+ */
+static int slub_cpu_dead(unsigned int cpu)
+{
+	struct kmem_cache *s;
+	unsigned long flags;
+
+	mutex_lock(&slab_mutex);
+	list_for_each_entry(s, &slab_caches, list) {
+		local_irq_save(flags);
+		__flush_cpu_slab(s, cpu);
+		local_irq_restore(flags);
+	}
+	mutex_unlock(&slab_mutex);
+	return 0;
+}
+
 /*
  * Check if the objects in a per cpu structure fit numa
  * locality expectations.
@@ -4144,9 +4159,8 @@ void __init kmem_cache_init(void)
 	/* Setup random freelists for each cache */
 	init_freelist_randomization();
 
-#ifdef CONFIG_SMP
-	register_cpu_notifier(&slab_notifier);
-#endif
+	cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
+				  slub_cpu_dead);
 
 	pr_info("SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d, CPUs=%d, Nodes=%d\n",
 		cache_line_size(),
@@ -4210,43 +4224,6 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
 	return err;
 }
 
-#ifdef CONFIG_SMP
-/*
- * Use the cpu notifier to insure that the cpu slabs are flushed when
- * necessary.
- */
-static int slab_cpuup_callback(struct notifier_block *nfb,
-			       unsigned long action, void *hcpu)
-{
-	long cpu = (long)hcpu;
-	struct kmem_cache *s;
-	unsigned long flags;
-
-	switch (action) {
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		mutex_lock(&slab_mutex);
-		list_for_each_entry(s, &slab_caches, list) {
-			local_irq_save(flags);
-			__flush_cpu_slab(s, cpu);
-			local_irq_restore(flags);
-		}
-		mutex_unlock(&slab_mutex);
-		break;
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block slab_notifier = {
-	.notifier_call = slab_cpuup_callback
-};
-#endif
-
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
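
SLUB installs this state once during boot and never removes it, but the API is symmetric; a hedged sketch of the unwind path a modular user of the same API would need (example_exit() is a hypothetical name):

static void example_exit(void)
{
	/*
	 * Unregister the state; the _nocalls variant does not invoke
	 * the teardown callback on currently online CPUs.
	 */
	cpuhp_remove_state_nocalls(CPUHP_SLUB_DEAD);
}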