
slub: Push irq disable into allocate_slab()

Do the irq handling in allocate_slab() instead of __slab_alloc().

__slab_alloc() is already cluttered and allocate_slab() is already
fiddling around with gfp flags.

v6->v7:
	Only increment ORDER_FALLBACK if we get a page during fallback

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Christoph Lameter 2011-06-01 12:25:44 -05:00 committed by Pekka Enberg
parent e4a46182e1
commit 7e0528dadc
1 changed file with 13 additions and 10 deletions
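The pattern being centralized is worth spelling out: slub's slow path runs with local interrupts disabled, but a __GFP_WAIT allocation may sleep, so interrupts have to be re-enabled around the page allocator call and disabled again afterwards. A minimal, self-contained sketch of that pattern follows; the helper name is made up for illustration and is not the actual slub code:

#include <linux/gfp.h>
#include <linux/irqflags.h>
#include <linux/mm.h>

/*
 * Illustrative helper only. Entered with local interrupts disabled;
 * if the gfp mask allows sleeping (__GFP_WAIT), interrupts are
 * enabled across the allocation and disabled again on the way out,
 * so the caller sees irqs-off in both the success and failure case.
 */
static struct page *alloc_pages_irqs_off(gfp_t flags, unsigned int order)
{
	struct page *page;

	flags &= gfp_allowed_mask;	/* honor the global gfp policy */

	if (flags & __GFP_WAIT)
		local_irq_enable();	/* the allocation may sleep */

	page = alloc_pages(flags, order);

	if (flags & __GFP_WAIT)
		local_irq_disable();	/* restore the caller's irq state */

	return page;
}

This is exactly the dance the patch moves out of __slab_alloc() and into allocate_slab(), as the hunks below show.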


@@ -1187,6 +1187,11 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	struct kmem_cache_order_objects oo = s->oo;
 	gfp_t alloc_gfp;
 
+	flags &= gfp_allowed_mask;
+
+	if (flags & __GFP_WAIT)
+		local_irq_enable();
+
 	flags |= s->allocflags;
 
 	/*
@@ -1203,12 +1208,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 		 * Try a lower order alloc if possible
 		 */
 		page = alloc_slab_page(flags, node, oo);
-		if (!page)
-			return NULL;
 
-		stat(s, ORDER_FALLBACK);
+		if (page)
+			stat(s, ORDER_FALLBACK);
 	}
 
+	if (flags & __GFP_WAIT)
+		local_irq_disable();
+
+	if (!page)
+		return NULL;
+
 	if (kmemcheck_enabled
 		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
 		int pages = 1 << oo_order(oo);
@@ -1849,15 +1859,8 @@ new_slab:
 		goto load_freelist;
 	}
 
-	gfpflags &= gfp_allowed_mask;
-	if (gfpflags & __GFP_WAIT)
-		local_irq_enable();
-
 	page = new_slab(s, gfpflags, node);
-
-	if (gfpflags & __GFP_WAIT)
-		local_irq_disable();
-
 	if (page) {
 		c = __this_cpu_ptr(s->cpu_slab);
 		stat(s, ALLOC_SLAB);
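
Applied together, the hunks leave the irq dance in exactly one place and make the __slab_alloc() slow path shorter. Pieced together from the hunks above, the post-patch allocate_slab() reads roughly as follows; this is an abridged sketch, where the alloc_gfp setup and the trailing kmemcheck/accounting code are paraphrased from nearby slub code of this era rather than quoted from the patch:

static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	struct page *page;
	struct kmem_cache_order_objects oo = s->oo;
	gfp_t alloc_gfp;

	flags &= gfp_allowed_mask;

	if (flags & __GFP_WAIT)
		local_irq_enable();	/* moved here from __slab_alloc() */

	flags |= s->allocflags;

	/* Let the higher-order attempt fail quietly so we can fall back. */
	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;

	page = alloc_slab_page(alloc_gfp, node, oo);
	if (!page) {
		oo = s->min;
		/* Try a lower order alloc if possible */
		page = alloc_slab_page(flags, node, oo);

		if (page)
			stat(s, ORDER_FALLBACK);	/* v7: only on success */
	}

	if (flags & __GFP_WAIT)
		local_irq_disable();	/* back to the state the caller expects */

	if (!page)
		return NULL;

	/* ... kmemcheck tracking and slab/page setup continue here ... */
	return page;
}

Note that interrupts are re-disabled before the NULL check, so the caller gets irqs-off on both the success and failure paths, and ORDER_FALLBACK is bumped only when the fallback allocation actually produced a page.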