1
0
Fork 0

cpumask: Add alloc_cpumask_var_node()

Impact: New API

This will be needed in x86 code to allocate the domain and old_domain
cpumasks on the same node as where the containing irq_cfg struct is
allocated.

(Also fixes double-dump_stack on rare CONFIG_DEBUG_PER_CPU_MAPS case)

Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au> (re-impl alloc_cpumask_var)
hifive-unleashed-5.1
Mike Travis 2008-12-19 16:56:37 +10:30 committed by Rusty Russell
parent 7be7585393
commit 7b4967c532
2 changed files with 15 additions and 3 deletions

View File

@@ -1025,6 +1025,7 @@ static inline size_t cpumask_size(void)
 #ifdef CONFIG_CPUMASK_OFFSTACK
 typedef struct cpumask *cpumask_var_t;
+bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
 bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
 void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
 void free_cpumask_var(cpumask_var_t mask);
@@ -1038,6 +1039,12 @@ static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
 	return true;
 }
 
+static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
+					  int node)
+{
+	return true;
+}
+
 static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
 {
 }

View File

@@ -76,15 +76,14 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
 /* These are not inline because of header tangles. */
 #ifdef CONFIG_CPUMASK_OFFSTACK
-bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
 {
 	if (likely(slab_is_available()))
-		*mask = kmalloc(cpumask_size(), flags);
+		*mask = kmalloc_node(cpumask_size(), flags, node);
 	else {
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 		printk(KERN_ERR
 			"=> alloc_cpumask_var: kmalloc not available!\n");
-		dump_stack();
 #endif
 		*mask = NULL;
 	}
@@ -96,6 +95,12 @@ bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
 #endif
 	return *mask != NULL;
 }
+EXPORT_SYMBOL(alloc_cpumask_var_node);
+
+bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+	return alloc_cpumask_var_node(mask, flags, numa_node_id());
+}
 EXPORT_SYMBOL(alloc_cpumask_var);
 
 void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)