cpumask: remove x86's node_to_cpumask now everyone uses cpumask_of_node

Impact: cleanup

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Author: Rusty Russell
Date:   2009-03-13 14:49:52 +10:30
Parent: b643decad6
Commit: b9c4398ed4

2 changed files with 0 additions and 67 deletions
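Note (not part of the commit): the deprecation comments removed below already tell callers what to use instead. A minimal sketch of the caller-side migration, assuming a hypothetical helper example_cpus_on_node(); cpumask_of_node() and cpumask_weight() are existing kernel APIs, the rest is illustrative only:

#include <linux/cpumask.h>
#include <linux/topology.h>

/* Hypothetical caller, for illustration only. */
static int example_cpus_on_node(int node)
{
	/*
	 * Removed style: node_to_cpumask() returned a cpumask_t by value,
	 * placing the whole mask on the stack (costly with large NR_CPUS):
	 *
	 *	cpumask_t mask = node_to_cpumask(node);
	 *	return cpus_weight(mask);
	 */

	/* Replacement: borrow a pointer to the existing per-node mask. */
	const struct cpumask *mask = cpumask_of_node(node);

	return cpumask_weight(mask);
}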

File 1 of 2:

@@ -57,17 +57,6 @@ static inline int cpu_to_node(int cpu)
 }
 #define early_cpu_to_node(cpu)	cpu_to_node(cpu)
 
-/* Returns a bitmask of CPUs on Node 'node'.
- *
- * Side note: this function creates the returned cpumask on the stack
- * so with a high NR_CPUS count, excessive stack space is used.  The
- * cpumask_of_node function should be used whenever possible.
- */
-static inline cpumask_t node_to_cpumask(int node)
-{
-	return node_to_cpumask_map[node];
-}
-
 /* Returns a bitmask of CPUs on Node 'node'. */
 static inline const struct cpumask *cpumask_of_node(int node)
 {
@@ -92,7 +81,6 @@ DECLARE_PER_CPU(int, node_number);
 extern int cpu_to_node(int cpu);
 extern int early_cpu_to_node(int cpu);
 extern const cpumask_t *cpumask_of_node(int node);
-extern cpumask_t node_to_cpumask(int node);
 
 #else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
@@ -114,26 +102,10 @@ static inline const cpumask_t *cpumask_of_node(int node)
 	return &node_to_cpumask_map[node];
 }
 
-/* Returns a bitmask of CPUs on Node 'node'. */
-static inline cpumask_t node_to_cpumask(int node)
-{
-	return node_to_cpumask_map[node];
-}
-
 #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
 
 extern void setup_node_to_cpumask_map(void);
 
-/*
- * Replace default node_to_cpumask_ptr with optimized version
- *   Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
- */
-#define node_to_cpumask_ptr(v, node)		\
-		const cpumask_t *v = cpumask_of_node(node)
-
-#define node_to_cpumask_ptr_next(v, node)	\
-			 v = cpumask_of_node(node)
-
 #endif /* CONFIG_X86_64 */
 
 /*
@@ -212,10 +184,6 @@ static inline const cpumask_t *cpumask_of_node(int node)
 {
 	return &cpu_online_map;
 }
-static inline cpumask_t node_to_cpumask(int node)
-{
-	return cpu_online_map;
-}
 static inline int node_to_first_cpu(int node)
 {
 	return first_cpu(cpu_online_map);
@@ -223,15 +191,6 @@ static inline int node_to_first_cpu(int node)
 
 static inline void setup_node_to_cpumask_map(void) { }
 
-/*
- * Replace default node_to_cpumask_ptr with optimized version
- *   Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
- */
-#define node_to_cpumask_ptr(v, node)		\
-		const cpumask_t *v = cpumask_of_node(node)
-
-#define node_to_cpumask_ptr_next(v, node)	\
-			 v = cpumask_of_node(node)
 #endif
 
 #include <asm-generic/topology.h>

File 2 of 2:

@@ -826,32 +826,6 @@ const cpumask_t *cpumask_of_node(int node)
 }
 EXPORT_SYMBOL(cpumask_of_node);
 
-/*
- * Returns a bitmask of CPUs on Node 'node'.
- *
- * Side note: this function creates the returned cpumask on the stack
- * so with a high NR_CPUS count, excessive stack space is used.  The
- * node_to_cpumask_ptr function should be used whenever possible.
- */
-cpumask_t node_to_cpumask(int node)
-{
-	if (node_to_cpumask_map == NULL) {
-		printk(KERN_WARNING
-			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
-		dump_stack();
-		return cpu_online_map;
-	}
-	if (node >= nr_node_ids) {
-		printk(KERN_WARNING
-			"node_to_cpumask(%d): node > nr_node_ids(%d)\n",
-			node, nr_node_ids);
-		dump_stack();
-		return cpu_mask_none;
-	}
-	return node_to_cpumask_map[node];
-}
-EXPORT_SYMBOL(node_to_cpumask);
-
 /*
  * --------- end of debug versions of the numa functions ---------
  */