
cpusets: new round-robin rotor for SLAB allocations

We have observed several workloads running on multi-node systems where
memory is assigned unevenly across the nodes.  There are numerous
reasons for this, but one is the round-robin rotor in
cpuset_mem_spread_node().

For example, a simple test that writes a multi-page file will allocate
pages on nodes 0 2 4 6 ...  Odd nodes are skipped.  (Sometimes it
allocates on odd nodes & skips even nodes).

An example is shown below.  The program "lfile" writes a file
consisting of 10 pages, then mmaps the file & uses get_mempolicy(...,
MPOL_F_NODE) to determine the nodes where the file pages were
allocated:

	# ./lfile
	 allocated on nodes: 2 4 6 0 1 2 6 0 2
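
The lfile program itself is not included in the patch.  What follows is
a minimal sketch of such a probe (the file name, sizes & error handling
are assumptions, not the original source), using the MPOL_F_NODE |
MPOL_F_ADDR form of get_mempolicy() from <numaif.h> (link with -lnuma):

	/* Hypothetical reconstruction of "lfile" -- not the original. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/mman.h>
	#include <numaif.h>		/* get_mempolicy(), MPOL_F_* */

	#define NPAGES	10

	int main(void)
	{
		long psz = sysconf(_SC_PAGESIZE);
		char *buf = malloc(psz);
		char *map;
		int fd, i;

		/* Write a 10-page file; each page-sized write allocates a
		   page-cache page (plus buffer_head slab objects). */
		memset(buf, 'x', psz);
		fd = open("lfile.dat", O_RDWR | O_CREAT | O_TRUNC, 0644);
		for (i = 0; i < NPAGES; i++)
			write(fd, buf, psz);

		/* Map the file & ask which node backs each page. */
		map = mmap(NULL, NPAGES * psz, PROT_READ, MAP_SHARED, fd, 0);
		printf(" allocated on nodes:");
		for (i = 0; i < NPAGES; i++) {
			int node = -1;

			if (get_mempolicy(&node, NULL, 0, map + i * psz,
					  MPOL_F_NODE | MPOL_F_ADDR) == 0)
				printf(" %d", node);
		}
		printf("\n");
		return 0;
	}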

There is a single rotor that is used for allocating both file pages &
slab pages.  Writing the file allocates both a data page & a slab page
(the buffer_head), so the rotor advances 2 nodes for every data page
placed.
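
To see the skipping in isolation, here is an illustrative userspace
sketch (not kernel code; the node count & modulo arithmetic are
simplified stand-ins for the kernel's nodemask walk) of two allocation
streams sharing one rotor:

	#include <stdio.h>

	#define NR_NODES 8		/* assumed number of nodes */

	static int rotor;		/* single shared rotor */

	static int spread_node(void)
	{
		rotor = (rotor + 1) % NR_NODES;
		return rotor;
	}

	int main(void)
	{
		int i;

		printf(" data pages on nodes:");
		for (i = 0; i < 10; i++) {
			int data_node = spread_node();	/* page-cache page */

			spread_node();		/* buffer_head slab page */
			printf(" %d", data_node);
		}
		printf("\n");	/* -> 1 3 5 7 1 3 5 7 1 3: every other node */
		return 0;
	}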

A quick check seems to confirm that this is the cause of the uneven
allocation; with slab spreading disabled, only file pages advance the
rotor and the nodes are used consecutively:

	# echo 0 >/dev/cpuset/memory_spread_slab
	# ./lfile
	 allocated on nodes: 6 7 8 9 0 1 2 3 4 5

This patch introduces a second rotor that is used for slab allocations.

Signed-off-by: Jack Steiner <steiner@sgi.com>
Acked-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Paul Menage <menage@google.com>
Cc: Jack Steiner <steiner@sgi.com>
Cc: Robin Holt <holt@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Jack Steiner 2010-05-26 14:42:49 -07:00 committed by Linus Torvalds
parent 2c488db27b
commit 6adef3ebe5
4 changed files with 24 additions and 5 deletions

include/linux/cpuset.h

@@ -69,6 +69,7 @@ extern void cpuset_task_status_allowed(struct seq_file *m,
 					struct task_struct *task);
 
 extern int cpuset_mem_spread_node(void);
+extern int cpuset_slab_spread_node(void);
 
 static inline int cpuset_do_page_mem_spread(void)
 {
@@ -194,6 +195,11 @@ static inline int cpuset_mem_spread_node(void)
 	return 0;
 }
 
+static inline int cpuset_slab_spread_node(void)
+{
+	return 0;
+}
+
 static inline int cpuset_do_page_mem_spread(void)
 {
 	return 0;

include/linux/sched.h

@@ -1423,6 +1423,7 @@ struct task_struct {
 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
 	int mems_allowed_change_disable;
 	int cpuset_mem_spread_rotor;
+	int cpuset_slab_spread_rotor;
 #endif
 #ifdef CONFIG_CGROUPS
 	/* Control Group info protected by css_set_lock */

kernel/cpuset.c

@@ -2469,7 +2469,8 @@ void cpuset_unlock(void)
 }
 
 /**
- * cpuset_mem_spread_node() - On which node to begin search for a page
+ * cpuset_mem_spread_node() - On which node to begin search for a file page
+ * cpuset_slab_spread_node() - On which node to begin search for a slab page
  *
  * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
  * tasks in a cpuset with is_spread_page or is_spread_slab set),
@@ -2494,16 +2495,27 @@ void cpuset_unlock(void)
  * See kmem_cache_alloc_node().
  */
 
-int cpuset_mem_spread_node(void)
+static int cpuset_spread_node(int *rotor)
 {
 	int node;
 
-	node = next_node(current->cpuset_mem_spread_rotor, current->mems_allowed);
+	node = next_node(*rotor, current->mems_allowed);
 	if (node == MAX_NUMNODES)
 		node = first_node(current->mems_allowed);
-	current->cpuset_mem_spread_rotor = node;
+	*rotor = node;
 	return node;
 }
+
+int cpuset_mem_spread_node(void)
+{
+	return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
+}
+
+int cpuset_slab_spread_node(void)
+{
+	return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
+}
+
 EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
 
 /**
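
As an aside, the wraparound in the new helper (next_node() past the
last allowed node falls back to first_node()) can be mirrored in
userspace.  A minimal sketch, assuming a plain unsigned bitmask stands
in for nodemask_t:

	#include <stdio.h>

	#define MAX_NUMNODES	8

	/* Userspace stand-ins for the kernel's nodemask helpers. */
	static int next_node(int n, unsigned mask)
	{
		for (n++; n < MAX_NUMNODES; n++)
			if (mask & (1u << n))
				return n;
		return MAX_NUMNODES;	/* no higher node is set */
	}

	static int first_node(unsigned mask)
	{
		return next_node(-1, mask);
	}

	/* Same shape as the patched cpuset_spread_node(). */
	static int spread_node(int *rotor, unsigned mems_allowed)
	{
		int node = next_node(*rotor, mems_allowed);

		if (node == MAX_NUMNODES)
			node = first_node(mems_allowed);
		*rotor = node;
		return node;
	}

	int main(void)
	{
		unsigned mems = 0x2d;	/* nodes 0, 2, 3, 5 allowed */
		int rotor = 0, i;

		for (i = 0; i < 8; i++)
			printf(" %d", spread_node(&rotor, mems));
		printf("\n");	/* -> 2 3 5 0 2 3 5 0 */
		return 0;
	}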

mm/slab.c

@@ -3219,7 +3219,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
 	nid_alloc = nid_here = numa_node_id();
 	get_mems_allowed();
 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
-		nid_alloc = cpuset_mem_spread_node();
+		nid_alloc = cpuset_slab_spread_node();
 	else if (current->mempolicy)
 		nid_alloc = slab_node(current->mempolicy);
 	put_mems_allowed();