
[PATCH] cpuset: don't need to mark cpuset_mems_generation atomic

Drop the atomic_t marking on the cpuset static global
cpuset_mems_generation.  Since all access to it is guarded by the global
manage_mutex, there is no need for further serialization of this value.
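
To see why the mutex alone suffices, here is a minimal userspace
sketch of the same pattern (POSIX threads standing in for the kernel
primitives; the names echo this patch, but it is not the kernel code):
when every read-modify-write of the counter runs with the same mutex
held, a plain int increment is already fully serialized, so the atomic
type buys nothing.

	#include <pthread.h>

	static pthread_mutex_t manage_mutex = PTHREAD_MUTEX_INITIALIZER;
	static int mems_generation;	/* plain int, no atomic_t needed */

	/* Models the cpuset paths below: every bump of the generation
	 * already holds manage_mutex, so the post-increment cannot
	 * race with another writer. */
	static int bump_generation(void)
	{
		int gen;

		pthread_mutex_lock(&manage_mutex);
		gen = mems_generation++;
		pthread_mutex_unlock(&manage_mutex);
		return gen;
	}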

Signed-off-by: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Paul Jackson 2006-03-24 03:16:11 -08:00 committed by Linus Torvalds
parent 8488bc359d
commit 151a44202d
1 changed file with 11 additions and 8 deletions

@@ -149,7 +149,7 @@ static inline int is_spread_slab(const struct cpuset *cs)
 }
 
 /*
- * Increment this atomic integer everytime any cpuset changes its
+ * Increment this integer everytime any cpuset changes its
  * mems_allowed value. Users of cpusets can track this generation
  * number, and avoid having to lock and reload mems_allowed unless
  * the cpuset they're using changes generation.
@@ -163,8 +163,11 @@ static inline int is_spread_slab(const struct cpuset *cs)
  * on every visit to __alloc_pages(), to efficiently check whether
  * its current->cpuset->mems_allowed has changed, requiring an update
  * of its current->mems_allowed.
+ *
+ * Since cpuset_mems_generation is guarded by manage_mutex,
+ * there is no need to mark it atomic.
  */
-static atomic_t cpuset_mems_generation = ATOMIC_INIT(1);
+static int cpuset_mems_generation;
 
 static struct cpuset top_cpuset = {
 	.flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
@@ -877,7 +880,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 
 	mutex_lock(&callback_mutex);
 	cs->mems_allowed = trialcs.mems_allowed;
-	cs->mems_generation = atomic_inc_return(&cpuset_mems_generation);
+	cs->mems_generation = cpuset_mems_generation++;
 	mutex_unlock(&callback_mutex);
 
 	set_cpuset_being_rebound(cs);	/* causes mpol_copy() rebind */
@@ -1270,11 +1273,11 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us
 		break;
 	case FILE_SPREAD_PAGE:
 		retval = update_flag(CS_SPREAD_PAGE, cs, buffer);
-		cs->mems_generation = atomic_inc_return(&cpuset_mems_generation);
+		cs->mems_generation = cpuset_mems_generation++;
 		break;
 	case FILE_SPREAD_SLAB:
 		retval = update_flag(CS_SPREAD_SLAB, cs, buffer);
-		cs->mems_generation = atomic_inc_return(&cpuset_mems_generation);
+		cs->mems_generation = cpuset_mems_generation++;
 		break;
 	case FILE_TASKLIST:
 		retval = attach_task(cs, buffer, &pathbuf);
@@ -1823,7 +1826,7 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode)
 	atomic_set(&cs->count, 0);
 	INIT_LIST_HEAD(&cs->sibling);
 	INIT_LIST_HEAD(&cs->children);
-	cs->mems_generation = atomic_inc_return(&cpuset_mems_generation);
+	cs->mems_generation = cpuset_mems_generation++;
 	fmeter_init(&cs->fmeter);
 
 	cs->parent = parent;
@@ -1913,7 +1916,7 @@ int __init cpuset_init_early(void)
 	struct task_struct *tsk = current;
 
 	tsk->cpuset = &top_cpuset;
-	tsk->cpuset->mems_generation = atomic_inc_return(&cpuset_mems_generation);
+	tsk->cpuset->mems_generation = cpuset_mems_generation++;
 	return 0;
 }
 
@@ -1932,7 +1935,7 @@ int __init cpuset_init(void)
 	top_cpuset.mems_allowed = NODE_MASK_ALL;
 
 	fmeter_init(&top_cpuset.fmeter);
-	top_cpuset.mems_generation = atomic_inc_return(&cpuset_mems_generation);
+	top_cpuset.mems_generation = cpuset_mems_generation++;
 
 	init_task.cpuset = &top_cpuset;
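
The comment block updated above also describes the read side of this
counter. A simplified sketch of that check, in the spirit of the
kernel's cpuset_update_task_memory_state() fastpath (the struct and
field names here are illustrative stand-ins, not the real kernel
types):

	/* Hypothetical, simplified types for illustration only. */
	struct cpuset_like {
		int mems_generation;	/* bumped under manage_mutex */
		/* ... mems_allowed and other placement state ... */
	};

	struct task_like {
		struct cpuset_like *cpuset;
		int mems_generation;	/* generation at last reload */
	};

	static void update_task_memory_state(struct task_like *tsk)
	{
		/* Cheap unlocked compare on every allocation; only a
		 * stale generation forces the locked reload of
		 * mems_allowed. */
		if (tsk->mems_generation != tsk->cpuset->mems_generation) {
			/* take callback_mutex, copy mems_allowed, then: */
			tsk->mems_generation = tsk->cpuset->mems_generation;
		}
	}

This single integer compare is what lets tasks skip taking a mutex on
every __alloc_pages() call unless their cpuset's placement actually
changed.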