mm: move anon_vma ref out from under CONFIG_foo

We need the anon_vma refcount unconditionally to simplify the anon_vma
lifetime rules.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Peter Zijlstra 2011-03-22 16:32:48 -07:00 committed by Linus Torvalds
parent 9e60109f12
commit 83813267c6
2 changed files with 10 additions and 44 deletions

include/linux/rmap.h — View file

@@ -27,18 +27,15 @@
 struct anon_vma {
 	struct anon_vma *root;	/* Root of this anon_vma tree */
 	spinlock_t lock;	/* Serialize access to vma list */
-#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
-
 	/*
-	 * The external_refcount is taken by either KSM or page migration
-	 * to take a reference to an anon_vma when there is no
+	 * The refcount is taken on an anon_vma when there is no
 	 * guarantee that the vma of page tables will exist for
 	 * the duration of the operation. A caller that takes
 	 * the reference is responsible for clearing up the
 	 * anon_vma if they are the last user on release
 	 */
-	atomic_t external_refcount;
-#endif
+	atomic_t refcount;
+
 	/*
 	 * NOTE: the LSB of the head.next is set by
 	 * mm_take_all_locks() _after_ taking the above lock. So the
@@ -71,41 +68,12 @@ struct anon_vma_chain {
 };
 
 #ifdef CONFIG_MMU
-#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
-static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
-{
-	atomic_set(&anon_vma->external_refcount, 0);
-}
-
-static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
-{
-	return atomic_read(&anon_vma->external_refcount);
-}
-
 static inline void get_anon_vma(struct anon_vma *anon_vma)
 {
-	atomic_inc(&anon_vma->external_refcount);
+	atomic_inc(&anon_vma->refcount);
 }
 
 void put_anon_vma(struct anon_vma *);
-#else
-static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
-{
-}
-
-static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
-{
-	return 0;
-}
-
-static inline void get_anon_vma(struct anon_vma *anon_vma)
-{
-}
-
-static inline void put_anon_vma(struct anon_vma *anon_vma)
-{
-}
-#endif /* CONFIG_KSM */
 
 static inline struct anon_vma *page_anon_vma(struct page *page)
 {

mm/rmap.c — View file

@@ -272,7 +272,7 @@ static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
 	list_del(&anon_vma_chain->same_anon_vma);
 
 	/* We must garbage collect the anon_vma if it's empty */
-	empty = list_empty(&anon_vma->head) && !anonvma_external_refcount(anon_vma);
+	empty = list_empty(&anon_vma->head) && !atomic_read(&anon_vma->refcount);
 	anon_vma_unlock(anon_vma);
 
 	if (empty) {
@@ -303,7 +303,7 @@ static void anon_vma_ctor(void *data)
 	struct anon_vma *anon_vma = data;
 
 	spin_lock_init(&anon_vma->lock);
-	anonvma_external_refcount_init(anon_vma);
+	atomic_set(&anon_vma->refcount, 0);
 	INIT_LIST_HEAD(&anon_vma->head);
 }
 
@@ -1486,7 +1486,6 @@ int try_to_munlock(struct page *page)
 	return try_to_unmap_file(page, TTU_MUNLOCK);
 }
 
-#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
 /*
  * Drop an anon_vma refcount, freeing the anon_vma and anon_vma->root
  * if necessary. Be careful to do all the tests under the lock. Once
@@ -1495,8 +1494,8 @@ int try_to_munlock(struct page *page)
  */
 void put_anon_vma(struct anon_vma *anon_vma)
 {
-	BUG_ON(atomic_read(&anon_vma->external_refcount) <= 0);
-	if (atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->root->lock)) {
+	BUG_ON(atomic_read(&anon_vma->refcount) <= 0);
+	if (atomic_dec_and_lock(&anon_vma->refcount, &anon_vma->root->lock)) {
 		struct anon_vma *root = anon_vma->root;
 		int empty = list_empty(&anon_vma->head);
 		int last_root_user = 0;
@@ -1507,8 +1506,8 @@ void put_anon_vma(struct anon_vma *anon_vma)
 		 * the refcount on the root and check if we need to free it.
 		 */
 		if (empty && anon_vma != root) {
-			BUG_ON(atomic_read(&root->external_refcount) <= 0);
-			last_root_user = atomic_dec_and_test(&root->external_refcount);
+			BUG_ON(atomic_read(&root->refcount) <= 0);
+			last_root_user = atomic_dec_and_test(&root->refcount);
 			root_empty = list_empty(&root->head);
 		}
 		anon_vma_unlock(anon_vma);
@@ -1520,7 +1519,6 @@ void put_anon_vma(struct anon_vma *anon_vma)
 		}
 	}
 }
-#endif
 
 #ifdef CONFIG_MIGRATION
 /*