[PATCH] mm: rmap optimisation

Optimise rmap functions by minimising atomic operations when we know there
will be no concurrent modifications.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
Nick Piggin 2006-01-06 00:11:12 -08:00 committed by Linus Torvalds
parent 224abf92b2
commit 9617d95e6e
4 changed files with 43 additions and 15 deletions

View file

@ -324,7 +324,7 @@ void install_arg_page(struct vm_area_struct *vma,
lru_cache_add_active(page); lru_cache_add_active(page);
set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte( set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
page, vma->vm_page_prot)))); page, vma->vm_page_prot))));
page_add_anon_rmap(page, vma, address); page_add_new_anon_rmap(page, vma, address);
pte_unmap_unlock(pte, ptl); pte_unmap_unlock(pte, ptl);
/* no need for flush_tlb */ /* no need for flush_tlb */

View file

@ -71,6 +71,7 @@ void __anon_vma_link(struct vm_area_struct *);
* rmap interfaces called when adding or removing pte of page * rmap interfaces called when adding or removing pte of page
*/ */
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *); void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *); void page_remove_rmap(struct page *);

View file

@ -1498,7 +1498,7 @@ gotten:
update_mmu_cache(vma, address, entry); update_mmu_cache(vma, address, entry);
lazy_mmu_prot_update(entry); lazy_mmu_prot_update(entry);
lru_cache_add_active(new_page); lru_cache_add_active(new_page);
page_add_anon_rmap(new_page, vma, address); page_add_new_anon_rmap(new_page, vma, address);
/* Free the old page.. */ /* Free the old page.. */
new_page = old_page; new_page = old_page;
@ -1978,7 +1978,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
inc_mm_counter(mm, anon_rss); inc_mm_counter(mm, anon_rss);
lru_cache_add_active(page); lru_cache_add_active(page);
SetPageReferenced(page); SetPageReferenced(page);
page_add_anon_rmap(page, vma, address); page_add_new_anon_rmap(page, vma, address);
} else { } else {
/* Map the ZERO_PAGE - vm_page_prot is readonly */ /* Map the ZERO_PAGE - vm_page_prot is readonly */
page = ZERO_PAGE(address); page = ZERO_PAGE(address);
@ -2109,7 +2109,7 @@ retry:
if (anon) { if (anon) {
inc_mm_counter(mm, anon_rss); inc_mm_counter(mm, anon_rss);
lru_cache_add_active(new_page); lru_cache_add_active(new_page);
page_add_anon_rmap(new_page, vma, address); page_add_new_anon_rmap(new_page, vma, address);
} else { } else {
inc_mm_counter(mm, file_rss); inc_mm_counter(mm, file_rss);
page_add_file_rmap(new_page); page_add_file_rmap(new_page);

View file

@ -434,6 +434,26 @@ int page_referenced(struct page *page, int is_locked)
return referenced; return referenced;
} }
/**
* __page_set_anon_rmap - set up new anonymous rmap
* @page: the page to add the mapping to
* @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped
*/
static void __page_set_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
struct anon_vma *anon_vma = vma->anon_vma;
/* Caller must have set up vma->anon_vma before mapping the page. */
BUG_ON(!anon_vma);
/* Tag the pointer with PAGE_MAPPING_ANON to mark this as an anon mapping. */
anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
page->mapping = (struct address_space *) anon_vma;
/* Record the page's linear index within the vma for reverse lookup. */
page->index = linear_page_index(vma, address);
/* Account one more mapped page in the nr_mapped page-state counter. */
inc_page_state(nr_mapped);
}
/** /**
* page_add_anon_rmap - add pte mapping to an anonymous page * page_add_anon_rmap - add pte mapping to an anonymous page
* @page: the page to add the mapping to * @page: the page to add the mapping to
@ -445,20 +465,27 @@ int page_referenced(struct page *page, int is_locked)
void page_add_anon_rmap(struct page *page, void page_add_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address) struct vm_area_struct *vma, unsigned long address)
{ {
if (atomic_inc_and_test(&page->_mapcount)) { if (atomic_inc_and_test(&page->_mapcount))
struct anon_vma *anon_vma = vma->anon_vma; __page_set_anon_rmap(page, vma, address);
BUG_ON(!anon_vma);
anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
page->mapping = (struct address_space *) anon_vma;
page->index = linear_page_index(vma, address);
inc_page_state(nr_mapped);
}
/* else checking page index and mapping is racy */ /* else checking page index and mapping is racy */
} }
/**
* page_add_new_anon_rmap - add pte mapping to a new anonymous page
* @page: the page to add the mapping to
* @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped
*
* Same as page_add_anon_rmap but must only be called on *new* pages.
* This means the inc-and-test can be bypassed.
*/
void page_add_new_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
/*
 * The page is new (see the kernel-doc above): nothing else can hold a
 * mapping to it yet, so a plain store replaces the atomic
 * inc-and-test that page_add_anon_rmap() must use.
 */
atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
__page_set_anon_rmap(page, vma, address);
}
/** /**
* page_add_file_rmap - add pte mapping to a file page * page_add_file_rmap - add pte mapping to a file page
* @page: the page to add the mapping to * @page: the page to add the mapping to