shmem: cleanup shmem_add_to_page_cache

shmem_add_to_page_cache() has three callsites, but only one of them wants
the radix_tree_preload() (an exceptional entry guarantees that the radix
tree node is already present in the other cases), and only that site has
any need of mem_cgroup_uncharge_cache_page() on failure (PageSwapCache
makes it a no-op in the other cases).  We did it this way originally to
mirror add_to_page_cache_locked(); but it's confusing now, so move the
radix_tree preloading and mem_cgroup uncharging to that one caller.
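
In outline, the one caller that still needs them (the fresh-allocation
path in shmem_getpage_gfp()) ends up doing the sequence below; this is
just the second hunk of the diff restated as a sketch:

	error = mem_cgroup_cache_charge(page, current->mm,
					gfp & GFP_RECLAIM_MASK);
	if (error)
		goto decused;
	error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
	if (!error) {
		error = shmem_add_to_page_cache(page, mapping, index,
						gfp, NULL);
		radix_tree_preload_end();
	}
	if (error) {
		/* charge taken above is dropped here, not in the helper */
		mem_cgroup_uncharge_cache_page(page);
		goto decused;
	}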

Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Hugh Dickins <hughd@google.com>
Date:   2012-07-11 14:02:48 -07:00
Committed by: Linus Torvalds
parent d189922862
commit b065b4321f

--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -288,40 +288,31 @@ static int shmem_add_to_page_cache(struct page *page,
                                    struct address_space *mapping,
                                    pgoff_t index, gfp_t gfp, void *expected)
 {
-        int error = 0;
+        int error;
 
         VM_BUG_ON(!PageLocked(page));
         VM_BUG_ON(!PageSwapBacked(page));
 
-        if (!expected)
-                error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
-        if (!error) {
-                page_cache_get(page);
-                page->mapping = mapping;
-                page->index = index;
-
-                spin_lock_irq(&mapping->tree_lock);
-                if (!expected)
-                        error = radix_tree_insert(&mapping->page_tree,
-                                                        index, page);
-                else
-                        error = shmem_radix_tree_replace(mapping, index,
-                                                        expected, page);
-                if (!error) {
-                        mapping->nrpages++;
-                        __inc_zone_page_state(page, NR_FILE_PAGES);
-                        __inc_zone_page_state(page, NR_SHMEM);
-                        spin_unlock_irq(&mapping->tree_lock);
-                } else {
-                        page->mapping = NULL;
-                        spin_unlock_irq(&mapping->tree_lock);
-                        page_cache_release(page);
-                }
-                if (!expected)
-                        radix_tree_preload_end();
+        page_cache_get(page);
+        page->mapping = mapping;
+        page->index = index;
+
+        spin_lock_irq(&mapping->tree_lock);
+        if (!expected)
+                error = radix_tree_insert(&mapping->page_tree, index, page);
+        else
+                error = shmem_radix_tree_replace(mapping, index, expected,
+                                                                 page);
+        if (!error) {
+                mapping->nrpages++;
+                __inc_zone_page_state(page, NR_FILE_PAGES);
+                __inc_zone_page_state(page, NR_SHMEM);
+                spin_unlock_irq(&mapping->tree_lock);
+        } else {
+                page->mapping = NULL;
+                spin_unlock_irq(&mapping->tree_lock);
+                page_cache_release(page);
         }
-        if (error)
-                mem_cgroup_uncharge_cache_page(page);
         return error;
 }
 
@@ -1202,11 +1193,18 @@ repeat:
                 __set_page_locked(page);
                 error = mem_cgroup_cache_charge(page, current->mm,
                                                 gfp & GFP_RECLAIM_MASK);
-                if (!error)
-                        error = shmem_add_to_page_cache(page, mapping, index,
-                                                gfp, NULL);
                 if (error)
                         goto decused;
+                error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
+                if (!error) {
+                        error = shmem_add_to_page_cache(page, mapping, index,
+                                                        gfp, NULL);
+                        radix_tree_preload_end();
+                }
+                if (error) {
+                        mem_cgroup_uncharge_cache_page(page);
+                        goto decused;
+                }
                 lru_cache_add_anon(page);
 
                 spin_lock(&info->lock);