1
0
Fork 0

mm: add add_to_swap stub

If we add a failing stub for add_to_swap(), then we can remove the #ifdef
CONFIG_SWAP from mm/vmscan.c.

This was intended as a source cleanup, but looking more closely, it turns
out that the !CONFIG_SWAP case was going to keep_locked for an anonymous
page, whereas now it goes to the more suitable activate_locked, like the
CONFIG_SWAP nr_swap_pages == 0 case.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Robin Holt <holt@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
hifive-unleashed-5.1
Hugh Dickins 2009-01-06 14:39:40 -08:00 committed by Linus Torvalds
parent ac47b003d0
commit 60371d971a
2 changed files with 5 additions and 2 deletions

View File

@ -371,6 +371,11 @@ static inline struct page *lookup_swap_cache(swp_entry_t swp)
return NULL;
}
/*
 * !CONFIG_SWAP stub: always fail (return 0), so callers treat the page
 * as unswappable.  Per the commit message above, this sends anonymous
 * pages in shrink_page_list() to activate_locked — matching the
 * CONFIG_SWAP nr_swap_pages == 0 behaviour — instead of keep_locked.
 */
static inline int add_to_swap(struct page *page)
{
return 0;
}
static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
gfp_t gfp_mask)
{

View File

@ -617,7 +617,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
referenced && page_mapping_inuse(page))
goto activate_locked;
#ifdef CONFIG_SWAP
/*
* Anonymous process memory has backing store?
* Try to allocate it some swap space here.
@ -629,7 +628,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
goto activate_locked;
may_enter_fs = 1;
}
#endif /* CONFIG_SWAP */
mapping = page_mapping(page);