mm/swapfile.c: replace some #ifdef with IS_ENABLED()

In mm/swapfile.c, the THP (Transparent Huge Page) swap specific code is
enclosed by #ifdef CONFIG_THP_SWAP/#endif to avoid code bloat when THP
isn't enabled.  But #ifdef/#endif in a .c file hurts code readability,
so Dave suggested using IS_ENABLED(CONFIG_THP_SWAP) instead and letting
the compiler do the dirty work for us.  This also has the potential to
remove some duplicated code.  From the output of `size`:

		text	   data	    bss	    dec	    hex	filename
THP=y:         26269	   2076	    340	  28685	   700d	mm/swapfile.o
ifdef/endif:   24115	   2028	    340	  26483	   6773	mm/swapfile.o
IS_ENABLED:    24179	   2028	    340	  26547	   67b3	mm/swapfile.o

The IS_ENABLED() based solution works quite well, almost as well as the
#ifdef/#endif one.  And per the diffstat, more lines are removed than
added.

One #ifdef for split_swap_cluster() is kept, because it is a public
function with a stub implementation for CONFIG_THP_SWAP=n in swap.h.
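
For illustration only, here is a minimal sketch of the pattern (the
function name below is made up, not code from this patch).  The point is
that IS_ENABLED() expands to a compile-time constant 0 or 1, so the
compiler still parses and type-checks the THP-only path but eliminates
it as dead code when CONFIG_THP_SWAP=n:

	/*
	 * Hypothetical example of the IS_ENABLED() pattern; assumes a
	 * kernel build context (IS_ENABLED() comes from <linux/kconfig.h>).
	 */
	static int thp_swap_only_operation(struct swap_info_struct *si)
	{
		if (!IS_ENABLED(CONFIG_THP_SWAP)) {
			/*
			 * Compiled and type-checked, then discarded as
			 * dead code when CONFIG_THP_SWAP is not set.
			 */
			VM_WARN_ON_ONCE(1);
			return 0;
		}
		/* ... THP swap specific work would go here ... */
		return 1;
	}

Because both branches always compile, the dead-code-eliminated object
file is only marginally larger than the #ifdef/#endif one, as the `size`
numbers above show.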

Link: http://lkml.kernel.org/r/20180720071845.17920-3-ying.huang@intel.com
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Suggested-and-acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Shaohua Li <shli@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Huang Ying 2018-08-21 21:52:05 -07:00 committed by Linus Torvalds
parent 59d98bf3c2
commit fe5266d5d5
1 changed file with 20 additions and 40 deletions

@@ -868,7 +868,6 @@ no_page:
 	return n_ret;
 }
 
-#ifdef CONFIG_THP_SWAP
 static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
 {
 	unsigned long idx;
@@ -876,6 +875,15 @@ static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
 	unsigned long offset, i;
 	unsigned char *map;
 
+	/*
+	 * Should not even be attempting cluster allocations when huge
+	 * page swap is disabled. Warn and fail the allocation.
+	 */
+	if (!IS_ENABLED(CONFIG_THP_SWAP)) {
+		VM_WARN_ON_ONCE(1);
+		return 0;
+	}
+
 	if (cluster_list_empty(&si->free_clusters))
 		return 0;
 
@@ -906,13 +914,6 @@ static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
 	unlock_cluster(ci);
 	swap_range_free(si, offset, SWAPFILE_CLUSTER);
 }
-#else
-static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
-{
-	VM_WARN_ON_ONCE(1);
-	return 0;
-}
-#endif /* CONFIG_THP_SWAP */
 
 static unsigned long scan_swap_map(struct swap_info_struct *si,
 				   unsigned char usage)
@@ -1200,7 +1201,6 @@ static void swapcache_free(swp_entry_t entry)
 	}
 }
 
-#ifdef CONFIG_THP_SWAP
 static void swapcache_free_cluster(swp_entry_t entry)
 {
 	unsigned long offset = swp_offset(entry);
@@ -1211,6 +1211,9 @@ static void swapcache_free_cluster(swp_entry_t entry)
 	unsigned int i, free_entries = 0;
 	unsigned char val;
 
+	if (!IS_ENABLED(CONFIG_THP_SWAP))
+		return;
+
 	si = _swap_info_get(entry);
 	if (!si)
 		return;
@@ -1246,6 +1249,7 @@ static void swapcache_free_cluster(swp_entry_t entry)
 	}
 }
 
+#ifdef CONFIG_THP_SWAP
 int split_swap_cluster(swp_entry_t entry)
 {
 	struct swap_info_struct *si;
@@ -1260,11 +1264,7 @@ int split_swap_cluster(swp_entry_t entry)
 	unlock_cluster(ci);
 	return 0;
 }
-#else
-static inline void swapcache_free_cluster(swp_entry_t entry)
-{
-}
-#endif /* CONFIG_THP_SWAP */
+#endif
 
 void put_swap_page(struct page *page, swp_entry_t entry)
 {
@@ -1414,7 +1414,6 @@ out:
 	return count;
 }
 
-#ifdef CONFIG_THP_SWAP
 static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
 					 swp_entry_t entry)
 {
@@ -1425,6 +1424,9 @@ static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
 	int i;
 	bool ret = false;
 
+	if (!IS_ENABLED(CONFIG_THP_SWAP))
+		return swap_swapcount(si, entry) != 0;
+
 	ci = lock_cluster_or_swap_info(si, offset);
 	if (!ci || !cluster_is_huge(ci)) {
 		if (map[roffset] != SWAP_HAS_CACHE)
@@ -1447,7 +1449,7 @@ static bool page_swapped(struct page *page)
 	swp_entry_t entry;
 	struct swap_info_struct *si;
 
-	if (likely(!PageTransCompound(page)))
+	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page)))
 		return page_swapcount(page) != 0;
 
 	page = compound_head(page);
@@ -1471,10 +1473,8 @@ static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
 	/* hugetlbfs shouldn't call it */
 	VM_BUG_ON_PAGE(PageHuge(page), page);
 
-	if (likely(!PageTransCompound(page))) {
-		mapcount = atomic_read(&page->_mapcount) + 1;
-		if (total_mapcount)
-			*total_mapcount = mapcount;
+	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page))) {
+		mapcount = page_trans_huge_mapcount(page, total_mapcount);
 		if (PageSwapCache(page))
 			swapcount = page_swapcount(page);
 		if (total_swapcount)
@@ -1521,26 +1521,6 @@ static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
 
 	return map_swapcount;
 }
-#else
-#define swap_page_trans_huge_swapped(si, entry)	swap_swapcount(si, entry)
-#define page_swapped(page)			(page_swapcount(page) != 0)
-
-static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
-					 int *total_swapcount)
-{
-	int mapcount, swapcount = 0;
-
-	/* hugetlbfs shouldn't call it */
-	VM_BUG_ON_PAGE(PageHuge(page), page);
-
-	mapcount = page_trans_huge_mapcount(page, total_mapcount);
-	if (PageSwapCache(page))
-		swapcount = page_swapcount(page);
-	if (total_swapcount)
-		*total_swapcount = swapcount;
-	return mapcount + swapcount;
-}
-#endif
 
 /*
  * We can write to an anon page without COW if there are no other references