
mm/swapfile.c: call free_swap_slot() in __swap_entry_free()

This is a code cleanup patch with no functional change.

Previously, whenever __swap_entry_free() returned 0, the caller would
always call free_swap_slot() to free the swap entry to the per-CPU pool.
So move the call to free_swap_slot() into __swap_entry_free() itself to
simplify the code.
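
For reference, a minimal before/after sketch of the calling pattern this
patch changes (simplified from mm/swapfile.c; locking details elided):

	/* Before: each caller checked the return value itself. */
	usage = __swap_entry_free(p, entry, 1);
	if (!usage)
		free_swap_slot(entry);

	/*
	 * After: __swap_entry_free() frees the slot to the per-CPU
	 * pool when the usage count drops to zero, so a caller only
	 * needs:
	 */
	__swap_entry_free(p, entry, 1);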

Link: http://lkml.kernel.org/r/20180827075535.17406-3-ying.huang@intel.com
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Shaohua Li <shli@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Huang Ying 2018-10-26 15:03:49 -07:00 committed by Linus Torvalds
parent bcd49e8671
commit 10e364da10
1 changed file with 4 additions and 6 deletions


@@ -1182,6 +1182,8 @@ static unsigned char __swap_entry_free(struct swap_info_struct *p,
 	ci = lock_cluster_or_swap_info(p, offset);
 	usage = __swap_entry_free_locked(p, offset, usage);
 	unlock_cluster_or_swap_info(p, ci);
+	if (!usage)
+		free_swap_slot(entry);
 
 	return usage;
 }
@@ -1212,10 +1214,8 @@ void swap_free(swp_entry_t entry)
 	struct swap_info_struct *p;
 
 	p = _swap_info_get(entry);
-	if (p) {
-		if (!__swap_entry_free(p, entry, 1))
-			free_swap_slot(entry);
-	}
+	if (p)
+		__swap_entry_free(p, entry, 1);
 }
 
 /*
@@ -1637,8 +1637,6 @@ int free_swap_and_cache(swp_entry_t entry)
 		    !swap_page_trans_huge_swapped(p, entry))
 			__try_to_reclaim_swap(p, swp_offset(entry),
 					      TTRS_UNMAPPED | TTRS_FULL);
-		else if (!count)
-			free_swap_slot(entry);
 	}
 	return p != NULL;
 }