1
0
Fork 0

swapfile: rearrange scan and swap_info

Before making functional changes, rearrange scan_swap_map() to simplify
subsequent diffs.  Actually, there is one functional change in there:
leave cluster_nr negative while scanning for a new cluster - resetting it
early increased the likelihood that when we have difficulty finding a free
cluster, another task may come in and try doing exactly the same - just a
waste of cpu.

Before making functional changes, rearrange struct swap_info_struct
slightly: flags will be needed as an unsigned long (for wait_on_bit), next
is a good int to pair with prio, old_block_size is uninteresting so shift
it to the end.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
hifive-unleashed-5.1
Hugh Dickins 2009-01-06 14:39:50 -08:00 committed by Linus Torvalds
parent 81e3397127
commit ebebbbe904
2 changed files with 42 additions and 34 deletions

View File

@@ -133,14 +133,14 @@ enum {
  * The in-memory structure used to track swap areas.
  */
 struct swap_info_struct {
-	unsigned int flags;
+	unsigned long flags;
 	int prio;			/* swap priority */
+	int next;			/* next entry on swap list */
 	struct file *swap_file;
 	struct block_device *bdev;
 	struct list_head extent_list;
 	struct swap_extent *curr_swap_extent;
-	unsigned old_block_size;
-	unsigned short * swap_map;
+	unsigned short *swap_map;
 	unsigned int lowest_bit;
 	unsigned int highest_bit;
 	unsigned int cluster_next;
@@ -148,7 +148,7 @@ struct swap_info_struct {
 	unsigned int pages;
 	unsigned int max;
 	unsigned int inuse_pages;
-	int next;			/* next entry on swap list */
+	unsigned int old_block_size;
 };

 struct swap_list_t {

View File

@@ -89,7 +89,8 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)

 static inline unsigned long scan_swap_map(struct swap_info_struct *si)
 {
-	unsigned long offset, last_in_cluster;
+	unsigned long offset;
+	unsigned long last_in_cluster;
 	int latency_ration = LATENCY_LIMIT;

 	/*
@@ -103,10 +104,13 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si)
 	 */

 	si->flags += SWP_SCANNING;
-	if (unlikely(!si->cluster_nr)) {
-		si->cluster_nr = SWAPFILE_CLUSTER - 1;
-		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER)
-			goto lowest;
+	offset = si->cluster_next;
+
+	if (unlikely(!si->cluster_nr--)) {
+		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
+			si->cluster_nr = SWAPFILE_CLUSTER - 1;
+			goto checks;
+		}
 		spin_unlock(&swap_lock);

 		offset = si->lowest_bit;
@@ -118,43 +122,47 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si)
 				last_in_cluster = offset + SWAPFILE_CLUSTER;
 			else if (offset == last_in_cluster) {
 				spin_lock(&swap_lock);
-				si->cluster_next = offset-SWAPFILE_CLUSTER+1;
-				goto cluster;
+				offset -= SWAPFILE_CLUSTER - 1;
+				si->cluster_next = offset;
+				si->cluster_nr = SWAPFILE_CLUSTER - 1;
+				goto checks;
 			}
 			if (unlikely(--latency_ration < 0)) {
 				cond_resched();
 				latency_ration = LATENCY_LIMIT;
 			}
 		}
+
+		offset = si->lowest_bit;
 		spin_lock(&swap_lock);
-		goto lowest;
+		si->cluster_nr = SWAPFILE_CLUSTER - 1;
 	}

-	si->cluster_nr--;
-cluster:
-	offset = si->cluster_next;
-	if (offset > si->highest_bit)
-lowest:		offset = si->lowest_bit;
-checks:	if (!(si->flags & SWP_WRITEOK))
+checks:
+	if (!(si->flags & SWP_WRITEOK))
 		goto no_page;
 	if (!si->highest_bit)
 		goto no_page;
-	if (!si->swap_map[offset]) {
-		if (offset == si->lowest_bit)
-			si->lowest_bit++;
-		if (offset == si->highest_bit)
-			si->highest_bit--;
-		si->inuse_pages++;
-		if (si->inuse_pages == si->pages) {
-			si->lowest_bit = si->max;
-			si->highest_bit = 0;
-		}
-		si->swap_map[offset] = 1;
-		si->cluster_next = offset + 1;
-		si->flags -= SWP_SCANNING;
-		return offset;
-	}
+	if (offset > si->highest_bit)
+		offset = si->lowest_bit;
+	if (si->swap_map[offset])
+		goto scan;
+
+	if (offset == si->lowest_bit)
+		si->lowest_bit++;
+	if (offset == si->highest_bit)
+		si->highest_bit--;
+	si->inuse_pages++;
+	if (si->inuse_pages == si->pages) {
+		si->lowest_bit = si->max;
+		si->highest_bit = 0;
+	}
+	si->swap_map[offset] = 1;
+	si->cluster_next = offset + 1;
+	si->flags -= SWP_SCANNING;
+	return offset;

+scan:
 	spin_unlock(&swap_lock);
 	while (++offset <= si->highest_bit) {
 		if (!si->swap_map[offset]) {
@@ -167,7 +175,7 @@ checks:	if (!(si->flags & SWP_WRITEOK))
 		}
 	}
 	spin_lock(&swap_lock);
-	goto lowest;
+	goto checks;

 no_page:
 	si->flags -= SWP_SCANNING;