z3fold: fix reclaim lock-ups

Do not try to optimize in-page object layout while the page is under
reclaim.  This fixes lock-ups on reclaim and improves reclaim
performance at the same time.
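
What follows is a standalone user-space sketch of the idea, not the kernel
code: reclaim marks the page busy before working on it, and the free path
backs off instead of requesting compaction while that mark is set.  The
names fake_page, fake_free() and fake_reclaim() are invented for the
illustration; in the patch below the same roles are played by the new
UNDER_RECLAIM page flag, z3fold_free() and z3fold_reclaim_page(), with the
z3fold page lock in place of the pthread mutex.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	pthread_mutex_t lock;	/* stands in for zhdr->page_lock */
	bool under_reclaim;	/* stands in for the UNDER_RECLAIM bit */
	bool needs_compacting;	/* stands in for NEEDS_COMPACTING */
};

/* Free path: do not ask for layout optimization while reclaim owns the page. */
static void fake_free(struct fake_page *p)
{
	pthread_mutex_lock(&p->lock);
	if (p->under_reclaim) {
		/* reclaim will finish with this page; just back off */
		pthread_mutex_unlock(&p->lock);
		return;
	}
	p->needs_compacting = true;	/* normal path: defer compaction */
	pthread_mutex_unlock(&p->lock);
}

/* Reclaim path: set the flag first, evict, then clear it again. */
static void fake_reclaim(struct fake_page *p)
{
	pthread_mutex_lock(&p->lock);
	p->under_reclaim = true;
	pthread_mutex_unlock(&p->lock);

	/* ... evict objects without holding the page lock ... */

	pthread_mutex_lock(&p->lock);
	p->under_reclaim = false;	/* concurrent frees may compact again */
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct fake_page p = { .under_reclaim = false, .needs_compacting = false };

	pthread_mutex_init(&p.lock, NULL);

	/* A free racing with reclaim is a no-op instead of queueing work. */
	pthread_mutex_lock(&p.lock);
	p.under_reclaim = true;
	pthread_mutex_unlock(&p.lock);
	fake_free(&p);
	printf("during reclaim: needs_compacting=%d\n", p.needs_compacting);

	/* Once reclaim is done, frees request compaction as before. */
	fake_reclaim(&p);
	fake_free(&p);
	printf("after reclaim:  needs_compacting=%d\n", p.needs_compacting);

	pthread_mutex_destroy(&p.lock);
	return 0;
}

Build with cc -pthread; the first free is skipped because the flag is set,
the second one requests compaction as usual.  The real patch additionally
re-takes the page lock after reclaim, clears the bit and drops the
reference via release_z3fold_page_locked(), as the last hunk below shows.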

[akpm@linux-foundation.org: coding-style fixes]
Link: http://lkml.kernel.org/r/20180430125800.444cae9706489f412ad12621@gmail.com
Signed-off-by: Vitaly Wool <vitaly.vul@sony.com>
Reported-by: Guenter Roeck <linux@roeck-us.net>
Tested-by: Guenter Roeck <linux@roeck-us.net>
Cc: <Oleksiy.Avramchenko@sony.com>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
hifive-unleashed-5.1
Vitaly Wool 2018-05-11 16:01:46 -07:00 committed by Linus Torvalds
parent ae646f0b9c
commit 6098d7e136
1 changed file with 30 additions and 12 deletions

mm/z3fold.c

@@ -144,7 +144,8 @@ enum z3fold_page_flags {
 	PAGE_HEADLESS = 0,
 	MIDDLE_CHUNK_MAPPED,
 	NEEDS_COMPACTING,
-	PAGE_STALE
+	PAGE_STALE,
+	UNDER_RECLAIM
 };
 
 /*****************
@@ -173,6 +174,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page,
 	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
 	clear_bit(NEEDS_COMPACTING, &page->private);
 	clear_bit(PAGE_STALE, &page->private);
+	clear_bit(UNDER_RECLAIM, &page->private);
 
 	spin_lock_init(&zhdr->page_lock);
 	kref_init(&zhdr->refcount);
@@ -756,6 +758,10 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 		atomic64_dec(&pool->pages_nr);
 		return;
 	}
+	if (test_bit(UNDER_RECLAIM, &page->private)) {
+		z3fold_page_unlock(zhdr);
+		return;
+	}
 	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
 		z3fold_page_unlock(zhdr);
 		return;
@@ -840,6 +846,8 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 			kref_get(&zhdr->refcount);
 			list_del_init(&zhdr->buddy);
 			zhdr->cpu = -1;
+			set_bit(UNDER_RECLAIM, &page->private);
+			break;
 		}
 
 		list_del_init(&page->lru);
@@ -887,25 +895,35 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 				goto next;
 		}
 next:
-		spin_lock(&pool->lock);
 		if (test_bit(PAGE_HEADLESS, &page->private)) {
 			if (ret == 0) {
-				spin_unlock(&pool->lock);
 				free_z3fold_page(page);
 				return 0;
 			}
-		} else if (kref_put(&zhdr->refcount, release_z3fold_page)) {
-			atomic64_dec(&pool->pages_nr);
+			spin_lock(&pool->lock);
+			list_add(&page->lru, &pool->lru);
 			spin_unlock(&pool->lock);
-			return 0;
+		} else {
+			z3fold_page_lock(zhdr);
+			clear_bit(UNDER_RECLAIM, &page->private);
+			if (kref_put(&zhdr->refcount,
+					release_z3fold_page_locked)) {
+				atomic64_dec(&pool->pages_nr);
+				return 0;
+			}
+			/*
+			 * if we are here, the page is still not completely
+			 * free. Take the global pool lock then to be able
+			 * to add it back to the lru list
+			 */
+			spin_lock(&pool->lock);
+			list_add(&page->lru, &pool->lru);
+			spin_unlock(&pool->lock);
+			z3fold_page_unlock(zhdr);
 		}
 
-		/*
-		 * Add to the beginning of LRU.
-		 * Pool lock has to be kept here to ensure the page has
-		 * not already been released
-		 */
-		list_add(&page->lru, &pool->lru);
+		/* We started off locked to we need to lock the pool back */
+		spin_lock(&pool->lock);
 	}
 	spin_unlock(&pool->lock);
 	return -EAGAIN;