
mm: migrate: lock buffers before migrate_page_move_mapping()

Lock buffers before calling into migrate_page_move_mapping() so that
migrate_page_move_mapping() itself doesn't have to know about buffers
(which is somewhat unexpected anyway) and all the buffer head logic is
kept in buffer_migrate_page().
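
In outline, the patch moves all buffer locking into buffer_migrate_page()
so the buffers are already held before the mapping is moved. Below is a
condensed sketch of the resulting flow, not the verbatim kernel code: the
buffer/page-state transfer and the bh reference counting are elided, and
the full change is in the diff further down.

static int buffer_migrate_page_sketch(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page, mode);

	/* Give up early if the page has unexpected extra references. */
	if (page_count(page) != expected_page_refs(page))
		return -EAGAIN;

	/* Lock every buffer up front, before the mapping is touched... */
	head = page_buffers(page);
	if (!buffer_migrate_lock_buffers(head, mode))
		return -EAGAIN;

	/* ...so migrate_page_move_mapping() no longer sees buffer heads. */
	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
	if (rc == MIGRATEPAGE_SUCCESS) {
		/* transfer buffers and page state to newpage (elided) */
	}

	/* Success or failure, unlock the buffers on the way out. */
	bh = head;
	do {
		unlock_buffer(bh);
		bh = bh->b_this_page;
	} while (bh != head);
	return rc;
}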

Link: http://lkml.kernel.org/r/20181211172143.7358-3-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Jan Kara 2018-12-28 00:39:05 -08:00 committed by Linus Torvalds
parent 0b3901b38d
commit cc4f11e69f
1 changed file with 13 additions and 26 deletions

mm/migrate.c

@@ -486,20 +486,6 @@ int migrate_page_move_mapping(struct address_space *mapping,
 		return -EAGAIN;
 	}
 
-	/*
-	 * In the async migration case of moving a page with buffers, lock the
-	 * buffers using trylock before the mapping is moved. If the mapping
-	 * was moved, we later failed to lock the buffers and could not move
-	 * the mapping back due to an elevated page count, we would have to
-	 * block waiting on other references to be dropped.
-	 */
-	if (mode == MIGRATE_ASYNC && head &&
-			!buffer_migrate_lock_buffers(head, mode)) {
-		page_ref_unfreeze(page, expected_count);
-		xas_unlock_irq(&xas);
-		return -EAGAIN;
-	}
-
 	/*
 	 * Now we know that no one else is looking at the page:
 	 * no turning back from here.
@@ -775,24 +761,23 @@ int buffer_migrate_page(struct address_space *mapping,
 {
 	struct buffer_head *bh, *head;
 	int rc;
+	int expected_count;
 
 	if (!page_has_buffers(page))
 		return migrate_page(mapping, newpage, page, mode);
 
+	/* Check whether page does not have extra refs before we do more work */
+	expected_count = expected_page_refs(page);
+	if (page_count(page) != expected_count)
+		return -EAGAIN;
+
 	head = page_buffers(page);
+	if (!buffer_migrate_lock_buffers(head, mode))
+		return -EAGAIN;
 
-	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
-
+	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
 	if (rc != MIGRATEPAGE_SUCCESS)
-		return rc;
-
-	/*
-	 * In the async case, migrate_page_move_mapping locked the buffers
-	 * with an IRQ-safe spinlock held. In the sync case, the buffers
-	 * need to be locked now
-	 */
-	if (mode != MIGRATE_ASYNC)
-		BUG_ON(!buffer_migrate_lock_buffers(head, mode));
+		goto unlock_buffers;
 
 	ClearPagePrivate(page);
 	set_page_private(newpage, page_private(page));
@@ -814,6 +799,8 @@ int buffer_migrate_page(struct address_space *mapping,
 	else
 		migrate_page_states(newpage, page);
 
+	rc = MIGRATEPAGE_SUCCESS;
+unlock_buffers:
 	bh = head;
 	do {
 		unlock_buffer(bh);
@@ -822,7 +809,7 @@ int buffer_migrate_page(struct address_space *mapping,
 
 	} while (bh != head);
 
-	return MIGRATEPAGE_SUCCESS;
+	return rc;
 }
 EXPORT_SYMBOL(buffer_migrate_page);
 #endif