mm: fix clear_page_dirty_for_io vs fault race
Fix msync data loss and (less importantly) dirty page accounting inaccuracies due to the race remaining in clear_page_dirty_for_io(). The deleted comment explains what the race was, and the added comments explain how it is fixed.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Miklos Szeredi <miklos@szeredi.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

[branch: hifive-unleashed-5.1]
parent: 83c54070ee
commit: 79352894b2
|
@ -1765,6 +1765,15 @@ gotten:
|
||||||
unlock:
|
unlock:
|
||||||
pte_unmap_unlock(page_table, ptl);
|
pte_unmap_unlock(page_table, ptl);
|
||||||
if (dirty_page) {
|
if (dirty_page) {
|
||||||
|
/*
|
||||||
|
* Yes, Virginia, this is actually required to prevent a race
|
||||||
|
* with clear_page_dirty_for_io() from clearing the page dirty
|
||||||
|
* bit after it clear all dirty ptes, but before a racing
|
||||||
|
* do_wp_page installs a dirty pte.
|
||||||
|
*
|
||||||
|
* do_no_page is protected similarly.
|
||||||
|
*/
|
||||||
|
wait_on_page_locked(dirty_page);
|
||||||
set_page_dirty_balance(dirty_page);
|
set_page_dirty_balance(dirty_page);
|
||||||
put_page(dirty_page);
|
put_page(dirty_page);
|
||||||
}
|
}
|
||||||
|
|
|
@ -918,6 +918,8 @@ int clear_page_dirty_for_io(struct page *page)
|
||||||
{
|
{
|
||||||
struct address_space *mapping = page_mapping(page);
|
struct address_space *mapping = page_mapping(page);
|
||||||
|
|
||||||
|
BUG_ON(!PageLocked(page));
|
||||||
|
|
||||||
if (mapping && mapping_cap_account_dirty(mapping)) {
|
if (mapping && mapping_cap_account_dirty(mapping)) {
|
||||||
/*
|
/*
|
||||||
* Yes, Virginia, this is indeed insane.
|
* Yes, Virginia, this is indeed insane.
|
||||||
|
@ -943,14 +945,19 @@ int clear_page_dirty_for_io(struct page *page)
|
||||||
* We basically use the page "master dirty bit"
|
* We basically use the page "master dirty bit"
|
||||||
* as a serialization point for all the different
|
* as a serialization point for all the different
|
||||||
* threads doing their things.
|
* threads doing their things.
|
||||||
*
|
|
||||||
* FIXME! We still have a race here: if somebody
|
|
||||||
* adds the page back to the page tables in
|
|
||||||
* between the "page_mkclean()" and the "TestClearPageDirty()",
|
|
||||||
* we might have it mapped without the dirty bit set.
|
|
||||||
*/
|
*/
|
||||||
if (page_mkclean(page))
|
if (page_mkclean(page))
|
||||||
set_page_dirty(page);
|
set_page_dirty(page);
|
||||||
|
/*
|
||||||
|
* We carefully synchronise fault handlers against
|
||||||
|
* installing a dirty pte and marking the page dirty
|
||||||
|
* at this point. We do this by having them hold the
|
||||||
|
* page lock at some point after installing their
|
||||||
|
* pte, but before marking the page dirty.
|
||||||
|
* Pages are always locked coming in here, so we get
|
||||||
|
* the desired exclusion. See mm/memory.c:do_wp_page()
|
||||||
|
* for more comments.
|
||||||
|
*/
|
||||||
if (TestClearPageDirty(page)) {
|
if (TestClearPageDirty(page)) {
|
||||||
dec_zone_page_state(page, NR_FILE_DIRTY);
|
dec_zone_page_state(page, NR_FILE_DIRTY);
|
||||||
return 1;
|
return 1;
|
||||||
|
|
Loading…
Reference in New Issue