
KVM: s390/mm: new gmap_test_and_clear_dirty function

For live migration, KVM needs to test and clear the dirty bit of guest pages.

The function for that is ptep_test_and_clear_user_dirty; to be sure we are not
racing with other code, we protect the pte while doing so. This needs to be done
within the architecture memory management code.

Signed-off-by: Dominik Dingel <dingel@linux.vnet.ibm.com>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
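
For context, a minimal sketch of how a dirty-log sync path on the KVM side might
consume the new helper, walking one memslot and harvesting each page's dirty bit.
The function name kvm_s390_sync_dirty_log and the loop shape are illustrative
assumptions, not part of this commit; gfn_to_hva_memslot and mark_page_dirty are
existing generic KVM helpers.

/* Illustrative sketch, not part of this commit: sync one memslot. */
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;

	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn < last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		/* Test and clear in one step; the pte lock is taken inside. */
		if (gmap_test_and_clear_dirty(address, kvm->arch.gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
}

Because the test and the clear happen as one operation under the pte lock, a page
cannot be re-dirtied between the read and the reset without being caught by the
next pass over the bitmap.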
Dominik Dingel 2014-03-24 14:27:58 +01:00 committed by Christian Borntraeger
parent 0a61b222df
commit a0bf4f149b
2 changed files with 23 additions and 0 deletions

arch/s390/include/asm/pgtable.h

@@ -838,6 +838,8 @@ unsigned long __gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
void __gmap_zap(unsigned long address, struct gmap *);
bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *);
void gmap_register_ipte_notifier(struct gmap_notifier *);
void gmap_unregister_ipte_notifier(struct gmap_notifier *);

arch/s390/mm/pgtable.c

@@ -1401,6 +1401,27 @@ void s390_enable_skey(void)
}
EXPORT_SYMBOL_GPL(s390_enable_skey);

/*
 * Test and reset if a guest page is dirty
 */
bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap)
{
	pte_t *pte;
	spinlock_t *ptl;
	bool dirty = false;

	pte = get_locked_pte(gmap->mm, address, &ptl);
	if (unlikely(!pte))
		return false;

	if (ptep_test_and_clear_user_dirty(gmap->mm, address, pte))
		dirty = true;

	spin_unlock(ptl);
	return dirty;
}
EXPORT_SYMBOL_GPL(gmap_test_and_clear_dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)