
mm: soft-dirty: keep soft-dirty bits over thp migration

The soft-dirty bit is designed to be preserved across page migration.  This
patch makes it work the same way for THP migration too.
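
For reference, userspace observes soft-dirty through bit 55 of each
/proc/pid/pagemap entry and clears it by writing "4" to /proc/pid/clear_refs.
The program below is an illustrative test of the behavior this patch fixes,
not part of the patch itself; it assumes CONFIG_MEM_SOFT_DIRTY,
CONFIG_ARCH_ENABLE_THP_MIGRATION and a NUMA node 1 to migrate to, and the
soft_dirty() helper is our own:

/* thp-soft-dirty.c: hypothetical demo, minimal error handling */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#define PM_SOFT_DIRTY	(1ULL << 55)	/* documented pagemap soft-dirty bit */
#define MPOL_MF_MOVE	(1 << 1)	/* from uapi linux/mempolicy.h */
#define THP_SIZE	(2UL << 20)

static int soft_dirty(void *addr)
{
	int fd = open("/proc/self/pagemap", O_RDONLY);
	uint64_t ent = 0;

	pread(fd, &ent, sizeof(ent),
	      (uint64_t)((uintptr_t)addr / getpagesize()) * sizeof(ent));
	close(fd);
	return !!(ent & PM_SOFT_DIRTY);
}

int main(void)
{
	void *p = mmap(NULL, THP_SIZE, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	int fd = open("/proc/self/clear_refs", O_WRONLY);
	int node = 1, status = -1;

	madvise(p, THP_SIZE, MADV_HUGEPAGE);
	memset(p, 1, THP_SIZE);		/* fault in, hopefully as a THP */
	write(fd, "4", 1);		/* clear all soft-dirty bits */
	close(fd);
	memset(p, 2, THP_SIZE);		/* set soft-dirty again */

	/* migrate the THP; the bit should survive the round trip */
	syscall(SYS_move_pages, 0, 1UL, &p, &node, &status, MPOL_MF_MOVE);
	printf("soft-dirty after migration: %d\n", soft_dirty(p));
	return 0;
}

Before this change, the THP migration path built the migration pmd without
consulting the old pmd's soft-dirty state, so a test like the above would
report 0 after move_pages(); with it, the bit is carried in the migration
entry and restored by remove_migration_pmd().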

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Zi Yan <zi.yan@cs.rutgers.edu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: David Nellans <dnellans@nvidia.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Naoya Horiguchi 2017-09-08 16:11:04 -07:00 committed by Linus Torvalds
parent 84c3fc4e9c
commit ab6e3d0939
5 changed files with 90 additions and 13 deletions

arch/x86/include/asm/pgtable.h

@@ -1172,6 +1172,23 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
 {
 	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
 }
+
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
+}
+#endif
 #endif
 
 #define PKRU_AD_BIT 0x1

fs/proc/task_mmu.c

@@ -978,17 +978,22 @@ static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
 {
 	pmd_t pmd = *pmdp;
 
-	/* See comment in change_huge_pmd() */
-	pmdp_invalidate(vma, addr, pmdp);
-	if (pmd_dirty(*pmdp))
-		pmd = pmd_mkdirty(pmd);
-	if (pmd_young(*pmdp))
-		pmd = pmd_mkyoung(pmd);
-
-	pmd = pmd_wrprotect(pmd);
-	pmd = pmd_clear_soft_dirty(pmd);
-
-	set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
+	if (pmd_present(pmd)) {
+		/* See comment in change_huge_pmd() */
+		pmdp_invalidate(vma, addr, pmdp);
+		if (pmd_dirty(*pmdp))
+			pmd = pmd_mkdirty(pmd);
+		if (pmd_young(*pmdp))
+			pmd = pmd_mkyoung(pmd);
+
+		pmd = pmd_wrprotect(pmd);
+		pmd = pmd_clear_soft_dirty(pmd);
+
+		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
+	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
+		pmd = pmd_swp_clear_soft_dirty(pmd);
+		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
+	}
 }
 #else
 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,

include/asm-generic/pgtable.h

@@ -630,7 +630,24 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
 #define arch_start_context_switch(prev)	do {} while (0)
 #endif
 
-#ifndef CONFIG_HAVE_ARCH_SOFT_DIRTY
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+	return pmd;
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+	return 0;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+	return pmd;
+}
+#endif
+#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
 static inline int pte_soft_dirty(pte_t pte)
 {
 	return 0;
@@ -675,6 +692,21 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
 {
 	return pte;
 }
+
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+	return pmd;
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+	return 0;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+	return pmd;
+}
 #endif
 
 #ifndef __HAVE_PFNMAP_TRACKING

include/linux/swapops.h

@@ -183,6 +183,8 @@ static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
 {
 	swp_entry_t arch_entry;
 
+	if (pmd_swp_soft_dirty(pmd))
+		pmd = pmd_swp_clear_soft_dirty(pmd);
 	arch_entry = __pmd_to_swp_entry(pmd);
 	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
 }

mm/huge_memory.c

@@ -937,6 +937,8 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		if (is_write_migration_entry(entry)) {
 			make_migration_entry_read(&entry);
 			pmd = swp_entry_to_pmd(entry);
+			if (pmd_swp_soft_dirty(*src_pmd))
+				pmd = pmd_swp_mksoft_dirty(pmd);
 			set_pmd_at(src_mm, addr, src_pmd, pmd);
 		}
 		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
@@ -1756,6 +1758,17 @@ static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
 }
 #endif
 
+static pmd_t move_soft_dirty_pmd(pmd_t pmd)
+{
+#ifdef CONFIG_MEM_SOFT_DIRTY
+	if (unlikely(is_pmd_migration_entry(pmd)))
+		pmd = pmd_swp_mksoft_dirty(pmd);
+	else if (pmd_present(pmd))
+		pmd = pmd_mksoft_dirty(pmd);
+#endif
+	return pmd;
+}
+
 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 		  unsigned long new_addr, unsigned long old_end,
 		  pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
@@ -1798,7 +1811,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
 			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
 		}
-		set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
+		pmd = move_soft_dirty_pmd(pmd);
+		set_pmd_at(mm, new_addr, new_pmd, pmd);
 		if (new_ptl != old_ptl)
 			spin_unlock(new_ptl);
 		if (force_flush)
@@ -1846,6 +1860,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			 */
 			make_migration_entry_read(&entry);
 			newpmd = swp_entry_to_pmd(entry);
+			if (pmd_swp_soft_dirty(*pmd))
+				newpmd = pmd_swp_mksoft_dirty(newpmd);
 			set_pmd_at(mm, addr, pmd, newpmd);
 		}
 		goto unlock;
@@ -2824,6 +2840,7 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 	unsigned long address = pvmw->address;
 	pmd_t pmdval;
 	swp_entry_t entry;
+	pmd_t pmdswp;
 
 	if (!(pvmw->pmd && !pvmw->pte))
 		return;
@@ -2837,8 +2854,10 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 	if (pmd_dirty(pmdval))
 		set_page_dirty(page);
 	entry = make_migration_entry(page, pmd_write(pmdval));
-	pmdval = swp_entry_to_pmd(entry);
-	set_pmd_at(mm, address, pvmw->pmd, pmdval);
+	pmdswp = swp_entry_to_pmd(entry);
+	if (pmd_soft_dirty(pmdval))
+		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
+	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
 	page_remove_rmap(page, true);
 	put_page(page);
 
@@ -2861,6 +2880,8 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
 	entry = pmd_to_swp_entry(*pvmw->pmd);
 	get_page(new);
 	pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));
+	if (pmd_swp_soft_dirty(*pvmw->pmd))
+		pmde = pmd_mksoft_dirty(pmde);
 	if (is_write_migration_entry(entry))
 		pmde = maybe_pmd_mkwrite(pmde, vma);