
hugetlbfs: add missing TLB flush to hugetlb_cow()

A COW break on a hugetlbfs page with page_count > 1 will set a new pte with
set_huge_pte_at(), without any tlb flush operation.  The old pte will remain
in the tlb, and subsequent write access to the page will result in a page
fault loop for as long as it takes until the tlb is flushed from somewhere
else.  This patch introduces an architecture-specific huge_ptep_clear_flush()
function, which is called before the set_huge_pte_at() in hugetlb_cow().

ATTENTION: This is just a nop on all architectures for now; the s390
implementation will come with our large page patch later.  Other architectures
should define their own huge_ptep_clear_flush() if needed.
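
For illustration only, a minimal sketch of what a non-nop implementation could
look like on an architecture that actually needs the flush, built solely from
helpers already visible in these headers (huge_ptep_get_and_clear(),
flush_tlb_range(), HPAGE_MASK/HPAGE_SIZE).  This is not the deferred s390
version, which arrives with the large page patch mentioned above:

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
                                         unsigned long addr, pte_t *ptep)
{
        /* Clear the huge pte first, so no fresh TLB entry can be loaded... */
        huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
        /* ...then flush the stale translation covering the whole huge page. */
        flush_tlb_range(vma, addr & HPAGE_MASK,
                        (addr & HPAGE_MASK) + HPAGE_SIZE);
}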

Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Gerald Schaefer 2008-04-28 02:13:28 -07:00 committed by Linus Torvalds
parent 6d779079bf
commit 8fe627ec5b
6 changed files with 26 additions and 0 deletions


@@ -34,4 +34,9 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
         return ptep_get_and_clear(mm, addr, ptep);
 }
 
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+                                         unsigned long addr, pte_t *ptep)
+{
+}
+
 #endif /* _ASM_IA64_HUGETLB_H */


@@ -34,4 +34,9 @@ static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
 {
 }
 
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+                                         unsigned long addr, pte_t *ptep)
+{
+}
+
 #endif /* _ASM_POWERPC_HUGETLB_H */


@@ -46,4 +46,9 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
         return ptep_get_and_clear(mm, addr, ptep);
 }
 
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+                                         unsigned long addr, pte_t *ptep)
+{
+}
+
 #endif /* _ASM_SH_HUGETLB_H */


@@ -39,4 +39,9 @@ static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb,
         free_pgd_range(tlb, addr, end, floor, ceiling);
 }
 
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+                                         unsigned long addr, pte_t *ptep)
+{
+}
+
 #endif /* _ASM_SPARC64_HUGETLB_H */


@@ -46,4 +46,9 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
         return ptep_get_and_clear(mm, addr, ptep);
 }
 
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+                                         unsigned long addr, pte_t *ptep)
+{
+}
+
 #endif /* _ASM_X86_HUGETLB_H */


@@ -892,6 +892,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
         ptep = huge_pte_offset(mm, address & HPAGE_MASK);
         if (likely(pte_same(*ptep, pte))) {
                 /* Break COW */
+                huge_ptep_clear_flush(vma, address, ptep);
                 set_huge_pte_at(mm, address, ptep,
                                 make_huge_pte(vma, new_page, 1));
                 /* Make the old page be freed below */
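
Read as plain code rather than a diff, the COW-break sequence in this hunk is
roughly the fragment below (context trimmed, trailing lines elided); the added
comments spell out why the flush has to precede set_huge_pte_at(): a CPU still
holding the old, write-protected translation in its TLB would otherwise keep
faulting on writes even after the in-memory pte is correct.

        ptep = huge_pte_offset(mm, address & HPAGE_MASK);
        if (likely(pte_same(*ptep, pte))) {
                /* Break COW: clear the old pte and flush its stale TLB entry... */
                huge_ptep_clear_flush(vma, address, ptep);
                /* ...then install the writable mapping of the new page. */
                set_huge_pte_at(mm, address, ptep,
                                make_huge_pte(vma, new_page, 1));
                /* Make the old page be freed below */
                ...
        }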