
nios2: update_mmu_cache preload the TLB with the new PTE

Rather than flush the TLB entry when installing a new PTE and rely on
the fast TLB reload handler to re-fill the TLB, just refill the TLB
entry when removing the old one.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Ley Foon Tan <ley.foon.tan@intel.com>
Nicholas Piggin 2018-11-07 10:35:34 +08:00 committed by Ley Foon Tan
parent b6a1046343
commit 3ac23944de
3 changed files with 36 additions and 9 deletions
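
In effect, the patch turns a flush-and-refault sequence into a direct
preload. A rough sketch of the call-site behaviour before and after,
for illustration only (not code from the commit):

	/* before: drop the stale entry; the very next access to 'address'
	 * takes a fast TLB miss and re-fills the entry from the page table
	 */
	flush_tlb_page(vma, address);

	/* after: overwrite the stale entry with the new PTE in the same
	 * pass over the TLB ways, so that extra miss never happens
	 */
	reload_tlb_page(vma, address, pte);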

arch/nios2/include/asm/tlbflush.h

@@ -30,6 +30,9 @@ struct mm_struct;
  *  - flush_tlb_page(vma, address) flushes a page
  *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
  *  - flush_tlb_kernel_page(address) flushes a kernel page
+ *
+ *  - reload_tlb_page(vma, address, pte) flushes the TLB for address like
+ *    flush_tlb_page, then replaces it with a TLB for pte.
  */
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm(struct mm_struct *mm);
@@ -48,4 +51,7 @@ static inline void flush_tlb_kernel_page(unsigned long address)
 	flush_tlb_kernel_range(address, address + PAGE_SIZE);
 }
 
+extern void reload_tlb_page(struct vm_area_struct *vma, unsigned long addr,
+			    pte_t pte);
+
 #endif /* _ASM_NIOS2_TLBFLUSH_H */

arch/nios2/mm/cacheflush.c

@@ -198,13 +198,14 @@ void flush_dcache_page(struct page *page)
 EXPORT_SYMBOL(flush_dcache_page);
 
 void update_mmu_cache(struct vm_area_struct *vma,
-		      unsigned long address, pte_t *pte)
+		      unsigned long address, pte_t *ptep)
 {
-	unsigned long pfn = pte_pfn(*pte);
+	pte_t pte = *ptep;
+	unsigned long pfn = pte_pfn(pte);
 	struct page *page;
 	struct address_space *mapping;
 
-	flush_tlb_page(vma, address);
+	reload_tlb_page(vma, address, pte);
 
 	if (!pfn_valid(pfn))
 		return;

arch/nios2/mm/tlb.c

@@ -43,13 +43,11 @@ static unsigned long pteaddr_invalid(unsigned long addr)
  * This one is only used for pages with the global bit set so we don't care
  * much about the ASID.
  */
-void flush_tlb_one_pid(unsigned long addr, unsigned long mmu_pid)
+static void replace_tlb_one_pid(unsigned long addr, unsigned long mmu_pid, unsigned long tlbacc)
 {
 	unsigned int way;
 	unsigned long org_misc, pid_misc;
 
-	pr_debug("Flush tlb-entry for vaddr=%#lx\n", addr);
-
 	/* remember pid/way until we return. */
 	get_misc_and_pid(&org_misc, &pid_misc);
 
@@ -72,10 +70,11 @@ void flush_tlb_one_pid(unsigned long addr, unsigned long mmu_pid)
 		if (pid != mmu_pid)
 			continue;
 
-		tlbmisc = TLBMISC_WE | (way << TLBMISC_WAY_SHIFT);
+		tlbmisc = mmu_pid | TLBMISC_WE | (way << TLBMISC_WAY_SHIFT);
 		WRCTL(CTL_TLBMISC, tlbmisc);
-		WRCTL(CTL_PTEADDR, pteaddr_invalid(addr));
-		WRCTL(CTL_TLBACC, 0);
+		if (tlbacc == 0)
+			WRCTL(CTL_PTEADDR, pteaddr_invalid(addr));
+		WRCTL(CTL_TLBACC, tlbacc);
 
 		/*
 		 * There should be only a single entry that maps a
@@ -86,6 +85,20 @@ void flush_tlb_one_pid(unsigned long addr, unsigned long mmu_pid)
 	WRCTL(CTL_TLBMISC, org_misc);
 }
 
+static void flush_tlb_one_pid(unsigned long addr, unsigned long mmu_pid)
+{
+	pr_debug("Flush tlb-entry for vaddr=%#lx\n", addr);
+
+	replace_tlb_one_pid(addr, mmu_pid, 0);
+}
+
+static void reload_tlb_one_pid(unsigned long addr, unsigned long mmu_pid, pte_t pte)
+{
+	pr_debug("Reload tlb-entry for vaddr=%#lx\n", addr);
+
+	replace_tlb_one_pid(addr, mmu_pid, pte_val(pte));
+}
+
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end)
 {
@@ -97,6 +110,13 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	}
 }
 
+void reload_tlb_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+	unsigned long mmu_pid = get_pid_from_context(&vma->vm_mm->context);
+
+	reload_tlb_one_pid(addr, mmu_pid, pte);
+}
+
 /*
  * This one is only used for pages with the global bit set so we don't care
  * much about the ASID.
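
For readers unfamiliar with the nios2 MMU registers, the effect of the
unified helper can be sketched as below. The comments are an
interpretation of the diff, assuming TLBACC carries the translation and
permission bits and PTEADDR carries the tag:

	/* once the way holding {addr, mmu_pid} has been found ... */

	/* keep the owning PID in the tag: a reloaded entry must still
	 * belong to the right address space (for a flush the PID is
	 * harmless, since the entry will no longer match anything)
	 */
	tlbmisc = mmu_pid | TLBMISC_WE | (way << TLBMISC_WAY_SHIFT);
	WRCTL(CTL_TLBMISC, tlbmisc);

	if (tlbacc == 0)
		/* flush: retarget the tag at an address that can never
		 * be looked up, so the entry never hits again
		 */
		WRCTL(CTL_PTEADDR, pteaddr_invalid(addr));

	/* reload: the tag still matches 'addr', so writing the new PTE
	 * value makes the entry live immediately; writing 0 clears it
	 */
	WRCTL(CTL_TLBACC, tlbacc);

flush_tlb_one_pid() and reload_tlb_one_pid() are then thin wrappers
that select between the two behaviours through the tlbacc argument
alone.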