diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index ba2333216c5b..43ef3e99fdd1 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -134,8 +134,19 @@ typedef pte_t *pte_addr_t;
 #define pgtable_cache_init()	do { } while (0)
 
 struct vm_area_struct;
-extern void update_mmu_cache(struct vm_area_struct * vma,
-			     unsigned long address, pte_t pte);
+
+extern void __update_cache(struct vm_area_struct *vma,
+			   unsigned long address, pte_t pte);
+extern void __update_tlb(struct vm_area_struct *vma,
+			 unsigned long address, pte_t pte);
+
+static inline void
+update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+	__update_cache(vma, address, pte);
+	__update_tlb(vma, address, pte);
+}
+
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern void paging_init(void);
 extern void page_table_range_init(unsigned long start, unsigned long end,
diff --git a/arch/sh/mm/pg-mmu.c b/arch/sh/mm/pg-mmu.c
index 356d2cdcb209..8602f68af4c8 100644
--- a/arch/sh/mm/pg-mmu.c
+++ b/arch/sh/mm/pg-mmu.c
@@ -134,3 +134,24 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
 	kunmap_atomic(kaddr, KM_USER0);
 }
 EXPORT_SYMBOL(clear_user_highpage);
+
+void __update_cache(struct vm_area_struct *vma,
+		    unsigned long address, pte_t pte)
+{
+	struct page *page;
+	unsigned long pfn = pte_pfn(pte);
+
+	if (!boot_cpu_data.dcache.n_aliases)
+		return;
+
+	page = pfn_to_page(pfn);
+	if (pfn_valid(pfn) && page_mapping(page)) {
+		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
+		if (dirty) {
+			unsigned long addr = (unsigned long)page_address(page);
+
+			if (pages_do_alias(addr, address & PAGE_MASK))
+				__flush_wback_region((void *)addr, PAGE_SIZE);
+		}
+	}
+}
diff --git a/arch/sh/mm/tlb-nommu.c b/arch/sh/mm/tlb-nommu.c
index 71c742b5aee3..0ef5429943df 100644
--- a/arch/sh/mm/tlb-nommu.c
+++ b/arch/sh/mm/tlb-nommu.c
@@ -46,10 +46,13 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	BUG();
 }
 
-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+}
+
+void __update_cache(struct vm_area_struct *vma,
+		    unsigned long address, pte_t pte)
 {
-	BUG();
 }
 
 void __init page_table_range_init(unsigned long start, unsigned long end,
diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c
index c39b77363352..9aabd313cede 100644
--- a/arch/sh/mm/tlb-pteaex.c
+++ b/arch/sh/mm/tlb-pteaex.c
@@ -16,15 +16,14 @@
 #include
 #include
 
-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
-	unsigned long flags;
-	unsigned long pteval;
-	unsigned long vpn;
+	unsigned long flags, pteval, vpn;
 
-	/* Ptrace may call this routine. */
-	if (vma && current->active_mm != vma->vm_mm)
+	/*
+	 * Handle debugger faulting in for debugee.
+	 */
+	if (current->active_mm != vma->vm_mm)
 		return;
 
 	local_irq_save(flags);
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c
index 9b8459c74abd..425f1f23cf93 100644
--- a/arch/sh/mm/tlb-sh3.c
+++ b/arch/sh/mm/tlb-sh3.c
@@ -27,32 +27,16 @@
 #include
 #include
 
-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
-	unsigned long flags;
-	unsigned long pteval;
-	unsigned long vpn;
-	unsigned long pfn = pte_pfn(pte);
-	struct page *page;
+	unsigned long flags, pteval, vpn;
 
-	/* Ptrace may call this routine. */
-	if (vma && current->active_mm != vma->vm_mm)
+	/*
+	 * Handle debugger faulting in for debugee.
+	 */
+	if (current->active_mm != vma->vm_mm)
 		return;
 
-	page = pfn_to_page(pfn);
-	if (pfn_valid(pfn) && page_mapping(page)) {
-#if defined(CONFIG_SH7705_CACHE_32KB)
-		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-		if (dirty) {
-			unsigned long addr = (unsigned long)page_address(page);
-
-			if (pages_do_alias(addr, address & PAGE_MASK))
-				__flush_wback_region((void *)addr, PAGE_SIZE);
-		}
-#endif
-	}
-
 	local_irq_save(flags);
 
 	/* Set PTEH register */
@@ -93,4 +77,3 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
 	for (i = 0; i < ways; i++)
 		ctrl_outl(data, addr + (i << 8));
 }
-
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index cf50082d2435..81199f1e5945 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -15,33 +15,16 @@
 #include
 #include
 
-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
-	unsigned long flags;
-	unsigned long pteval;
-	unsigned long vpn;
-	unsigned long pfn = pte_pfn(pte);
-	struct page *page;
+	unsigned long flags, pteval, vpn;
 
-	/* Ptrace may call this routine. */
-	if (vma && current->active_mm != vma->vm_mm)
+	/*
+	 * Handle debugger faulting in for debugee.
+	 */
+	if (current->active_mm != vma->vm_mm)
 		return;
 
-	page = pfn_to_page(pfn);
-	if (pfn_valid(pfn) && page_mapping(page)) {
-#ifndef CONFIG_SMP
-		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-		if (dirty) {
-
-			unsigned long addr = (unsigned long)page_address(page);
-
-			if (pages_do_alias(addr, address & PAGE_MASK))
-				__flush_wback_region((void *)addr, PAGE_SIZE);
-		}
-#endif
-	}
-
 	local_irq_save(flags);
 
 	/* Set PTEH register */
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 3ce40ea34824..f2e44e9ffb75 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -329,22 +329,6 @@ do_sigbus:
 	goto no_context;
 }
 
-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
-{
-	/*
-	 * This appears to get called once for every pte entry that gets
-	 * established => I don't think it's efficient to try refilling the
-	 * TLBs with the pages - some may not get accessed even. Also, for
-	 * executable pages, it is impossible to determine reliably here which
-	 * TLB they should be mapped into (or both even).
-	 *
-	 * So, just do nothing here and handle faults on demand. In the
-	 * TLBMISS handling case, the refill is now done anyway after the pte
-	 * has been fixed up, so that deals with most useful cases.
-	 */
-}
-
 void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
 	unsigned long long match, pteh=0, lpage;
@@ -482,3 +466,12 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	/* FIXME: Optimize this later.. */
 	flush_tlb_all();
 }
+
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+}
+
+void __update_cache(struct vm_area_struct *vma,
+		    unsigned long address, pte_t pte)
+{
+}