alistair23-linux/include/asm-parisc/tlbflush.h
Hugh Dickins 663b97f7ef [PATCH] mm: flush_tlb_range outside ptlock
There was one small but very significant change in the previous patch:
mprotect's flush_tlb_range fell outside the page_table_lock. That is how it
is in 2.4, but that doesn't prove it safe in 2.6.

On some architectures flush_tlb_range comes to the same as flush_tlb_mm, which
has always been called from outside page_table_lock in dup_mmap, and is so
proved safe.  Others required a deeper audit: I could find no reliance on
page_table_lock in any; but in ia64 and parisc found some code which looks a
bit as if it might want preemption disabled.  That won't do any actual harm,
so pending a decision from the maintainers, disable preemption there.

Remove comments on page_table_lock from flush_tlb_mm, flush_tlb_range and
flush_tlb_page entries in cachetlb.txt: they were rather misleading (what
generic code does is different from what usually happens), the rules are now
changing, and it's not yet clear where we'll end up (will the generic
tlb_flush_mmu happen always under lock?  never under lock?  or sometimes under
and sometimes not?).

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-29 21:40:40 -07:00
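
For context, a minimal sketch of the calling pattern the message describes,
assuming a 2.6-era mm; example_change_protection() is a hypothetical helper,
and only flush_tlb_range() comes from the header below:

	#include <linux/mm.h>
	#include <asm/tlbflush.h>

	/* Hypothetical mprotect-style helper: rewrite the PTEs under the
	 * page_table_lock, then purge the stale translations after dropping
	 * it.  The flush is safe outside the lock on parisc because
	 * flush_tlb_range() disables preemption around its purge loop. */
	static void example_change_protection(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
	{
		struct mm_struct *mm = vma->vm_mm;

		spin_lock(&mm->page_table_lock);
		/* ... update the page table entries for [start, end) ... */
		spin_unlock(&mm->page_table_lock);

		flush_tlb_range(vma, start, end);	/* now outside the lock */
	}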


#ifndef _PARISC_TLBFLUSH_H
#define _PARISC_TLBFLUSH_H

/* TLB flushing routines.... */

#include <linux/config.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>

/* This is for the serialisation of PxTLB broadcasts.  At least on the
 * N class systems, only one PxTLB inter-processor broadcast can be
 * active at any one time on the Merced bus.  This TLB purge
 * synchronisation is fairly lightweight and harmless, so we activate
 * it on all SMP systems, not just the N class. */
#ifdef CONFIG_SMP
extern spinlock_t pa_tlb_lock;

#define purge_tlb_start()	spin_lock(&pa_tlb_lock)
#define purge_tlb_end()		spin_unlock(&pa_tlb_lock)
#else
#define purge_tlb_start()	do { } while (0)
#define purge_tlb_end()		do { } while (0)
#endif
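
/* Every per-page purge sequence below follows the same discipline:
 * purge_tlb_start() before the first pdtlb/pitlb and purge_tlb_end()
 * after the last, so that at most one PxTLB broadcast is in flight. */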
extern void flush_tlb_all(void);
/*
 * flush_tlb_mm()
 *
 * XXX This code is NOT valid for HP-UX compatibility processes
 * (although it will probably work 99% of the time). HP-UX
 * processes are free to play with the space ids and save them
 * over long periods of time, etc., so we have to preserve the
 * space and just flush the entire TLB. We need to check the
 * personality in order to do that, but the personality is not
 * currently being set correctly.
 *
 * Of course, Linux processes could do the same thing, but
 * we don't support that (and the compilers, dynamic linker,
 * etc. do not do that).
 */
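
/* On UP we never purge here at all: recycling the space id gives the mm
 * a fresh tag, so its stale translations can no longer match.  That trick
 * is presumably unsafe on SMP, where another CPU may still hold the old
 * space id, hence the full flush_tlb_all() there. */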
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm); /* Should never happen */

#ifdef CONFIG_SMP
	flush_tlb_all();
#else
	if (mm) {
		if (mm->context != 0)
			free_sid(mm->context);
		mm->context = alloc_sid();
		if (mm == current->active_mm)
			load_context(mm->context);
	}
#endif
}
static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	/* nothing to do */
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	/* For one page, it's not worth testing the split_tlb variable */

	mb();
	mtsp(vma->vm_mm->context, 1);	/* load the mm's space id into %sr1 */
	purge_tlb_start();
	pdtlb(addr);			/* purge the data TLB entry */
	pitlb(addr);			/* purge the instruction TLB entry */
	purge_tlb_end();
}
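
/* split_tlb is set on CPUs with separate instruction and data TLBs, so a
 * range purge must issue both pdtlb and pitlb; a combined TLB only needs
 * pdtlb. */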
static inline void flush_tlb_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	unsigned long npages;

	npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (npages >= 512)	/* 2MB of space: arbitrary, should be tuned */
		flush_tlb_all();
	else {
		preempt_disable();	/* mtsp() loads a per-CPU space register */
		mtsp(vma->vm_mm->context, 1);
		purge_tlb_start();
		if (split_tlb) {
			while (npages--) {
				pdtlb(start);
				pitlb(start);
				start += PAGE_SIZE;
			}
		} else {
			while (npages--) {
				pdtlb(start);
				start += PAGE_SIZE;
			}
		}
		purge_tlb_end();
		preempt_enable();
	}
}
#define flush_tlb_kernel_range(start, end) flush_tlb_all()

#endif /* _PARISC_TLBFLUSH_H */