
[POWERPC] Fixes for the SLB shadow buffer code

On a machine with hardware 64kB pages and a kernel configured for a
64kB base page size, we need to change the vmalloc segment from 64kB
pages to 4kB pages if some driver creates a non-cacheable mapping in
the vmalloc area.  However, we never updated the SLB shadow buffer.
This fixes it (the demotion path is sketched below).  Thanks to paulus
for finding this.

Also added some write barriers to ensure the shadow buffer contents
are always consistent; the store ordering is sketched below.

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Michael Neuling 2007-08-03 11:55:39 +10:00 committed by Paul Mackerras
parent 5628244059
commit 67439b76f2
4 changed files with 23 additions and 11 deletions

arch/powerpc/kernel/entry_64.S

@@ -389,8 +389,11 @@ BEGIN_FTR_SECTION
 	ld	r9,PACA_SLBSHADOWPTR(r13)
 	li	r12,0
 	std	r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
+	eieio
 	std	r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
+	eieio
 	std	r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */
+	eieio

 	slbie	r6
 	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
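
The three eieio instructions above implement a small publish protocol:
invalidate, install the new VSID, then re-validate.  A minimal C sketch
of the same ordering, assuming an illustrative two-word entry layout
(the real save_area lives in the paca, and store_barrier() here stands
in for eieio/smp_wmb()):

#include <stdint.h>

/* Store-vs-store barrier; the asm above uses eieio directly and the
 * C path in slb.c uses smp_wmb().  Illustrative name. */
#define store_barrier()	__asm__ __volatile__("eieio" ::: "memory")

/* Illustrative layout only. */
struct shadow_slbe {
	uint64_t esid;		/* carries the valid bit */
	uint64_t vsid;
};

static void shadow_publish(volatile struct shadow_slbe *e,
			   uint64_t new_esid, uint64_t new_vsid)
{
	e->esid = 0;		/* 1. invalidate the entry first     */
	store_barrier();
	e->vsid = new_vsid;	/* 2. install the new VSID           */
	store_barrier();
	e->esid = new_esid;	/* 3. publish: valid ESID goes last  */
	store_barrier();
}

A reader that observes a nonzero ESID is therefore guaranteed to see
the matching VSID.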

arch/powerpc/mm/hash_utils_64.c

@@ -759,7 +759,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 		    mmu_psize_defs[mmu_vmalloc_psize].sllp) {
 			get_paca()->vmalloc_sllp =
 				mmu_psize_defs[mmu_vmalloc_psize].sllp;
-			slb_flush_and_rebolt();
+			slb_vmalloc_update();
 		}
 #endif /* CONFIG_PPC_64K_PAGES */
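
For context, this is the path taken when a driver creates a
non-cacheable mapping in the vmalloc area.  A hypothetical wrapper
showing how the pieces fit together (the wrapper itself is
illustrative; the variables and slb_vmalloc_update() come from the
hunks in this commit):

/* Illustrative only: demoting the vmalloc segment to 4kB pages. */
static void demote_vmalloc_segment(void)
{
	/* Switch the vmalloc area to 4k pages ... */
	mmu_vmalloc_psize = MMU_PAGE_4K;
	/* ... mirror the new segment flags into this CPU's paca ... */
	get_paca()->vmalloc_sllp = mmu_psize_defs[MMU_PAGE_4K].sllp;
	/* ... then rewrite shadow entry 1 and rebolt the real SLB,
	 * which is what the new slb_vmalloc_update() does. */
	slb_vmalloc_update();
}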

arch/powerpc/mm/slb.c

@@ -53,7 +53,8 @@ static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
 	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
 }

-static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
+static inline void slb_shadow_update(unsigned long ea,
+				     unsigned long flags,
 				     unsigned long entry)
 {
 	/*
@@ -61,11 +62,11 @@ static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
 	 * updating it.
 	 */
 	get_slb_shadow()->save_area[entry].esid = 0;
-	barrier();
-	get_slb_shadow()->save_area[entry].vsid = vsid;
-	barrier();
-	get_slb_shadow()->save_area[entry].esid = esid;
+	smp_wmb();
+	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, flags);
+	smp_wmb();
+	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, entry);
+	smp_wmb();
 }

 static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
@@ -76,8 +77,7 @@ static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
 	 * we don't get a stale entry here if we get preempted by PHYP
 	 * between these two statements.
 	 */
-	slb_shadow_update(mk_esid_data(ea, entry), mk_vsid_data(ea, flags),
-			  entry);
+	slb_shadow_update(ea, flags, entry);

 	asm volatile("slbmte %0,%1" :
 		     : "r" (mk_vsid_data(ea, flags)),
@@ -104,8 +104,7 @@ void slb_flush_and_rebolt(void)
 		ksp_esid_data &= ~SLB_ESID_V;

 	/* Only third entry (stack) may change here so only resave that */
-	slb_shadow_update(ksp_esid_data,
-			  mk_vsid_data(ksp_esid_data, lflags), 2);
+	slb_shadow_update(get_paca()->kstack, lflags, 2);

 	/* We need to do this all in asm, so we're sure we don't touch
 	 * the stack between the slbia and rebolting it. */
@@ -123,6 +122,15 @@ void slb_flush_and_rebolt(void)
 		     : "memory");
 }

+void slb_vmalloc_update(void)
+{
+	unsigned long vflags;
+
+	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
+	slb_shadow_update(VMALLOC_START, vflags, 1);
+	slb_flush_and_rebolt();
+}
+
 /* Flush all user entries from the segment table of the current processor. */
 void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 {
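
Note the change from barrier() to smp_wmb() in slb_shadow_update():
barrier() only prevents compiler reordering, but the shadow buffer is
read by the hypervisor, a genuine external observer, so the stores must
also be ordered by the CPU.  An illustrative interleaving that the
hardware barrier rules out:

/*
 * Without a hardware barrier the CPU may make stores visible out of
 * order (illustrative trace, not kernel code):
 *
 *   Linux CPU                          PHYP (may read at any time)
 *   ----------------------------      ---------------------------
 *   save_area[n].esid = 0;
 *   save_area[n].esid = new;    <--   loads esid: valid and new
 *   (vsid store still queued)         loads vsid: STALE value
 *   save_area[n].vsid = new;
 *
 * With smp_wmb() between each store, a reader sees the entry either
 * invalid (esid == 0) or as a consistent (esid, vsid) pair.
 */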

include/asm-powerpc/mmu-hash64.h

@@ -262,6 +262,7 @@ extern void slb_initialize(void);
 extern void slb_flush_and_rebolt(void);
 extern void stab_initialize(unsigned long stab);
+extern void slb_vmalloc_update(void);

 #endif /* __ASSEMBLY__ */

 /*