
mm: fix race in kunmap_atomic()

Christoph reported a nice splat which illustrated a race in the new stack-based
kmap_atomic implementation.

The problem is that we pop our stack slot before we're completely done
resetting its state -- in particular, before clearing the PTE (on some
architectures that is only done under CONFIG_DEBUG_HIGHMEM).  If an interrupt
happens before we actually clear the PTE used for the last slot, that
interrupt can reuse the slot in a dirty state, which triggers a BUG in
kmap_atomic().

Fix this by introducing kmap_atomic_idx(), which reports the current slot
index without actually releasing it; use that to find the PTE, and delay the
_pop() until after we're completely done.
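In sketch form (mirroring the x86 hunk below), the fixed teardown keeps the
slot owned until its PTE is gone:

        /* new __kunmap_atomic() ordering -- sketch only */
        type = kmap_atomic_idx();                /* peek: slot is still ours      */
        idx = type + KM_TYPE_NR * smp_processor_id();
        kpte_clear_flush(kmap_pte - idx, vaddr); /* tear the mapping down first   */
        kmap_atomic_idx_pop();                   /* only now release the slot     */
        pagefault_enable();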

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reported-by: Christoph Hellwig <hch@infradead.org>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Peter Zijlstra 2010-10-27 15:32:58 -07:00 committed by Linus Torvalds
parent a8e23a2918
commit 20273941f2
10 changed files with 26 additions and 9 deletions

arch/arm/mm/highmem.c

@@ -89,7 +89,7 @@ void __kunmap_atomic(void *kvaddr)
         int idx, type;
 
         if (kvaddr >= (void *)FIXADDR_START) {
-                type = kmap_atomic_idx_pop();
+                type = kmap_atomic_idx();
                 idx = type + KM_TYPE_NR * smp_processor_id();
 
                 if (cache_is_vivt())
@@ -101,6 +101,7 @@ void __kunmap_atomic(void *kvaddr)
 #else
                 (void) idx;  /* to kill a warning */
 #endif
+                kmap_atomic_idx_pop();
         } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
                 /* this address was obtained through kmap_high_get() */
                 kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));

arch/frv/mm/highmem.c

@@ -68,7 +68,7 @@ EXPORT_SYMBOL(__kmap_atomic);
 
 void __kunmap_atomic(void *kvaddr)
 {
-        int type = kmap_atomic_idx_pop();
+        int type = kmap_atomic_idx();
         switch (type) {
         case 0:         __kunmap_atomic_primary(4, 6);  break;
         case 1:         __kunmap_atomic_primary(5, 7);  break;
@@ -83,6 +83,7 @@ void __kunmap_atomic(void *kvaddr)
         default:
                 BUG();
         }
+        kmap_atomic_idx_pop();
         pagefault_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);

arch/mips/mm/highmem.c

@@ -74,7 +74,7 @@ void __kunmap_atomic(void *kvaddr)
                 return;
         }
 
-        type = kmap_atomic_idx_pop();
+        type = kmap_atomic_idx();
 #ifdef CONFIG_DEBUG_HIGHMEM
         {
                 int idx = type + KM_TYPE_NR * smp_processor_id();
@@ -89,6 +89,7 @@ void __kunmap_atomic(void *kvaddr)
                 local_flush_tlb_one(vaddr);
         }
 #endif
+        kmap_atomic_idx_pop();
         pagefault_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);

arch/mn10300/include/asm/highmem.h

@@ -101,7 +101,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
                 return;
         }
 
-        type = kmap_atomic_idx_pop();
+        type = kmap_atomic_idx();
 
 #if HIGHMEM_DEBUG
         {
@@ -119,6 +119,8 @@ static inline void __kunmap_atomic(unsigned long vaddr)
                 __flush_tlb_one(vaddr);
         }
 #endif
+
+        kmap_atomic_idx_pop();
         pagefault_enable();
 }
 #endif /* __KERNEL__ */

arch/powerpc/mm/highmem.c

@@ -62,7 +62,7 @@ void __kunmap_atomic(void *kvaddr)
                 return;
         }
 
-        type = kmap_atomic_idx_pop();
+        type = kmap_atomic_idx();
 
 #ifdef CONFIG_DEBUG_HIGHMEM
         {
@@ -79,6 +79,8 @@ void __kunmap_atomic(void *kvaddr)
                 local_flush_tlb_page(NULL, vaddr);
         }
 #endif
+
+        kmap_atomic_idx_pop();
         pagefault_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);

arch/sparc/mm/highmem.c

@@ -75,7 +75,7 @@ void __kunmap_atomic(void *kvaddr)
                 return;
         }
 
-        type = kmap_atomic_idx_pop();
+        type = kmap_atomic_idx();
 
 #ifdef CONFIG_DEBUG_HIGHMEM
         {
@@ -104,6 +104,8 @@ void __kunmap_atomic(void *kvaddr)
 #endif
         }
 #endif
+
+        kmap_atomic_idx_pop();
         pagefault_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);

arch/tile/mm/highmem.c

@@ -241,7 +241,7 @@ void __kunmap_atomic(void *kvaddr)
                 pte_t pteval = *pte;
                 int idx, type;
 
-                type = kmap_atomic_idx_pop();
+                type = kmap_atomic_idx();
                 idx = type + KM_TYPE_NR*smp_processor_id();
 
                 /*
@@ -252,6 +252,7 @@ void __kunmap_atomic(void *kvaddr)
                 BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
                 kmap_atomic_unregister(pte_page(pteval), vaddr);
                 kpte_clear_flush(pte, vaddr);
+                kmap_atomic_idx_pop();
         } else {
                 /* Must be a lowmem page */
                 BUG_ON(vaddr < PAGE_OFFSET);

arch/x86/mm/highmem_32.c

@@ -74,7 +74,7 @@ void __kunmap_atomic(void *kvaddr)
             vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
                 int idx, type;
 
-                type = kmap_atomic_idx_pop();
+                type = kmap_atomic_idx();
                 idx = type + KM_TYPE_NR * smp_processor_id();
 
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -87,6 +87,7 @@ void __kunmap_atomic(void *kvaddr)
                  * attributes or becomes a protected page in a hypervisor.
                  */
                 kpte_clear_flush(kmap_pte-idx, vaddr);
+                kmap_atomic_idx_pop();
         }
 #ifdef CONFIG_DEBUG_HIGHMEM
         else {

arch/x86/mm/iomap_32.c

@@ -98,7 +98,7 @@ iounmap_atomic(void __iomem *kvaddr)
             vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
                 int idx, type;
 
-                type = kmap_atomic_idx_pop();
+                type = kmap_atomic_idx();
                 idx = type + KM_TYPE_NR * smp_processor_id();
 
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -111,6 +111,7 @@ iounmap_atomic(void __iomem *kvaddr)
                  * attributes or becomes a protected page in a hypervisor.
                  */
                 kpte_clear_flush(kmap_pte-idx, vaddr);
+                kmap_atomic_idx_pop();
         }
 
         pagefault_enable();

include/linux/highmem.h

@@ -88,6 +88,11 @@ static inline int kmap_atomic_idx_push(void)
         return idx;
 }
 
+static inline int kmap_atomic_idx(void)
+{
+        return __get_cpu_var(__kmap_atomic_idx) - 1;
+}
+
 static inline int kmap_atomic_idx_pop(void)
 {
         int idx = --__get_cpu_var(__kmap_atomic_idx);