1
0
Fork 0

mm: do not allocate page->ptl dynamically, if spinlock_t fits to long

In struct page we have enough space to fit a long-sized page->ptl there,
but we use a dynamically-allocated page->ptl if sizeof(spinlock_t) is larger
than sizeof(int).

It hurts 64-bit architectures with CONFIG_GENERIC_LOCKBREAK, where
sizeof(spinlock_t) == 8, but it easily fits into struct page.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
wifi-calibration
Kirill A. Shutemov 2013-12-20 13:35:58 +02:00 committed by Linus Torvalds
parent fff4068cba
commit 597d795a2a
5 changed files with 8 additions and 7 deletions

View File

@@ -19,7 +19,7 @@
 #define USE_CMPXCHG_LOCKREF \
 	(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
-	IS_ENABLED(CONFIG_SMP) && !BLOATED_SPINLOCKS)
+	IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)

 struct lockref {
 	union {

View File

@@ -1317,7 +1317,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */

 #if USE_SPLIT_PTE_PTLOCKS
-#if BLOATED_SPINLOCKS
+#if ALLOC_SPLIT_PTLOCKS
 extern bool ptlock_alloc(struct page *page);
 extern void ptlock_free(struct page *page);
@@ -1325,7 +1325,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
 {
 	return page->ptl;
 }
-#else /* BLOATED_SPINLOCKS */
+#else /* ALLOC_SPLIT_PTLOCKS */
 static inline bool ptlock_alloc(struct page *page)
 {
 	return true;
@@ -1339,7 +1339,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
 {
 	return &page->ptl;
 }
-#endif /* BLOATED_SPINLOCKS */
+#endif /* ALLOC_SPLIT_PTLOCKS */

 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
 {

View File

@@ -26,6 +26,7 @@ struct address_space;
 #define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
 #define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
 		IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
+#define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)

 /*
  * Each physical page in the system has a struct page associated with
@@ -155,7 +156,7 @@ struct page {
					 * system if PG_buddy is set.
					 */
 #if USE_SPLIT_PTE_PTLOCKS
-#if BLOATED_SPINLOCKS
+#if ALLOC_SPLIT_PTLOCKS
 	spinlock_t *ptl;
 #else
 	spinlock_t ptl;

View File

@@ -22,6 +22,6 @@ void foo(void)
 #ifdef CONFIG_SMP
 	DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
 #endif
-	DEFINE(BLOATED_SPINLOCKS, sizeof(spinlock_t) > sizeof(int));
+	DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));

 	/* End of constants */
 }

View File

@@ -4271,7 +4271,7 @@ void copy_user_huge_page(struct page *dst, struct page *src,
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

-#if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS
+#if ALLOC_SPLIT_PTLOCKS
 bool ptlock_alloc(struct page *page)
 {
 	spinlock_t *ptl;