powerpc/mm: Extend pte_fragment functionality to PPC32

In order to allow the 8xx to handle pte_fragments, this patch
extends the use of pte_fragments to PPC32 platforms.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Christophe Leroy 2018-11-29 14:07:01 +00:00 committed by Michael Ellerman
parent a74791dd98
commit 32ea4c1499
12 changed files with 55 additions and 49 deletions
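
For readers who have not met pte_fragments before, here is a minimal, standalone sketch of the idea the diff below relies on: one page is carved into PTE_FRAG_NR slices of PTE_FRAG_SIZE bytes, and each slice serves as a separate PTE table, so platforms whose PTE tables are smaller than a page (such as the 8xx) no longer spend a whole page per table. This is plain userspace C with illustrative names (frag_cache, frag_alloc); it is not the kernel code touched by this patch.

/*
 * Conceptual userspace sketch of pte_fragment allocation
 * (illustrative names; NOT the powerpc kernel implementation).
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE	4096UL
#define PTE_FRAG_SIZE	1024UL			/* e.g. four fragments per page */
#define PTE_FRAG_NR	(PAGE_SIZE / PTE_FRAG_SIZE)

struct frag_cache {
	unsigned char *page;	/* backing page currently being carved up */
	unsigned long used;	/* fragments already handed out from it   */
};

/* Return the next PTE_FRAG_SIZE slice, grabbing a fresh zeroed page when needed. */
static void *frag_alloc(struct frag_cache *c)
{
	if (!c->page || c->used == PTE_FRAG_NR) {
		/* The exhausted page is simply dropped here; the kernel refcounts it. */
		c->page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!c->page)
			return NULL;
		memset(c->page, 0, PAGE_SIZE);
		c->used = 0;
	}
	return c->page + c->used++ * PTE_FRAG_SIZE;
}

int main(void)
{
	struct frag_cache cache = { 0 };

	/* Five "PTE tables" fit in two real pages instead of five. */
	for (int i = 0; i < 5; i++)
		printf("pte table %d at %p\n", i, frag_alloc(&cache));
	return 0;	/* demo leaks the backing pages; the kernel frees them via refcounts */
}

In the kernel the equivalent cursor is kept per mm in mm->context (see the pte_frag_get()/pte_frag_set() hunks below), fragments are reference counted, and pte_fragment_free() only returns the backing page once every slice has been freed.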


@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_
#define _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_
/*
* 32-bit hash table MMU support
*/
@@ -9,6 +10,8 @@
* BATs
*/
#include <asm/page.h>
/* Block size masks */
#define BL_128K 0x000
#define BL_256K 0x001
@@ -43,7 +46,7 @@ struct ppc_bat {
u32 batl;
};
typedef struct page *pgtable_t;
typedef pte_t *pgtable_t;
#endif /* !__ASSEMBLY__ */
/*


@@ -59,30 +59,31 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pte_page)
{
*pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_PRESENT);
*pmdp = __pmd(__pa(pte_page) | _PMD_PRESENT);
}
#define pmd_pgtable(pmd) pmd_page(pmd)
#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
void pte_frag_destroy(void *pte_frag);
pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel);
void pte_fragment_free(unsigned long *table, int kernel);
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
pte_fragment_free((unsigned long *)pte, 1);
}
static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
pgtable_page_dtor(ptepage);
__free_page(ptepage);
pte_fragment_free((unsigned long *)ptepage, 0);
}
static inline void pgtable_free(void *table, unsigned index_size)
{
if (!index_size) {
pgtable_page_dtor(virt_to_page(table));
free_page((unsigned long)table);
pte_fragment_free((unsigned long *)table, 0);
} else {
BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
kmem_cache_free(PGT_CACHE(index_size), table);
@@ -120,6 +121,6 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
pgtable_free_tlb(tlb, page_address(table), 0);
pgtable_free_tlb(tlb, table, 0);
}
#endif /* _ASM_POWERPC_BOOK3S_32_PGALLOC_H */


@@ -329,7 +329,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
#define pmd_page_vaddr(pmd) \
((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd) \
pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
@@ -346,7 +346,8 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
#define pte_offset_kernel(dir, addr) \
((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr) \
((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
(pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
#define pte_unmap(pte) kunmap_atomic(pte)
/*


@@ -223,7 +223,7 @@ static inline int arch_dup_mmap(struct mm_struct *oldmm,
return 0;
}
#ifndef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_PPC_BOOK3E_64
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}


@@ -2,6 +2,8 @@
#ifndef _ASM_POWERPC_NOHASH_32_MMU_H_
#define _ASM_POWERPC_NOHASH_32_MMU_H_
#include <asm/page.h>
#if defined(CONFIG_40x)
/* 40x-style software loaded TLB */
#include <asm/nohash/32/mmu-40x.h>
@@ -17,7 +19,7 @@
#endif
#ifndef __ASSEMBLY__
typedef struct page *pgtable_t;
typedef pte_t *pgtable_t;
#endif
#endif /* _ASM_POWERPC_NOHASH_32_MMU_H_ */


@@ -61,11 +61,10 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pte_page)
{
*pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_USER |
_PMD_PRESENT);
*pmdp = __pmd(__pa(pte_page) | _PMD_USER | _PMD_PRESENT);
}
#define pmd_pgtable(pmd) pmd_page(pmd)
#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
#else
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
@@ -77,31 +76,32 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pte_page)
{
*pmdp = __pmd((unsigned long)lowmem_page_address(pte_page) | _PMD_PRESENT);
*pmdp = __pmd((unsigned long)pte_page | _PMD_PRESENT);
}
#define pmd_pgtable(pmd) pmd_page(pmd)
#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
#endif
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
void pte_frag_destroy(void *pte_frag);
pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel);
void pte_fragment_free(unsigned long *table, int kernel);
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
pte_fragment_free((unsigned long *)pte, 1);
}
static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
pgtable_page_dtor(ptepage);
__free_page(ptepage);
pte_fragment_free((unsigned long *)ptepage, 0);
}
static inline void pgtable_free(void *table, unsigned index_size)
{
if (!index_size) {
pgtable_page_dtor(virt_to_page(table));
free_page((unsigned long)table);
pte_fragment_free((unsigned long *)table, 0);
} else {
BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
kmem_cache_free(PGT_CACHE(index_size), table);
@@ -140,6 +140,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
tlb_flush_pgtable(tlb, address);
pgtable_free_tlb(tlb, page_address(table), 0);
pgtable_free_tlb(tlb, table, 0);
}
#endif /* _ASM_POWERPC_PGALLOC_32_H */


@@ -333,12 +333,12 @@ static inline int pte_young(pte_t pte)
*/
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd) \
((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd) \
pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd) \
((unsigned long) (pmd_val(pmd) & PAGE_MASK))
((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd) \
pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif
@@ -357,7 +357,8 @@ static inline int pte_young(pte_t pte)
(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
pte_index(addr))
#define pte_offset_map(dir, addr) \
((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
(pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
#define pte_unmap(pte) kunmap_atomic(pte)
/*


@@ -125,6 +125,10 @@ static inline void pte_frag_set(mm_context_t *ctx, void *p)
ctx->pte_frag = p;
}
#else
#define PTE_FRAG_NR 1
#define PTE_FRAG_SIZE_SHIFT PAGE_SHIFT
#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
static inline void *pte_frag_get(mm_context_t *ctx)
{
return NULL;


@@ -18,6 +18,7 @@ obj-$(CONFIG_PPC_BOOK3E_64) += pgtable-book3e.o
obj-$(CONFIG_PPC_BOOK3S_64) += pgtable-hash64.o hash_utils_64.o slb.o \
$(hash64-y) mmu_context_book3s64.o \
pgtable-book3s64.o pgtable-frag.o
obj-$(CONFIG_PPC32) += pgtable-frag.o
obj-$(CONFIG_PPC_RADIX_MMU) += pgtable-radix.o tlb-radix.o
obj-$(CONFIG_PPC_BOOK3S_32) += ppc_mmu_32.o hash_low_32.o mmu_context_hash32.o
obj-$(CONFIG_PPC_BOOK3S) += tlb_hash$(BITS).o


@@ -15,6 +15,7 @@
#include <linux/sched/mm.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#if defined(CONFIG_PPC32)
static inline void switch_mm_pgdir(struct task_struct *tsk,
@@ -97,3 +98,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
switch_mmu_context(prev, next, tsk);
}
#ifdef CONFIG_PPC32
void arch_exit_mmap(struct mm_struct *mm)
{
void *frag = pte_frag_get(&mm->context);
if (frag)
pte_frag_destroy(frag);
}
#endif


@@ -385,6 +385,7 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm)
#endif
mm->context.id = MMU_NO_CONTEXT;
mm->context.active = 0;
pte_frag_set(&mm->context, NULL);
return 0;
}
@@ -487,4 +488,3 @@ void __init mmu_context_init(void)
next_context = FIRST_CONTEXT;
nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT + 1;
}


@@ -45,32 +45,15 @@ extern char etext[], _stext[], _sinittext[], _einittext[];
__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
pte_t *pte;
if (!slab_is_available())
return memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
if (slab_is_available()) {
pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
} else {
pte = __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
if (pte)
clear_page(pte);
}
return pte;
return (pte_t *)pte_fragment_alloc(mm, address, 1);
}
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *ptepage;
gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT;
ptepage = alloc_pages(flags, 0);
if (!ptepage)
return NULL;
if (!pgtable_page_ctor(ptepage)) {
__free_page(ptepage);
return NULL;
}
return ptepage;
return (pgtable_t)pte_fragment_alloc(mm, address, 0);
}
void __iomem *