diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index d6597038931d..95ffedf14885 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -18,6 +18,8 @@
 #include <asm/pgtable.h>
 #include <asm/kexec.h>
 
+#include "setup.h"
+
 #ifdef CONFIG_PPC_BOOK3S
 
 /*
@@ -208,15 +210,14 @@ void __init allocate_pacas(void)
 	u64 limit;
 	int cpu;
 
-	limit = ppc64_rma_size;
-
 #ifdef CONFIG_PPC_BOOK3S_64
 	/*
-	 * We can't take SLB misses on the paca, and we want to access them
-	 * in real mode, so allocate them within the RMA and also within
-	 * the first segment.
+	 * We access pacas in real mode, and cannot take SLB faults
+	 * on them when in virtual mode, so allocate them accordingly.
 	 */
-	limit = min(0x10000000ULL, limit);
+	limit = min(ppc64_bolted_size(), ppc64_rma_size);
+#else
+	limit = ppc64_rma_size;
 #endif
 
 	paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);
diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h
index 21c18071d9d5..3fc11e30308f 100644
--- a/arch/powerpc/kernel/setup.h
+++ b/arch/powerpc/kernel/setup.h
@@ -51,6 +51,10 @@ void record_spr_defaults(void);
 static inline void record_spr_defaults(void) { };
 #endif
 
+#ifdef CONFIG_PPC64
+u64 ppc64_bolted_size(void);
+#endif
+
 /*
  * Having this in kvm_ppc.h makes include dependencies too
  * tricky to solve for setup-common.c so have it here.
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index d3124c302146..8f285d6a3db1 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -565,25 +565,31 @@ void __init initialize_cache_info(void)
 	DBG(" <- initialize_cache_info()\n");
 }
 
-/* This returns the limit below which memory accesses to the linear
- * mapping are guarnateed not to cause a TLB or SLB miss. This is
- * used to allocate interrupt or emergency stacks for which our
- * exception entry path doesn't deal with being interrupted.
+/*
+ * This returns the limit below which memory accesses to the linear
+ * mapping are guaranteed not to cause an architectural exception (e.g.,
+ * TLB or SLB miss fault).
+ *
+ * This is used to allocate PACAs and various interrupt stacks that
+ * are accessed early in interrupt handlers and must not cause
+ * re-entrant interrupts.
  */
-static __init u64 safe_stack_limit(void)
+__init u64 ppc64_bolted_size(void)
 {
 #ifdef CONFIG_PPC_BOOK3E
 	/* Freescale BookE bolts the entire linear mapping */
-	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
+	/* XXX: BookE ppc64_rma_limit setup seems to disagree? */
+	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E))
 		return linear_map_top;
 	/* Other BookE, we assume the first GB is bolted */
 	return 1ul << 30;
 #else
+	/* BookS radix, does not take faults on linear mapping */
 	if (early_radix_enabled())
 		return ULONG_MAX;
 
-	/* BookS, the first segment is bolted */
-	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
+	/* BookS hash, the first segment is bolted */
+	if (early_mmu_has_feature(MMU_FTR_1T_SEGMENT))
 		return 1UL << SID_SHIFT_1T;
 	return 1UL << SID_SHIFT;
 #endif
@@ -591,7 +597,7 @@ static __init u64 safe_stack_limit(void)
 
 void __init irqstack_early_init(void)
 {
-	u64 limit = safe_stack_limit();
+	u64 limit = ppc64_bolted_size();
 	unsigned int i;
 
 	/*
@@ -676,7 +682,7 @@ void __init emergency_stack_init(void)
 	 * initialized in kernel/irq.c. These are initialized here in order
 	 * to have emergency stacks available as early as possible.
 	 */
-	limit = min(safe_stack_limit(), ppc64_rma_size);
+	limit = min(ppc64_bolted_size(), ppc64_rma_size);
 
 	for_each_possible_cpu(i) {
 		struct thread_info *ti;
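
For reference, a minimal sketch of the allocation pattern this patch settles
on, assuming the ppc64_bolted_size()/ppc64_rma_size interfaces above and the
usual kernel headers (<linux/memblock.h>, <linux/string.h>). The helper name
alloc_early_buffer() is hypothetical; the memblock_alloc_base() call mirrors
what allocate_pacas() does in this era and is illustrative, not part of the
patch:

	/*
	 * Sketch: an early allocation that is touched in real mode and in
	 * early interrupt handlers must sit below both limits:
	 *   - ppc64_rma_size: accessible in real mode (within the RMA)
	 *   - ppc64_bolted_size(): no SLB/TLB fault in virtual mode
	 */
	static void * __init alloc_early_buffer(unsigned long size) /* hypothetical */
	{
		u64 limit = min(ppc64_bolted_size(), ppc64_rma_size);
		void *p;

		/* memblock_alloc_base() panics rather than return failure */
		p = __va(memblock_alloc_base(size, PAGE_SIZE, limit));
		memset(p, 0, size);
		return p;
	}

On hash MMUs the bolted term clamps this to the first segment (256MB, or 1TB
with MMU_FTR_1T_SEGMENT); on radix ppc64_bolted_size() returns ULONG_MAX, so
the RMA term dominates.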