Book3S radix mode has no SLB interrupt limitation, and hash mode has a
1T limitation on modern CPUs, so the PACA allocation limits can be
lifted. Update the paca allocation limits accordingly, and share the
TLB/SLB limit calculation with the stack allocation code.

Signed-off-by: Nicholas Piggin <npiggin@xxxxxxxxx>
---
 arch/powerpc/kernel/paca.c     | 13 +++++--------
 arch/powerpc/kernel/setup.h    |  4 ++++
 arch/powerpc/kernel/setup_64.c |  7 ++++---
 3 files changed, 13 insertions(+), 11 deletions(-)

diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 8d63627e067f..64401f551765 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -18,6 +18,8 @@
 #include <asm/pgtable.h>
 #include <asm/kexec.h>
 
+#include "setup.h"
+
 #ifdef CONFIG_PPC_BOOK3S
 
 /*
@@ -199,16 +201,11 @@ void __init allocate_pacas(void)
 	u64 limit;
 	int cpu;
 
-	limit = ppc64_rma_size;
-
-#ifdef CONFIG_PPC_BOOK3S_64
 	/*
-	 * We can't take SLB misses on the paca, and we want to access them
-	 * in real mode, so allocate them within the RMA and also within
-	 * the first segment.
+	 * We access pacas in real mode, and cannot take faults on them when
+	 * in virtual mode, so allocate them accordingly.
 	 */
-	limit = min(0x10000000ULL, limit);
-#endif
+	limit = min(safe_kva_limit(), ppc64_rma_size);
 
 	paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);
 
diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h
index cfba134b3024..b97dfb50298c 100644
--- a/arch/powerpc/kernel/setup.h
+++ b/arch/powerpc/kernel/setup.h
@@ -45,6 +45,10 @@ void emergency_stack_init(void);
 static inline void emergency_stack_init(void) { };
 #endif
 
+#ifdef CONFIG_PPC64
+u64 safe_kva_limit(void);
+#endif
+
 /*
  * Having this in kvm_ppc.h makes include dependencies too
  * tricky to solve for setup-common.c so have it here.
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 7393bac3c7f4..35ad5f28f0c1 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -555,10 +555,11 @@ void __init initialize_cache_info(void)
  * used to allocate interrupt or emergency stacks for which our
  * exception entry path doesn't deal with being interrupted.
  */
-static __init u64 safe_stack_limit(void)
+__init u64 safe_kva_limit(void)
 {
 #ifdef CONFIG_PPC_BOOK3E
 	/* Freescale BookE bolts the entire linear mapping */
+	/* XXX: BookE ppc64_rma_limit setup seems to disagree? */
 	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
 		return linear_map_top;
 	/* Other BookE, we assume the first GB is bolted */
@@ -576,7 +577,7 @@ static __init u64 safe_stack_limit(void)
 
 void __init irqstack_early_init(void)
 {
-	u64 limit = safe_stack_limit();
+	u64 limit = safe_kva_limit();
 	unsigned int i;
 
 	/*
@@ -661,7 +662,7 @@ void __init emergency_stack_init(void)
 	 * initialized in kernel/irq.c. These are initialized here in order
 	 * to have emergency stacks available as early as possible.
 	 */
-	limit = min(safe_stack_limit(), ppc64_rma_size);
+	limit = min(safe_kva_limit(), ppc64_rma_size);
 
 	for_each_possible_cpu(i) {
 		struct thread_info *ti;
-- 
2.13.3
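
[Editor's note: a minimal, self-contained C sketch of the clamping logic the
patch converges on, for readers without the tree handy. This is not the kernel
code: min_u64(), sketch_safe_kva_limit(), the MMU-mode enum, and all sizes
below are hypothetical stand-ins, and the hash/radix return values simply
follow the commit message (a 1T limit for hash on modern CPUs, the whole linear
map for radix). The real safe_kva_limit() also handles the Book3E cases shown
in the diff.]

/*
 * Illustrative sketch only -- not the kernel implementation.  It mimics
 * the shape of the patched logic: compute a "safe KVA" limit (how far a
 * faultless/bolted mapping is assumed to reach) and clamp early
 * allocations to the smaller of that and the RMA size.
 */
#include <stdint.h>
#include <stdio.h>

#define GB (1024ULL * 1024 * 1024)
#define TB (1024ULL * GB)

/* Hypothetical MMU-mode flag, standing in for the kernel's feature checks. */
enum mmu_mode { MMU_HASH, MMU_RADIX };

static uint64_t min_u64(uint64_t a, uint64_t b)
{
	return a < b ? a : b;
}

/*
 * Stand-in for safe_kva_limit(): per the commit message, hash mode on
 * modern CPUs is limited to 1T, while radix mode has no SLB-style
 * limitation, so the whole linear map is usable.
 */
static uint64_t sketch_safe_kva_limit(enum mmu_mode mode, uint64_t linear_map_top)
{
	if (mode == MMU_RADIX)
		return linear_map_top;
	return 1 * TB;
}

int main(void)
{
	uint64_t rma_size = 768 * GB;      /* hypothetical ppc64_rma_size */
	uint64_t linear_map_top = 2 * TB;  /* hypothetical linear map size */

	/* The same clamp the patch applies to the paca and emergency-stack limits. */
	uint64_t hash_limit  = min_u64(sketch_safe_kva_limit(MMU_HASH, linear_map_top), rma_size);
	uint64_t radix_limit = min_u64(sketch_safe_kva_limit(MMU_RADIX, linear_map_top), rma_size);

	printf("hash  allocation limit: %llu GB\n", (unsigned long long)(hash_limit / GB));
	printf("radix allocation limit: %llu GB\n", (unsigned long long)(radix_limit / GB));
	return 0;
}

The point of the sketch is only the shape of the change: both the paca and
emergency-stack paths now take min(<safe KVA limit>, ppc64_rma_size) from a
shared helper instead of the paca path open-coding a 0x10000000 first-segment
clamp.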