
powerpc64: convert to dynamic percpu allocator

Now that percpu allows arbitrary embedding of the first chunk,
powerpc64 can easily be converted to the dynamic percpu allocator.
Convert it.  powerpc supports several large page sizes, so cap
atom_size at 1M; there isn't much to gain by going above that anyway.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Tejun Heo 2009-08-14 15:00:53 +09:00
parent bcb2107fdb
commit c2a7e81801
2 changed files with 45 additions and 16 deletions
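
The core of the new setup_per_cpu_areas() below is the offset bookkeeping:
pcpu_embed_first_chunk() places each CPU's unit at
pcpu_base_addr + pcpu_unit_offsets[cpu], and the paca data_offset (the
per-CPU offset powerpc adds to a static per-CPU variable's address)
becomes that unit base minus __per_cpu_start.  What follows is a minimal
userspace sketch of that arithmetic, not kernel code: the addresses are
made up, and only pcpu_base_addr, pcpu_unit_offsets and __per_cpu_start
correspond to names used in the patch.

        /*
         * Toy model of the data_offset computation in the new
         * setup_per_cpu_areas().  The addresses are invented; only the
         * arithmetic mirrors the patch.
         */
        #include <stdio.h>

        #define NR_CPUS 4

        int main(void)
        {
                /* Stand-in for the start of the static per-CPU section. */
                unsigned long per_cpu_start = 0x100000UL;

                /* Stand-ins for what pcpu_embed_first_chunk() would report:
                 * the first chunk's base and each CPU's unit offset within
                 * it (here one 1M atom per CPU).
                 */
                unsigned long pcpu_base = 0x800000UL;
                unsigned long unit_off[NR_CPUS] = {
                        0x0, 0x100000, 0x200000, 0x300000
                };

                unsigned long delta = pcpu_base - per_cpu_start;

                for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                        /* Same formula as paca[cpu].data_offset in the patch. */
                        unsigned long data_offset = delta + unit_off[cpu];

                        /* A static per-CPU variable 0x40 bytes into the
                         * section lands 0x40 bytes into this CPU's unit once
                         * data_offset is added, which is what the paca-based
                         * accessors do.
                         */
                        unsigned long var = per_cpu_start + 0x40;
                        printf("cpu%d: data_offset=%#lx var@%#lx\n",
                               cpu, data_offset, var + data_offset);
                }
                return 0;
        }

With these invented numbers, CPU0's variable lands at 0x800040, CPU1's at
0x900040, and so on: each CPU sees the same variable at its own unit base.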

arch/powerpc/Kconfig

@@ -46,10 +46,10 @@ config GENERIC_HARDIRQS_NO__DO_IRQ
 	bool
 	default y
 
-config HAVE_LEGACY_PER_CPU_AREA
+config HAVE_SETUP_PER_CPU_AREA
 	def_bool PPC64
 
-config HAVE_SETUP_PER_CPU_AREA
+config NEED_PER_CPU_EMBED_FIRST_CHUNK
 	def_bool PPC64
 
 config IRQ_PER_CPU

arch/powerpc/kernel/setup_64.c

@@ -57,6 +57,7 @@
 #include <asm/cache.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
+#include <asm/mmu-hash64.h>
 #include <asm/firmware.h>
 #include <asm/xmon.h>
 #include <asm/udbg.h>
@@ -569,25 +570,53 @@ void cpu_die(void)
 }
 
 #ifdef CONFIG_SMP
+#define PCPU_DYN_SIZE		()
+
+static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
+{
+	return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
+				    __pa(MAX_DMA_ADDRESS));
+}
+
+static void __init pcpu_fc_free(void *ptr, size_t size)
+{
+	free_bootmem(__pa(ptr), size);
+}
+
+static int pcpu_cpu_distance(unsigned int from, unsigned int to)
+{
+	if (cpu_to_node(from) == cpu_to_node(to))
+		return LOCAL_DISTANCE;
+	else
+		return REMOTE_DISTANCE;
+}
+
 void __init setup_per_cpu_areas(void)
 {
-	int i;
-	unsigned long size;
-	char *ptr;
+	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
+	size_t atom_size;
+	unsigned long delta;
+	unsigned int cpu;
+	int rc;
 
-	/* Copy section for each CPU (we discard the original) */
-	size = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE);
-#ifdef CONFIG_MODULES
-	if (size < PERCPU_ENOUGH_ROOM)
-		size = PERCPU_ENOUGH_ROOM;
-#endif
+	/*
+	 * Linear mapping is one of 4K, 1M and 16M.  For 4K, no need
+	 * to group units.  For larger mappings, use 1M atom which
+	 * should be large enough to contain a number of units.
+	 */
+	if (mmu_linear_psize == MMU_PAGE_4K)
+		atom_size = PAGE_SIZE;
+	else
+		atom_size = 1 << 20;
 
-	for_each_possible_cpu(i) {
-		ptr = alloc_bootmem_pages_node(NODE_DATA(cpu_to_node(i)), size);
+	rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
+				    pcpu_fc_alloc, pcpu_fc_free);
+	if (rc < 0)
+		panic("cannot initialize percpu area (err=%d)", rc);
 
-		paca[i].data_offset = ptr - __per_cpu_start;
-		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-	}
+	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+	for_each_possible_cpu(cpu)
+		paca[cpu].data_offset = delta + pcpu_unit_offsets[cpu];
 }
 
 #endif