powerpc: Delete __cpuinit usage from all users

The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications.  The fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
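
For reference, this is roughly how the __cpuinit annotations were
wired up before this series (a simplified sketch of the old
include/linux/init.h and asm-generic/vmlinux.lds.h definitions, which
varied somewhat across kernel versions): the annotations drop code and
data into .cpuinit.* sections, and the linker script then either keeps
those sections or discards them with the rest of the init memory,
depending on CONFIG_HOTPLUG_CPU.

    /* include/linux/init.h: tag CPU bringup code into its own sections */
    #define __cpuinit       __section(.cpuinit.text)
    #define __cpuinitdata   __section(.cpuinit.data)

    /* asm-generic/vmlinux.lds.h: keep or throw away those sections,
     * depending on whether CPUs can still come online after boot */
    #ifdef CONFIG_HOTPLUG_CPU
    #define CPU_KEEP(sec)       *(.cpu##sec)
    #define CPU_DISCARD(sec)
    #else
    #define CPU_KEEP(sec)
    #define CPU_DISCARD(sec)    *(.cpu##sec)
    #endif

Getting the annotation wrong on a kernel where the sections are
discarded means referencing init memory that has already been freed
(and possibly reused), which is the class of bug referenced above.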

After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out.  Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.

This removes all the powerpc uses of the __cpuinit macros.  There
are no __CPUINIT users in assembly files in powerpc.

[1] https://lkml.org/lkml/2013/5/20/589

Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Josh Boyer <jwboyer@gmail.com>
Cc: Matt Porter <mporter@kernel.crashing.org>
Cc: Kumar Gala <galak@kernel.crashing.org>
Cc: linuxppc-dev@lists.ozlabs.org
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Paul Gortmaker 2013-06-24 15:30:09 -04:00 committed by Benjamin Herrenschmidt
parent 5eb969d0e8
commit 061d19f279
19 changed files with 54 additions and 50 deletions

@ -350,8 +350,8 @@ static inline u32 rtas_config_addr(int busno, int devfn, int reg)
(devfn << 8) | (reg & 0xff);
}
extern void __cpuinit rtas_give_timebase(void);
extern void __cpuinit rtas_take_timebase(void);
extern void rtas_give_timebase(void);
extern void rtas_take_timebase(void);
#ifdef CONFIG_PPC_RTAS
static inline int page_is_rtas_user_buf(unsigned long pfn)

@ -22,7 +22,7 @@ extern unsigned long vdso64_rt_sigtramp;
extern unsigned long vdso32_sigtramp;
extern unsigned long vdso32_rt_sigtramp;
int __cpuinit vdso_getcpu_init(void);
int vdso_getcpu_init(void);
#else /* __ASSEMBLY__ */

@ -131,7 +131,8 @@ static const char *cache_type_string(const struct cache *cache)
return cache_type_info[cache->type].name;
}
static void __cpuinit cache_init(struct cache *cache, int type, int level, struct device_node *ofnode)
static void cache_init(struct cache *cache, int type, int level,
struct device_node *ofnode)
{
cache->type = type;
cache->level = level;
@ -140,7 +141,7 @@ static void __cpuinit cache_init(struct cache *cache, int type, int level, struc
list_add(&cache->list, &cache_list);
}
static struct cache *__cpuinit new_cache(int type, int level, struct device_node *ofnode)
static struct cache *new_cache(int type, int level, struct device_node *ofnode)
{
struct cache *cache;
@ -324,7 +325,8 @@ static bool cache_node_is_unified(const struct device_node *np)
return of_get_property(np, "cache-unified", NULL);
}
static struct cache *__cpuinit cache_do_one_devnode_unified(struct device_node *node, int level)
static struct cache *cache_do_one_devnode_unified(struct device_node *node,
int level)
{
struct cache *cache;
@ -335,7 +337,8 @@ static struct cache *__cpuinit cache_do_one_devnode_unified(struct device_node *
return cache;
}
static struct cache *__cpuinit cache_do_one_devnode_split(struct device_node *node, int level)
static struct cache *cache_do_one_devnode_split(struct device_node *node,
int level)
{
struct cache *dcache, *icache;
@ -357,7 +360,7 @@ err:
return NULL;
}
static struct cache *__cpuinit cache_do_one_devnode(struct device_node *node, int level)
static struct cache *cache_do_one_devnode(struct device_node *node, int level)
{
struct cache *cache;
@ -369,7 +372,8 @@ static struct cache *__cpuinit cache_do_one_devnode(struct device_node *node, in
return cache;
}
static struct cache *__cpuinit cache_lookup_or_instantiate(struct device_node *node, int level)
static struct cache *cache_lookup_or_instantiate(struct device_node *node,
int level)
{
struct cache *cache;
@ -385,7 +389,7 @@ static struct cache *__cpuinit cache_lookup_or_instantiate(struct device_node *n
return cache;
}
static void __cpuinit link_cache_lists(struct cache *smaller, struct cache *bigger)
static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
while (smaller->next_local) {
if (smaller->next_local == bigger)
@ -396,13 +400,13 @@ static void __cpuinit link_cache_lists(struct cache *smaller, struct cache *bigg
smaller->next_local = bigger;
}
static void __cpuinit do_subsidiary_caches_debugcheck(struct cache *cache)
static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
WARN_ON_ONCE(cache->level != 1);
WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu"));
}
static void __cpuinit do_subsidiary_caches(struct cache *cache)
static void do_subsidiary_caches(struct cache *cache)
{
struct device_node *subcache_node;
int level = cache->level;
@ -423,7 +427,7 @@ static void __cpuinit do_subsidiary_caches(struct cache *cache)
}
}
static struct cache *__cpuinit cache_chain_instantiate(unsigned int cpu_id)
static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
struct device_node *cpu_node;
struct cache *cpu_cache = NULL;
@ -448,7 +452,7 @@ out:
return cpu_cache;
}
static struct cache_dir *__cpuinit cacheinfo_create_cache_dir(unsigned int cpu_id)
static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
{
struct cache_dir *cache_dir;
struct device *dev;
@ -653,7 +657,7 @@ static struct kobj_type cache_index_type = {
.default_attrs = cache_index_default_attrs,
};
static void __cpuinit cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
const char *cache_name;
const char *cache_type;
@ -696,7 +700,8 @@ static void __cpuinit cacheinfo_create_index_opt_attrs(struct cache_index_dir *d
kfree(buf);
}
static void __cpuinit cacheinfo_create_index_dir(struct cache *cache, int index, struct cache_dir *cache_dir)
static void cacheinfo_create_index_dir(struct cache *cache, int index,
struct cache_dir *cache_dir)
{
struct cache_index_dir *index_dir;
int rc;
@ -722,7 +727,8 @@ err:
kfree(index_dir);
}
static void __cpuinit cacheinfo_sysfs_populate(unsigned int cpu_id, struct cache *cache_list)
static void cacheinfo_sysfs_populate(unsigned int cpu_id,
struct cache *cache_list)
{
struct cache_dir *cache_dir;
struct cache *cache;
@ -740,7 +746,7 @@ static void __cpuinit cacheinfo_sysfs_populate(unsigned int cpu_id, struct cache
}
}
void __cpuinit cacheinfo_cpu_online(unsigned int cpu_id)
void cacheinfo_cpu_online(unsigned int cpu_id)
{
struct cache *cache;

@ -1172,7 +1172,7 @@ int __init early_init_dt_scan_rtas(unsigned long node,
static arch_spinlock_t timebase_lock;
static u64 timebase = 0;
void __cpuinit rtas_give_timebase(void)
void rtas_give_timebase(void)
{
unsigned long flags;
@ -1189,7 +1189,7 @@ void __cpuinit rtas_give_timebase(void)
local_irq_restore(flags);
}
void __cpuinit rtas_take_timebase(void)
void rtas_take_timebase(void)
{
while (!timebase)
barrier();

@ -480,7 +480,7 @@ static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
secondary_ti = current_set[cpu] = ti;
}
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int rc, c;
@ -610,7 +610,7 @@ static struct device_node *cpu_to_l2cache(int cpu)
}
/* Activate a secondary processor. */
__cpuinit void start_secondary(void *unused)
void start_secondary(void *unused)
{
unsigned int cpu = smp_processor_id();
struct device_node *l2_cache;

@ -341,7 +341,7 @@ static struct device_attribute pa6t_attrs[] = {
#endif /* HAS_PPC_PMC_PA6T */
#endif /* HAS_PPC_PMC_CLASSIC */
static void __cpuinit register_cpu_online(unsigned int cpu)
static void register_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct device *s = &c->dev;
@ -502,7 +502,7 @@ ssize_t arch_cpu_release(const char *buf, size_t count)
#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
static int sysfs_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned int)(long)hcpu;
@ -522,7 +522,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
static struct notifier_block sysfs_cpu_nb = {
.notifier_call = sysfs_cpu_notify,
};

@ -631,7 +631,6 @@ static int __init get_freq(char *name, int cells, unsigned long *val)
return found;
}
/* should become __cpuinit when secondary_cpu_time_init also is */
void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)

@ -711,7 +711,7 @@ static void __init vdso_setup_syscall_map(void)
}
#ifdef CONFIG_PPC64
int __cpuinit vdso_getcpu_init(void)
int vdso_getcpu_init(void)
{
unsigned long cpu, node, val;

@ -41,7 +41,7 @@ int icache_44x_need_flush;
unsigned long tlb_47x_boltmap[1024/8];
static void __cpuinit ppc44x_update_tlb_hwater(void)
static void ppc44x_update_tlb_hwater(void)
{
extern unsigned int tlb_44x_patch_hwater_D[];
extern unsigned int tlb_44x_patch_hwater_I[];
@ -134,7 +134,7 @@ static void __init ppc47x_update_boltmap(void)
/*
* "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 47x type MMU
*/
static void __cpuinit ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
static void ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
{
unsigned int rA;
int bolted;
@ -229,7 +229,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
}
#ifdef CONFIG_SMP
void __cpuinit mmu_init_secondary(int cpu)
void mmu_init_secondary(int cpu)
{
unsigned long addr;
unsigned long memstart = memstart_addr & ~(PPC_PIN_SIZE - 1);

@ -807,7 +807,7 @@ void __init early_init_mmu(void)
}
#ifdef CONFIG_SMP
void __cpuinit early_init_mmu_secondary(void)
void early_init_mmu_secondary(void)
{
/* Initialize hash table for that CPU */
if (!firmware_has_feature(FW_FEATURE_LPAR))

@ -332,8 +332,8 @@ void destroy_context(struct mm_struct *mm)
#ifdef CONFIG_SMP
static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
static int mmu_context_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned int)(long)hcpu;
@ -366,7 +366,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
static struct notifier_block mmu_context_cpu_nb = {
.notifier_call = mmu_context_cpu_notify,
};

@ -516,7 +516,7 @@ static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
* Figure out to which domain a cpu belongs and stick it there.
* Return the id of the domain used.
*/
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
static int numa_setup_cpu(unsigned long lcpu)
{
int nid = 0;
struct device_node *cpu = of_get_cpu_node(lcpu, NULL);
@ -538,8 +538,7 @@ out:
return nid;
}
static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
unsigned long action,
static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
void *hcpu)
{
unsigned long lcpu = (unsigned long)hcpu;
@ -919,7 +918,7 @@ static void __init *careful_zallocation(int nid, unsigned long size,
return ret;
}
static struct notifier_block __cpuinitdata ppc64_numa_nb = {
static struct notifier_block ppc64_numa_nb = {
.notifier_call = cpu_numa_callback,
.priority = 1 /* Must run before sched domains notifier. */
};

@ -648,7 +648,7 @@ void __init early_init_mmu(void)
__early_init_mmu(1);
}
void __cpuinit early_init_mmu_secondary(void)
void early_init_mmu_secondary(void)
{
__early_init_mmu(0);
}

@ -1786,7 +1786,7 @@ static void power_pmu_setup(int cpu)
cpuhw->mmcr[0] = MMCR0_FC;
}
static int __cpuinit
static int
power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
@ -1803,7 +1803,7 @@ power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu
return NOTIFY_OK;
}
int __cpuinit register_power_pmu(struct power_pmu *pmu)
int register_power_pmu(struct power_pmu *pmu)
{
if (ppmu)
return -EBUSY; /* something's already registered */

@ -91,12 +91,12 @@ static void __init ppc47x_init_irq(void)
}
#ifdef CONFIG_SMP
static void __cpuinit smp_ppc47x_setup_cpu(int cpu)
static void smp_ppc47x_setup_cpu(int cpu)
{
mpic_setup_this_cpu();
}
static int __cpuinit smp_ppc47x_kick_cpu(int cpu)
static int smp_ppc47x_kick_cpu(int cpu)
{
struct device_node *cpunode = of_get_cpu_node(cpu, NULL);
const u64 *spin_table_addr_prop;

@ -81,12 +81,12 @@ static void __init iss4xx_init_irq(void)
}
#ifdef CONFIG_SMP
static void __cpuinit smp_iss4xx_setup_cpu(int cpu)
static void smp_iss4xx_setup_cpu(int cpu)
{
mpic_setup_this_cpu();
}
static int __cpuinit smp_iss4xx_kick_cpu(int cpu)
static int smp_iss4xx_kick_cpu(int cpu)
{
struct device_node *cpunode = of_get_cpu_node(cpu, NULL);
const u64 *spin_table_addr_prop;

@ -99,7 +99,7 @@ static void mpc85xx_take_timebase(void)
}
#ifdef CONFIG_HOTPLUG_CPU
static void __cpuinit smp_85xx_mach_cpu_die(void)
static void smp_85xx_mach_cpu_die(void)
{
unsigned int cpu = smp_processor_id();
u32 tmp;
@ -141,7 +141,7 @@ static inline u32 read_spin_table_addr_l(void *spin_table)
return in_be32(&((struct epapr_spin_table *)spin_table)->addr_l);
}
static int __cpuinit smp_85xx_kick_cpu(int nr)
static int smp_85xx_kick_cpu(int nr)
{
unsigned long flags;
const u64 *cpu_rel_addr;
@ -362,7 +362,7 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
}
#endif /* CONFIG_KEXEC */
static void __cpuinit smp_85xx_setup_cpu(int cpu_nr)
static void smp_85xx_setup_cpu(int cpu_nr)
{
if (smp_85xx_ops.probe == smp_mpic_probe)
mpic_setup_this_cpu();

@ -885,7 +885,7 @@ static int smp_core99_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
static struct notifier_block smp_core99_cpu_nb = {
.notifier_call = smp_core99_cpu_notify,
};
#endif /* CONFIG_HOTPLUG_CPU */

@ -40,7 +40,7 @@
#define DBG(fmt...)
#endif
static void __cpuinit pnv_smp_setup_cpu(int cpu)
static void pnv_smp_setup_cpu(int cpu)
{
if (cpu != boot_cpuid)
xics_setup_cpu();