
sched: style cleanup, #2

style cleanup of various changes that were done recently.

no code changed:

      text    data     bss     dec     hex filename
     26399    2578      48   29025    7161 sched.o.before
     26399    2578      48   29025    7161 sched.o.after

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Ingo Molnar 2008-01-25 21:08:19 +01:00
parent d7876a08db
commit 0eab914657
1 changed file with 17 additions and 15 deletions
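For reference, a before/after table like the one in the changelog is typically produced with binutils' size(1), run on object files saved from builds of the old and new tree, e.g.:

      size sched.o.before sched.o.after

Identical text/data/bss numbers are the usual quick sanity check that a pure style cleanup generated no code changes.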


@@ -235,17 +235,17 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares);
  * Every task in system belong to this group at bootup.
  */
 struct task_group init_task_group = {
-	.se     = init_sched_entity_p,
+	.se	= init_sched_entity_p,
 	.cfs_rq = init_cfs_rq_p,
 };
 
 #ifdef CONFIG_FAIR_USER_SCHED
-# define INIT_TASK_GROUP_LOAD	2*NICE_0_LOAD
+# define INIT_TASK_GROUP_LOAD	(2*NICE_0_LOAD)
 #else
 # define INIT_TASK_GROUP_LOAD	NICE_0_LOAD
 #endif
 
-#define MIN_GROUP_SHARES       2
+#define MIN_GROUP_SHARES	2
 
 static int init_task_group_load = INIT_TASK_GROUP_LOAD;
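Of the three changes in this hunk, only the INIT_TASK_GROUP_LOAD one is more than whitespace: wrapping the expansion in parentheses is standard macro hygiene. No current user divides or subtracts by the macro, which is consistent with the unchanged size output above, but an unparenthesized expansion misparses easily. A tiny standalone illustration (the LOAD_* names are hypothetical; NICE_0_LOAD's value of 1024 is the kernel's):

#include <stdio.h>

#define NICE_0_LOAD 1024		/* value borrowed from the kernel */
#define LOAD_BARE   2*NICE_0_LOAD	/* old, unparenthesized form */
#define LOAD_PAREN  (2*NICE_0_LOAD)	/* new, parenthesized form */

int main(void)
{
	/* 4096 / LOAD_BARE expands to 4096 / 2 * 1024 == 2097152 (!),
	 * while 4096 / LOAD_PAREN is the intended 4096 / 2048 == 2. */
	printf("%d vs %d\n", 4096 / LOAD_BARE, 4096 / LOAD_PAREN);
	return 0;
}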
@@ -352,8 +352,8 @@ struct rt_rq {
 
 /*
  * We add the notion of a root-domain which will be used to define per-domain
- * variables.  Each exclusive cpuset essentially defines an island domain by
- * fully partitioning the member cpus from any other cpuset.  Whenever a new
+ * variables. Each exclusive cpuset essentially defines an island domain by
+ * fully partitioning the member cpus from any other cpuset. Whenever a new
  * exclusive cpuset is created, we also create and attach a new root-domain
  * object.
 *
@@ -365,12 +365,12 @@ struct root_domain {
 	cpumask_t span;
 	cpumask_t online;
 
-        /*
+	/*
 	 * The "RT overload" flag: it gets set if a CPU has more than
 	 * one runnable RT task.
 	 */
 	cpumask_t rto_mask;
-	atomic_t  rto_count;
+	atomic_t rto_count;
 };
 
 static struct root_domain def_root_domain;
@@ -434,7 +434,7 @@ struct rq {
 	atomic_t nr_iowait;
 
 #ifdef CONFIG_SMP
-	struct root_domain  *rd;
+	struct root_domain *rd;
 	struct sched_domain *sd;
 
 	/* For active balancing */
@@ -5066,7 +5066,7 @@ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 	if (p->sched_class->set_cpus_allowed)
 		p->sched_class->set_cpus_allowed(p, &new_mask);
 	else {
-		p->cpus_allowed    = new_mask;
+		p->cpus_allowed = new_mask;
 		p->nr_cpus_allowed = cpus_weight(new_mask);
 	}
@@ -5847,9 +5847,10 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	if (rq->rd) {
 		struct root_domain *old_rd = rq->rd;
 
-		for (class = sched_class_highest; class; class = class->next)
+		for (class = sched_class_highest; class; class = class->next) {
 			if (class->leave_domain)
 				class->leave_domain(rq);
+		}
 
 		if (atomic_dec_and_test(&old_rd->refcount))
 			kfree(old_rd);
@@ -5858,9 +5859,10 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	atomic_inc(&rd->refcount);
 	rq->rd = rd;
 
-	for (class = sched_class_highest; class; class = class->next)
+	for (class = sched_class_highest; class; class = class->next) {
 		if (class->join_domain)
 			class->join_domain(rq);
+	}
 
 	spin_unlock_irqrestore(&rq->lock, flags);
 }
@@ -5895,11 +5897,11 @@ static struct root_domain *alloc_rootdomain(const cpumask_t *map)
 }
 
 /*
- * Attach the domain 'sd' to 'cpu' as its base domain.  Callers must
+ * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
  * hold the hotplug lock.
  */
-static void cpu_attach_domain(struct sched_domain *sd,
-			      struct root_domain *rd, int cpu)
+static void
+cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	struct sched_domain *tmp;
@@ -7095,7 +7097,7 @@ static int rebalance_shares(struct sched_domain *sd, int this_cpu)
 		for_each_cpu_mask(i, sdspan)
 			total_load += tg->cfs_rq[i]->load.weight;
 
-                /* Nothing to do if this group has no load */
+		/* Nothing to do if this group has no load */
 		if (!total_load)
 			continue;
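The two rq_attach_root() hunks above are the only ones that add lines: kernel style wants braces around a multi-line body even when the if statement alone would not need them, hence the extra closing brace after each loop. As a minimal, self-contained sketch of the pattern those hunks touch (plain C with simplified types, no locking or atomics, and stubbed sched_class entries; not the kernel code itself):

#include <stdio.h>
#include <stdlib.h>

struct rq;

struct sched_class {
	const struct sched_class *next;
	/* leave_domain/join_domain are optional, hence the NULL checks */
	void (*leave_domain)(struct rq *rq);
	void (*join_domain)(struct rq *rq);
};

struct root_domain {
	int refcount;			/* atomic_t in the kernel */
};

struct rq {
	struct root_domain *rd;
};

static const struct sched_class fair_sched_class = {
	.next = NULL,
};

static const struct sched_class rt_sched_class = {
	.next = &fair_sched_class,
};

#define sched_class_highest (&rt_sched_class)

static void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	const struct sched_class *class;

	if (rq->rd) {
		struct root_domain *old_rd = rq->rd;

		/* let every class detach from the old root-domain */
		for (class = sched_class_highest; class; class = class->next) {
			if (class->leave_domain)
				class->leave_domain(rq);
		}

		/* drop the old root-domain's reference, free on last put */
		if (--old_rd->refcount == 0)
			free(old_rd);
	}

	rd->refcount++;
	rq->rd = rd;

	/* let every class attach to the new root-domain */
	for (class = sched_class_highest; class; class = class->next) {
		if (class->join_domain)
			class->join_domain(rq);
	}
}

int main(void)
{
	struct rq rq = { .rd = NULL };
	struct root_domain *rd = calloc(1, sizeof(*rd));

	rq_attach_root(&rq, rd);
	printf("refcount after attach: %d\n", rd->refcount);
	return 0;
}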