* Every task in the system belongs to this group at bootup.
*/
struct task_group init_task_group = {
- .se = init_sched_entity_p,
+ .se = init_sched_entity_p,
.cfs_rq = init_cfs_rq_p,
};
#ifdef CONFIG_FAIR_USER_SCHED
-# define INIT_TASK_GROUP_LOAD 2*NICE_0_LOAD
+# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
#else
# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
#endif
-#define MIN_GROUP_SHARES 2
+#define MIN_GROUP_SHARES 2
static int init_task_group_load = INIT_TASK_GROUP_LOAD;
/*
* We add the notion of a root-domain which will be used to define per-domain
- * variables. Each exclusive cpuset essentially defines an island domain by
- * fully partitioning the member cpus from any other cpuset. Whenever a new
+ * variables. Each exclusive cpuset essentially defines an island domain by
+ * fully partitioning the member cpus from any other cpuset. Whenever a new
* exclusive cpuset is created, we also create and attach a new root-domain
* object.
*
cpumask_t span;
cpumask_t online;
- /*
+ /*
* The "RT overload" flag: it gets set if a CPU has more than
* one runnable RT task.
*/
cpumask_t rto_mask;
- atomic_t rto_count;
+ atomic_t rto_count;
};
static struct root_domain def_root_domain;
atomic_t nr_iowait;
#ifdef CONFIG_SMP
- struct root_domain *rd;
+ struct root_domain *rd;
struct sched_domain *sd;
/* For active balancing */
if (p->sched_class->set_cpus_allowed)
p->sched_class->set_cpus_allowed(p, &new_mask);
else {
- p->cpus_allowed = new_mask;
+ p->cpus_allowed = new_mask;
p->nr_cpus_allowed = cpus_weight(new_mask);
}
if (rq->rd) {
struct root_domain *old_rd = rq->rd;
- for (class = sched_class_highest; class; class = class->next)
+ for (class = sched_class_highest; class; class = class->next) {
if (class->leave_domain)
class->leave_domain(rq);
+ }
if (atomic_dec_and_test(&old_rd->refcount))
kfree(old_rd);
atomic_inc(&rd->refcount);
rq->rd = rd;
- for (class = sched_class_highest; class; class = class->next)
+ for (class = sched_class_highest; class; class = class->next) {
if (class->join_domain)
class->join_domain(rq);
+ }
spin_unlock_irqrestore(&rq->lock, flags);
}
}
/*
- * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
+ * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
* hold the hotplug lock.
*/
-static void cpu_attach_domain(struct sched_domain *sd,
- struct root_domain *rd, int cpu)
+static void
+cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
struct rq *rq = cpu_rq(cpu);
struct sched_domain *tmp;
for_each_cpu_mask(i, sdspan)
total_load += tg->cfs_rq[i]->load.weight;
- /* Nothing to do if this group has no load */
+ /* Nothing to do if this group has no load */
if (!total_load)
continue;