Merge branch 'linus' into sched/core
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 46c6f8d5dc06d2dce20f0f6f52e1884519403651..43c9451527321ecdbb945a275b45c39d500d1deb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -258,6 +258,10 @@ extern spinlock_t mmlist_lock;
 
 struct task_struct;
 
+#ifdef CONFIG_PROVE_RCU
+extern int lockdep_tasklist_lock_is_held(void);
+#endif /* #ifdef CONFIG_PROVE_RCU */
+
 extern void sched_init(void);
 extern void sched_init_smp(void);
 extern asmlinkage void schedule_tail(struct task_struct *prev);
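
For context: CONFIG_PROVE_RCU builds gain a declaration of lockdep_tasklist_lock_is_held(), which lets RCU-protected task-list accesses assert that the caller is in a legitimate read-side context. A minimal sketch of the matching definition (mainline keeps it in kernel/fork.c) and of a hypothetical consumer; treat both as illustrative rather than verbatim kernel code:

    #ifdef CONFIG_PROVE_RCU
    /* Sketch of the definition side: report whether lockdep currently
     * sees tasklist_lock as held by this context. */
    int lockdep_tasklist_lock_is_held(void)
    {
            return lockdep_is_held(&tasklist_lock);
    }
    EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
    #endif

    /* Hypothetical consumer: the access is legal either inside
     * rcu_read_lock() or with tasklist_lock held; PROVE_RCU
     * warns otherwise. */
    static struct task_struct *sketch_get_parent(struct task_struct *p)
    {
            return rcu_dereference_check(p->real_parent,
                                         rcu_read_lock_held() ||
                                         lockdep_tasklist_lock_is_held());
    }
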
@@ -271,11 +275,17 @@ extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
 extern int get_nohz_load_balancer(void);
+extern int nohz_ratelimit(int cpu);
 #else
 static inline int select_nohz_load_balancer(int cpu)
 {
        return 0;
 }
+
+static inline int nohz_ratelimit(int cpu)
+{
+       return 0;
+}
 #endif
 
 /*
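
The new nohz_ratelimit() follows the standard config-stub pattern: a real implementation only when both CONFIG_SMP and CONFIG_NO_HZ are set, and a static inline returning 0 otherwise, so callers need no #ifdef of their own. An illustrative call site; try_stop_tick() is a hypothetical stand-in for the tick-stop path in kernel/time/tick-sched.c:

    /* Hypothetical sketch: decide whether this CPU may stop its
     * periodic tick when going idle. */
    static void try_stop_tick(int cpu)
    {
            if (nohz_ratelimit(cpu))
                    return; /* woke too recently; keep the tick running */

            /* ... otherwise switch the CPU to one-shot (tickless) mode ... */
    }
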
@@ -1073,36 +1083,8 @@ struct load_weight {
        unsigned long weight, inv_weight;
 };
 
-/*
- * CFS stats for a schedulable entity (task, task-group etc)
- *
- * Current field usage histogram:
- *
- *     4 se->block_start
- *     4 se->run_node
- *     4 se->sleep_start
- *     6 se->load.weight
- */
-struct sched_entity {
-       struct load_weight      load;           /* for load-balancing */
-       struct rb_node          run_node;
-       struct list_head        group_node;
-       unsigned int            on_rq;
-
-       u64                     exec_start;
-       u64                     sum_exec_runtime;
-       u64                     vruntime;
-       u64                     prev_sum_exec_runtime;
-
-       u64                     last_wakeup;
-       u64                     avg_overlap;
-
-       u64                     nr_migrations;
-
-       u64                     start_runtime;
-       u64                     avg_wakeup;
-
 #ifdef CONFIG_SCHEDSTATS
+struct sched_statistics {
        u64                     wait_start;
        u64                     wait_max;
        u64                     wait_count;
@@ -1134,6 +1116,24 @@ struct sched_entity {
        u64                     nr_wakeups_affine_attempts;
        u64                     nr_wakeups_passive;
        u64                     nr_wakeups_idle;
+};
+#endif
+
+struct sched_entity {
+       struct load_weight      load;           /* for load-balancing */
+       struct rb_node          run_node;
+       struct list_head        group_node;
+       unsigned int            on_rq;
+
+       u64                     exec_start;
+       u64                     sum_exec_runtime;
+       u64                     vruntime;
+       u64                     prev_sum_exec_runtime;
+
+       u64                     nr_migrations;
+
+#ifdef CONFIG_SCHEDSTATS
+       struct sched_statistics statistics;
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
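
The two hunks above restructure the scheduler statistics: the CONFIG_SCHEDSTATS-only fields leave struct sched_entity for a new struct sched_statistics, embedded as se->statistics, while the old field-usage histogram comment and the wakeup-heuristic fields (last_wakeup, avg_overlap, start_runtime, avg_wakeup) disappear outright. Accesses therefore gain one level of indirection, for example (a sketch modeled on kernel/sched_fair.c, not verbatim):

    /* Sketch: record when this entity started waiting on the runqueue.
     * schedstat_set() compiles away when CONFIG_SCHEDSTATS is off. */
    static void update_stats_wait_start(struct cfs_rq *cfs_rq,
                                        struct sched_entity *se)
    {
            u64 now = rq_of(cfs_rq)->clock; /* per-runqueue clock */

            schedstat_set(se->statistics.wait_start, now); /* was se->wait_start */
    }
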
@@ -1473,7 +1473,7 @@ struct task_struct {
 
        struct list_head        *scm_work_list;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       /* Index of current stored adress in ret_stack */
+       /* Index of current stored address in ret_stack */
        int curr_ret_stack;
        /* Stack of return addresses for return function tracing */
        struct ftrace_ret_stack *ret_stack;
@@ -2391,9 +2391,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
 
 static inline void thread_group_cputime_init(struct signal_struct *sig)
 {
-       sig->cputimer.cputime = INIT_CPUTIME;
        spin_lock_init(&sig->cputimer.lock);
-       sig->cputimer.running = 0;
 }
 
 static inline void thread_group_cputime_free(struct signal_struct *sig)
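
Dropping the INIT_CPUTIME assignment and the running = 0 store is only safe if the containing signal_struct reaches this helper already zeroed; the remaining work is initializing the spinlock, which has no all-zero initial state. A hedged sketch of that assumed allocation-side pattern; alloc_signal() is hypothetical, and kmem_cache_zalloc() is the assumed zeroing allocator:

    /* Hypothetical sketch: zeroed allocation leaves only the lock
     * needing explicit runtime initialization. */
    static struct signal_struct *alloc_signal(void)
    {
            struct signal_struct *sig;

            sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
            if (!sig)
                    return NULL;

            thread_group_cputime_init(sig); /* now just spin_lock_init() */
            return sig;
    }
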