diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9cae286824bbb83541945c29d2e11b8e80ae6a4b..d9d8ece46a15885410a1bb138f4c6c7155573803 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -73,6 +73,7 @@
 #include <linux/init_task.h>
 #include <linux/binfmts.h>
 #include <linux/context_tracking.h>
+#include <linux/compiler.h>
 
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
@@ -2191,7 +2192,7 @@ static inline void post_schedule(struct rq *rq)
  * schedule_tail - first thing a freshly forked thread must call.
  * @prev: the thread we just switched away from.
  */
-asmlinkage void schedule_tail(struct task_struct *prev)
+asmlinkage __visible void schedule_tail(struct task_struct *prev)
        __releases(rq->lock)
 {
        struct rq *rq = this_rq();
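
This hunk, like the four below for schedule(), schedule_user(), preempt_schedule() and preempt_schedule_irq(), marks an entry point that is reached from assembly as __visible; the <linux/compiler.h> include added above supplies the annotation. A minimal sketch of the intent, assuming __visible expands to gcc's externally_visible attribute as in the compiler-gcc4 header of this era:

/*
 * Sketch only, not part of the diff.  Assumed definition: under LTO or
 * -fwhole-program the compiler sees no C callers of schedule_tail() --
 * the only calls come from the assembly entry code -- so without the
 * attribute it could drop or localize the symbol.
 */
#define __visible __attribute__((externally_visible))

asmlinkage __visible void schedule_tail(struct task_struct *prev);
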
@@ -2740,7 +2741,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
                blk_schedule_flush_plug(tsk);
 }
 
-asmlinkage void __sched schedule(void)
+asmlinkage __visible void __sched schedule(void)
 {
        struct task_struct *tsk = current;
 
@@ -2750,7 +2751,7 @@ asmlinkage void __sched schedule(void)
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_CONTEXT_TRACKING
-asmlinkage void __sched schedule_user(void)
+asmlinkage __visible void __sched schedule_user(void)
 {
        /*
         * If we come here after a random call to set_need_resched(),
@@ -2782,7 +2783,7 @@ void __sched schedule_preempt_disabled(void)
  * off of preempt_enable. Kernel preemptions off return from interrupt
  * occur there and call schedule directly.
  */
-asmlinkage void __sched notrace preempt_schedule(void)
+asmlinkage __visible void __sched notrace preempt_schedule(void)
 {
        /*
         * If there is a non-zero preempt_count or interrupts are disabled,
@@ -2812,7 +2813,7 @@ EXPORT_SYMBOL(preempt_schedule);
  * Note, that this is called and return with irqs disabled. This will
  * protect us against recursive calling from irq.
  */
-asmlinkage void __sched preempt_schedule_irq(void)
+asmlinkage __visible void __sched preempt_schedule_irq(void)
 {
        enum ctx_state prev_state;
 
@@ -2845,52 +2846,6 @@ int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
 }
 EXPORT_SYMBOL(default_wake_function);
 
-static long __sched
-sleep_on_common(wait_queue_head_t *q, int state, long timeout)
-{
-       unsigned long flags;
-       wait_queue_t wait;
-
-       init_waitqueue_entry(&wait, current);
-
-       __set_current_state(state);
-
-       spin_lock_irqsave(&q->lock, flags);
-       __add_wait_queue(q, &wait);
-       spin_unlock(&q->lock);
-       timeout = schedule_timeout(timeout);
-       spin_lock_irq(&q->lock);
-       __remove_wait_queue(q, &wait);
-       spin_unlock_irqrestore(&q->lock, flags);
-
-       return timeout;
-}
-
-void __sched interruptible_sleep_on(wait_queue_head_t *q)
-{
-       sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
-}
-EXPORT_SYMBOL(interruptible_sleep_on);
-
-long __sched
-interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
-{
-       return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
-}
-EXPORT_SYMBOL(interruptible_sleep_on_timeout);
-
-void __sched sleep_on(wait_queue_head_t *q)
-{
-       sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
-}
-EXPORT_SYMBOL(sleep_on);
-
-long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
-{
-       return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
-}
-EXPORT_SYMBOL(sleep_on_timeout);
-
 #ifdef CONFIG_RT_MUTEXES
 
 /*
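
The hunk above removes the sleep_on() family. These helpers were inherently racy: the event can fire between the caller's check and the enqueue, and the task then sleeps through the wakeup. The expected replacement is the wait_event() family, which sleeps on an explicit condition and re-checks it around every wakeup. A hedged sketch of that pattern, with hypothetical names (demo_wq, demo_ready) not taken from this file:

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);   /* hypothetical wait queue */
static int demo_ready;                     /* hypothetical condition  */

/* Where a caller previously did interruptible_sleep_on(&demo_wq): */
static int demo_wait(void)
{
        /* Sleeps until demo_ready is non-zero; the condition is checked
         * before sleeping and after each wakeup, closing the lost-wakeup
         * window that made sleep_on() unsafe. */
        return wait_event_interruptible(demo_wq, demo_ready);
}

/* The waker sets the condition before waking the queue: */
static void demo_wake(void)
{
        demo_ready = 1;
        wake_up(&demo_wq);
}
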
@@ -6498,7 +6453,7 @@ static cpumask_var_t fallback_doms;
  * cpu core maps. It is supposed to return 1 if the topology changed
  * or 0 if it stayed the same.
  */
-int __attribute__((weak)) arch_update_cpu_topology(void)
+int __weak arch_update_cpu_topology(void)
 {
        return 0;
 }
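
__weak is the kernel's shorthand for __attribute__((weak)); the change above is spelling only. The weak default returning 0 exists so that an architecture can ship a strong definition that overrides it at link time. A hypothetical override, following the contract stated in the comment above (return 1 if the topology changed, 0 if it stayed the same):

/* Hypothetical arch/<arch>/kernel/topology.c -- the helper name is
 * illustrative, not a real kernel function. */
int arch_update_cpu_topology(void)
{
        return my_arch_topology_changed() ? 1 : 0;
}
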
@@ -7230,7 +7185,7 @@ void sched_move_task(struct task_struct *tsk)
        if (unlikely(running))
                tsk->sched_class->put_prev_task(rq, tsk);
 
-       tg = container_of(task_css_check(tsk, cpu_cgroup_subsys_id,
+       tg = container_of(task_css_check(tsk, cpu_cgrp_id,
                                lockdep_is_held(&tsk->sighand->siglock)),
                          struct task_group, css);
        tg = autogroup_task_group(tsk, tg);
@@ -7657,7 +7612,7 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
 {
        struct task_struct *task;
 
-       cgroup_taskset_for_each(task, css, tset) {
+       cgroup_taskset_for_each(task, tset) {
 #ifdef CONFIG_RT_GROUP_SCHED
                if (!sched_rt_can_attach(css_tg(css), task))
                        return -EINVAL;
@@ -7675,7 +7630,7 @@ static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
 {
        struct task_struct *task;
 
-       cgroup_taskset_for_each(task, css, tset)
+       cgroup_taskset_for_each(task, tset)
                sched_move_task(task);
 }
 
@@ -8014,8 +7969,7 @@ static struct cftype cpu_files[] = {
        { }     /* terminate */
 };
 
-struct cgroup_subsys cpu_cgroup_subsys = {
-       .name           = "cpu",
+struct cgroup_subsys cpu_cgrp_subsys = {
        .css_alloc      = cpu_cgroup_css_alloc,
        .css_free       = cpu_cgroup_css_free,
        .css_online     = cpu_cgroup_css_online,
@@ -8023,7 +7977,6 @@ struct cgroup_subsys cpu_cgroup_subsys = {
        .can_attach     = cpu_cgroup_can_attach,
        .attach         = cpu_cgroup_attach,
        .exit           = cpu_cgroup_exit,
-       .subsys_id      = cpu_cgroup_subsys_id,
        .base_cftypes   = cpu_files,
        .early_init     = 1,
 };
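
The cgroup hunks above track the cgroup core rework of this cycle: cgroup_taskset_for_each() dropped its css argument, and the subsystem is now named cpu_cgrp_subsys, with its name and subsys_id supplied by cgroup core rather than set here. A paraphrased sketch of the SUBSYS() x-macro believed to generate the cpu_cgrp_id used in sched_move_task() above; treat the exact spelling as an assumption, not a quote of <linux/cgroup.h>:

/* Each SUBSYS(x) line in <linux/cgroup_subsys.h> (e.g. SUBSYS(cpu))
 * expands to an enum value x_cgrp_id, and cgroup core pairs that id
 * with the struct named x_cgrp_subsys, so the id and name no longer
 * need to be repeated in each subsystem definition. */
#define SUBSYS(_x) _x ## _cgrp_id,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
        CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS
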