make seccomp zerocost in schedule
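
Seccomp mode 1 denies RDTSC to the sandboxed task by setting CR4.TSD, and
until now every context switch paid for that: disable_tsc() dereferenced
both tasks' thread_info and tested the seccomp state even when seccomp was
idle. This patch introduces a TIF_NOTSC thread flag, moves the CR4.TSD
toggle into __switch_to_xtra() where it fires only when the outgoing and
incoming tasks disagree, and folds the flag into new
_TIF_WORK_CTXSW_PREV/_TIF_WORK_CTXSW_NEXT masks tested on the fast path,
so tasks that never enter seccomp pay nothing.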
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 06dfa65ad18057bda3da9117531d440462d8786b..6c49acb9698210849ed6e74090717c7499a21562 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -538,8 +538,31 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
        return 1;
 }
 
-static noinline void __switch_to_xtra(struct task_struct *next_p,
-                                   struct tss_struct *tss)
+#ifdef CONFIG_SECCOMP
+void hard_disable_TSC(void)
+{
+       write_cr4(read_cr4() | X86_CR4_TSD);
+}
+void disable_TSC(void)
+{
+       preempt_disable();
+       if (!test_and_set_thread_flag(TIF_NOTSC))
+               /*
+                * Must flip the CPU state synchronously with
+                * TIF_NOTSC in the current running context.
+                */
+               hard_disable_TSC();
+       preempt_enable();
+}
+void hard_enable_TSC(void)
+{
+       write_cr4(read_cr4() & ~X86_CR4_TSD);
+}
+#endif /* CONFIG_SECCOMP */
+
+static noinline void
+__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+                struct tss_struct *tss)
 {
        struct thread_struct *next;
 
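The three helpers added above give seccomp a way to pin TSC access off.
disable_TSC() flips TIF_NOTSC and CR4.TSD together under preempt_disable(),
so a context switch cannot slip in between the flag update and the hardware
write and leave the two out of sync. A hedged sketch of a caller follows;
the enable path is an assumption, since this page only shows process.c:

	/*
	 * Illustrative only: a seccomp-enable path would presumably
	 * pair the mode flag with disable_TSC() like this.
	 */
	static long sketch_enable_seccomp(void)
	{
		set_thread_flag(TIF_SECCOMP);
		disable_TSC();	/* sets TIF_NOTSC and CR4.TSD together */
		return 0;
	}
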
@@ -555,6 +578,17 @@ static noinline void __switch_to_xtra(struct task_struct *next_p,
                set_debugreg(next->debugreg[7], 7);
        }
 
+#ifdef CONFIG_SECCOMP
+       if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
+           test_tsk_thread_flag(next_p, TIF_NOTSC)) {
+               /* prev and next are different */
+               if (test_tsk_thread_flag(next_p, TIF_NOTSC))
+                       hard_disable_TSC();
+               else
+                       hard_enable_TSC();
+       }
+#endif
+
        if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
                /*
                 * Disable the bitmap via an invalid offset. We still cache
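The seccomp hunk above is the heart of the optimisation: CR4 is written
only when prev and next disagree about TIF_NOTSC, so switching between two
seccomp tasks, or two normal tasks, costs no CR4 access at all. A minimal
user-space C sketch of the same XOR pattern (all names illustrative):

	#include <stdio.h>
	#include <stdbool.h>

	static bool cr4_tsd;	/* stands in for X86_CR4_TSD */

	/* Touch "hardware" state only when the two tasks disagree. */
	static void switch_tsc_state(bool prev_notsc, bool next_notsc)
	{
		if (prev_notsc ^ next_notsc)
			cr4_tsd = next_notsc;	/* one write on disagreement */
		/* equal flags: zero writes, the common case */
	}

	int main(void)
	{
		switch_tsc_state(false, true);	/* normal -> seccomp */
		printf("TSD=%d\n", cr4_tsd);	/* TSD=1 */
		switch_tsc_state(true, true);	/* seccomp -> seccomp: no write */
		switch_tsc_state(true, false);	/* seccomp -> normal */
		printf("TSD=%d\n", cr4_tsd);	/* TSD=0 */
		return 0;
	}
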
@@ -585,33 +619,6 @@ static noinline void __switch_to_xtra(struct task_struct *next_p,
        tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
 }
 
-/*
- * This function selects if the context switch from prev to next
- * has to tweak the TSC disable bit in the cr4.
- */
-static inline void disable_tsc(struct task_struct *prev_p,
-                              struct task_struct *next_p)
-{
-       struct thread_info *prev, *next;
-
-       /*
-        * gcc should eliminate the ->thread_info dereference if
-        * has_secure_computing returns 0 at compile time (SECCOMP=n).
-        */
-       prev = task_thread_info(prev_p);
-       next = task_thread_info(next_p);
-
-       if (has_secure_computing(prev) || has_secure_computing(next)) {
-               /* slow path here */
-               if (has_secure_computing(prev) &&
-                   !has_secure_computing(next)) {
-                       write_cr4(read_cr4() & ~X86_CR4_TSD);
-               } else if (!has_secure_computing(prev) &&
-                          has_secure_computing(next))
-                       write_cr4(read_cr4() | X86_CR4_TSD);
-       }
-}
-
 /*
  *     switch_to(x,y) should switch tasks from x to y.
  *
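Note what the deletion above buys: the old disable_tsc() ran on every
single context switch and called has_secure_computing() on both tasks,
touching two thread_info structures even when no task on the system used
seccomp. The user-visible behaviour is unchanged; a hedged user-space
probe of it (assuming a gcc target with RDTSC, and that the task entered
seccomp mode 1 first):

	#include <stdint.h>
	#include <stdio.h>

	/* With CR4.TSD set, RDTSC at CPL 3 raises #GP -> SIGSEGV. */
	static inline uint64_t rdtsc(void)
	{
		uint32_t lo, hi;
		__asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
		return ((uint64_t)hi << 32) | lo;
	}

	int main(void)
	{
		/* Dies here if seccomp mode 1 is already engaged. */
		printf("tsc=%llu\n", (unsigned long long)rdtsc());
		return 0;
	}
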
@@ -689,11 +696,9 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
        /*
         * Now maybe handle debug registers and/or IO bitmaps
         */
-       if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)
-           || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)))
-               __switch_to_xtra(next_p, tss);
-
-       disable_tsc(prev_p, next_p);
+       if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
+                    task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
+               __switch_to_xtra(prev_p, next_p, tss);
 
        /*
         * Leave lazy mode, flushing any hypercalls made here.
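
The rewritten fast path above reduces the decision to one flag-mask test
per task. The _TIF_WORK_CTXSW_PREV/_TIF_WORK_CTXSW_NEXT masks live in the
companion include/asm-i386/thread_info.h change, which this blobdiff page
does not show; presumably they gather the flags __switch_to_xtra() cares
about, mirroring the old asymmetry (debug registers are only ever loaded
for the incoming task):

	/* Assumed definitions, not part of this file. */
	#define _TIF_WORK_CTXSW_NEXT (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUG)
	#define _TIF_WORK_CTXSW_PREV (_TIF_IO_BITMAP | _TIF_NOTSC)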