ptrace: optimize exit_ptrace() for the likely case
author    Oleg Nesterov <oleg@redhat.com>
          Wed, 11 Aug 2010 01:03:07 +0000 (18:03 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Wed, 11 Aug 2010 15:59:19 +0000 (08:59 -0700)
exit_ptrace() takes tasklist_lock unconditionally.  We need this lock to
avoid the race with ptrace_traceme(); it acts as a barrier.
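
For context: ptrace_traceme() also takes tasklist_lock for writing before
linking the caller onto its real parent's ->ptraced list, and it refuses to
attach once the parent has PF_EXITING set, so holding the same lock here is
what serializes the two paths.  A rough sketch of that path in kernels of
this era (not part of this patch, and possibly differing in detail):

        int ptrace_traceme(void)
        {
                int ret = -EPERM;

                write_lock_irq(&tasklist_lock);
                if (!current->ptrace) {
                        ret = security_ptrace_traceme(current->parent);
                        /*
                         * Don't attach to a real parent that has already
                         * passed exit_ptrace(); it would never untrace us.
                         */
                        if (!ret && !(current->real_parent->flags & PF_EXITING)) {
                                current->ptrace = PT_PTRACED;
                                __ptrace_link(current, current->real_parent);
                        }
                }
                write_unlock_irq(&tasklist_lock);

                return ret;
        }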

Change its caller, forget_original_parent(), to call exit_ptrace() under
tasklist_lock.  Change exit_ptrace() to drop and reacquire this lock if
needed.

This allows us to add the fastpath list_empty(ptraced) check.  In the
likely no-tracees case, exit_ptrace() just returns and we avoid the lock()
+ unlock() sequence.
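
Pieced together from the diff below, the reworked exit_ptrace() then looks
roughly like this (a sketch reconstructed from the hunks, not the verbatim
file):

        void exit_ptrace(struct task_struct *tracer)
        {
                struct task_struct *p, *n;
                LIST_HEAD(ptrace_dead);

                /* Fastpath: no tracees, return with the caller's lock still held. */
                if (likely(list_empty(&tracer->ptraced)))
                        return;

                /* The caller, forget_original_parent(), holds tasklist_lock. */
                list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
                        if (__ptrace_detach(tracer, p))
                                list_add(&p->ptrace_entry, &ptrace_dead);
                }

                /* Drop the lock to release the detached tracees ... */
                write_unlock_irq(&tasklist_lock);
                BUG_ON(!list_empty(&tracer->ptraced));

                list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
                        list_del_init(&p->ptrace_entry);
                        release_task(p);
                }

                /* ... and reacquire it before returning to the caller. */
                write_lock_irq(&tasklist_lock);
        }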

"Zhang, Yanmin" <yanmin_zhang@linux.intel.com> suggested to add this
check, and he reports that this change adds about 11% improvement in some
tests.

Suggested-and-tested-by: "Zhang, Yanmin" <yanmin_zhang@linux.intel.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
kernel/exit.c
kernel/ptrace.c

diff --git a/kernel/exit.c b/kernel/exit.c
index ceffc67b564ae28eb6b46be67bfcf4f0ab7e8c6a..671ed56e0a490a56ed4e8dc15aa6624d60ebaadf 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -771,9 +771,12 @@ static void forget_original_parent(struct task_struct *father)
        struct task_struct *p, *n, *reaper;
        LIST_HEAD(dead_children);
 
-       exit_ptrace(father);
-
        write_lock_irq(&tasklist_lock);
+       /*
+        * Note that exit_ptrace() and find_new_reaper() might
+        * drop tasklist_lock and reacquire it.
+        */
+       exit_ptrace(father);
        reaper = find_new_reaper(father);
 
        list_for_each_entry_safe(p, n, &father->children, sibling) {
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 74a3d693c196810f9eed784a2825ea50a7f9db83..f34d798ef4a25831b98090a7b85fa6d3b93d0de2 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -324,26 +324,32 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
 }
 
 /*
- * Detach all tasks we were using ptrace on.
+ * Detach all tasks we were using ptrace on. Called with tasklist held
+ * for writing, and returns with it held too. But note it can release
+ * and reacquire the lock.
  */
 void exit_ptrace(struct task_struct *tracer)
 {
        struct task_struct *p, *n;
        LIST_HEAD(ptrace_dead);
 
-       write_lock_irq(&tasklist_lock);
+       if (likely(list_empty(&tracer->ptraced)))
+               return;
+
        list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
                if (__ptrace_detach(tracer, p))
                        list_add(&p->ptrace_entry, &ptrace_dead);
        }
-       write_unlock_irq(&tasklist_lock);
 
+       write_unlock_irq(&tasklist_lock);
        BUG_ON(!list_empty(&tracer->ptraced));
 
        list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
                list_del_init(&p->ptrace_entry);
                release_task(p);
        }
+
+       write_lock_irq(&tasklist_lock);
 }
 
 int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)