Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 8 Oct 2009 19:16:35 +0000 (12:16 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 8 Oct 2009 19:16:35 +0000 (12:16 -0700)
* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  futex: fix requeue_pi key imbalance
  futex: Fix typo in FUTEX_WAIT/WAKE_BITSET_PRIVATE definitions
  rcu: Place root rcu_node structure in separate lockdep class
  rcu: Make hot-unplugged CPU relinquish its own RCU callbacks
  rcu: Move rcu_barrier() to rcutree
  futex: Move exit_pi_state() call to release_mm()
  futex: Nullify robust lists after cleanup
  futex: Fix locking imbalance
  panic: Fix panic message visibility by calling bust_spinlocks(0) before dying
  rcu: Replace the rcu_barrier enum with pointer to call_rcu*() function
  rcu: Clean up code based on review feedback from Josh Triplett, part 4
  rcu: Clean up code based on review feedback from Josh Triplett, part 3
  rcu: Fix rcu_lock_map build failure on CONFIG_PROVE_LOCKING=y
  rcu: Clean up code to address Ingo's checkpatch feedback
  rcu: Clean up code based on review feedback from Josh Triplett, part 2
  rcu: Clean up code based on review feedback from Josh Triplett

include/linux/futex.h
init/main.c
kernel/exit.c
kernel/fork.c
kernel/futex.c
kernel/panic.c

diff --combined include/linux/futex.h
index 8ec17997d94fa0aafeaab5cc95824194c9e913f3,78b92ec9edbd4cf8969a0c05b3d05b1f45165717..1e5a26d7923216e2de6883fff7c4faf6f5507036
@@@ -4,6 -4,11 +4,6 @@@
  #include <linux/compiler.h>
  #include <linux/types.h>
  
 -struct inode;
 -struct mm_struct;
 -struct task_struct;
 -union ktime;
 -
  /* Second argument to futex syscall */
  
  
@@@ -33,8 -38,8 +33,8 @@@
  #define FUTEX_LOCK_PI_PRIVATE (FUTEX_LOCK_PI | FUTEX_PRIVATE_FLAG)
  #define FUTEX_UNLOCK_PI_PRIVATE       (FUTEX_UNLOCK_PI | FUTEX_PRIVATE_FLAG)
  #define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG)
- #define FUTEX_WAIT_BITSET_PRIVATE     (FUTEX_WAIT_BITS | FUTEX_PRIVATE_FLAG)
- #define FUTEX_WAKE_BITSET_PRIVATE     (FUTEX_WAKE_BITS | FUTEX_PRIVATE_FLAG)
+ #define FUTEX_WAIT_BITSET_PRIVATE     (FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG)
+ #define FUTEX_WAKE_BITSET_PRIVATE     (FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG)
  #define FUTEX_WAIT_REQUEUE_PI_PRIVATE (FUTEX_WAIT_REQUEUE_PI | \
                                         FUTEX_PRIVATE_FLAG)
  #define FUTEX_CMP_REQUEUE_PI_PRIVATE  (FUTEX_CMP_REQUEUE_PI | \
@@@ -124,11 -129,6 +124,11 @@@ struct robust_list_head 
  #define FUTEX_BITSET_MATCH_ANY        0xffffffff
  
  #ifdef __KERNEL__
 +struct inode;
 +struct mm_struct;
 +struct task_struct;
 +union ktime;
 +
  long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout,
              u32 __user *uaddr2, u32 val2, u32 val3);
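
For reference, the hunk above corrects FUTEX_WAIT_BITSET_PRIVATE and
FUTEX_WAKE_BITSET_PRIVATE to be built from FUTEX_WAIT_BITSET and
FUTEX_WAKE_BITSET. Below is a minimal userspace sketch of how the private
bitset opcodes are consumed, assuming glibc's syscall(2) wrapper and the
exported linux/futex.h header; the wrapper functions are illustrative and
not part of this patch:

#include <linux/futex.h>        /* FUTEX_*_BITSET_PRIVATE, FUTEX_BITSET_MATCH_ANY */
#include <sys/syscall.h>        /* SYS_futex */
#include <unistd.h>
#include <stdint.h>
#include <stddef.h>

/* Block while *uaddr still holds 'val'; the last argument (val3) is the bitset. */
static long futex_wait_bitset(uint32_t *uaddr, uint32_t val, uint32_t bitset)
{
        return syscall(SYS_futex, uaddr, FUTEX_WAIT_BITSET_PRIVATE,
                       val, NULL, NULL, bitset);
}

/* Wake up to 'nr' waiters whose wait bitset intersects 'bitset'. */
static long futex_wake_bitset(uint32_t *uaddr, int nr, uint32_t bitset)
{
        return syscall(SYS_futex, uaddr, FUTEX_WAKE_BITSET_PRIVATE,
                       nr, NULL, NULL, bitset);
}

int main(void)
{
        uint32_t word = 0;

        /* *uaddr != val, so the wait returns immediately with EWOULDBLOCK. */
        futex_wait_bitset(&word, 1, FUTEX_BITSET_MATCH_ANY);
        /* Nobody is waiting, so this returns the number of tasks woken (0). */
        return (int)futex_wake_bitset(&word, 1, FUTEX_BITSET_MATCH_ANY);
}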
  
diff --combined init/main.c
index 7449819a4805b14dd3c68d7300cc54ce5d4b8044,833d675677d1e08ecc1ee47e921b36e6d5c2e7d7..5988debfc505c84c8c5f7abeb0b5fb1e2d88f1fc
@@@ -18,6 -18,7 +18,6 @@@
  #include <linux/string.h>
  #include <linux/ctype.h>
  #include <linux/delay.h>
 -#include <linux/utsname.h>
  #include <linux/ioport.h>
  #include <linux/init.h>
  #include <linux/smp_lock.h>
@@@ -67,7 -68,6 +67,7 @@@
  #include <linux/async.h>
  #include <linux/kmemcheck.h>
  #include <linux/kmemtrace.h>
 +#include <linux/sfi.h>
  #include <linux/shmem_fs.h>
  #include <trace/boot.h>
  
@@@ -359,6 -359,11 +359,6 @@@ static inline void smp_prepare_cpus(uns
  
  #else
  
 -#if NR_CPUS > BITS_PER_LONG
 -cpumask_t cpu_mask_all __read_mostly = CPU_MASK_ALL;
 -EXPORT_SYMBOL(cpu_mask_all);
 -#endif
 -
  /* Setup number of possible processor ids */
  int nr_cpu_ids __read_mostly = NR_CPUS;
  EXPORT_SYMBOL(nr_cpu_ids);
@@@ -663,12 -668,12 +663,12 @@@ asmlinkage void __init start_kernel(voi
  #endif
        thread_info_cache_init();
        cred_init();
 -      fork_init(num_physpages);
 +      fork_init(totalram_pages);
        proc_caches_init();
        buffer_init();
        key_init();
        security_init();
 -      vfs_caches_init(num_physpages);
 +      vfs_caches_init(totalram_pages);
        radix_tree_init();
        signals_init();
        /* rootfs populating might need page-writeback */
        check_bugs();
  
        acpi_early_init(); /* before LAPIC and SMP init */
 +      sfi_init_late();
  
        ftrace_init();
  
@@@ -778,7 -782,6 +778,6 @@@ static void __init do_initcalls(void
   */
  static void __init do_basic_setup(void)
  {
-       rcu_init_sched(); /* needed by module_init stage. */
        init_workqueues();
        cpuset_init_smp();
        usermodehelper_init();
diff --combined kernel/exit.c
index 5859f598c951bde881a9e7ce1c29d19fd66b3463,bc2b1fdfc35431f40c1050de2e403a9fe1cf74ae..e61891f801238f3a386e9162f7bed8b456a4e268
@@@ -47,7 -47,7 +47,7 @@@
  #include <linux/tracehook.h>
  #include <linux/fs_struct.h>
  #include <linux/init_task.h>
 -#include <linux/perf_counter.h>
 +#include <linux/perf_event.h>
  #include <trace/events/sched.h>
  
  #include <asm/uaccess.h>
@@@ -154,8 -154,8 +154,8 @@@ static void delayed_put_task_struct(str
  {
        struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
  
 -#ifdef CONFIG_PERF_COUNTERS
 -      WARN_ON_ONCE(tsk->perf_counter_ctxp);
 +#ifdef CONFIG_PERF_EVENTS
 +      WARN_ON_ONCE(tsk->perf_event_ctxp);
  #endif
        trace_sched_process_free(tsk);
        put_task_struct(tsk);
@@@ -359,10 -359,8 +359,10 @@@ void __set_special_pids(struct pid *pid
  {
        struct task_struct *curr = current->group_leader;
  
 -      if (task_session(curr) != pid)
 +      if (task_session(curr) != pid) {
                change_pid(curr, PIDTYPE_SID, pid);
 +              proc_sid_connector(curr);
 +      }
  
        if (task_pgrp(curr) != pid)
                change_pid(curr, PIDTYPE_PGID, pid);
@@@ -947,8 -945,6 +947,8 @@@ NORET_TYPE void do_exit(long code
        if (group_dead) {
                hrtimer_cancel(&tsk->signal->real_timer);
                exit_itimers(tsk->signal);
 +              if (tsk->mm)
 +                      setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
        }
        acct_collect(code, group_dead);
        if (group_dead)
                disassociate_ctty(1);
  
        module_put(task_thread_info(tsk)->exec_domain->module);
 -      if (tsk->binfmt)
 -              module_put(tsk->binfmt->module);
  
        proc_exit_connector(tsk);
  
         * Flush inherited counters to the parent - before the parent
         * gets woken up by child-exit notifications.
         */
 -      perf_counter_exit_task(tsk);
 +      perf_event_exit_task(tsk);
  
        exit_notify(tsk, group_dead);
  #ifdef CONFIG_NUMA
        tsk->mempolicy = NULL;
  #endif
  #ifdef CONFIG_FUTEX
-       if (unlikely(!list_empty(&tsk->pi_state_list)))
-               exit_pi_state_list(tsk);
        if (unlikely(current->pi_state_cache))
                kfree(current->pi_state_cache);
  #endif
@@@ -1095,28 -1091,28 +1093,28 @@@ struct wait_opts 
        int __user              *wo_stat;
        struct rusage __user    *wo_rusage;
  
 +      wait_queue_t            child_wait;
        int                     notask_error;
  };
  
 -static struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
 +static inline
 +struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
  {
 -      struct pid *pid = NULL;
 -      if (type == PIDTYPE_PID)
 -              pid = task->pids[type].pid;
 -      else if (type < PIDTYPE_MAX)
 -              pid = task->group_leader->pids[type].pid;
 -      return pid;
 +      if (type != PIDTYPE_PID)
 +              task = task->group_leader;
 +      return task->pids[type].pid;
  }
  
 -static int eligible_child(struct wait_opts *wo, struct task_struct *p)
 +static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
  {
 -      int err;
 -
 -      if (wo->wo_type < PIDTYPE_MAX) {
 -              if (task_pid_type(p, wo->wo_type) != wo->wo_pid)
 -                      return 0;
 -      }
 +      return  wo->wo_type == PIDTYPE_MAX ||
 +              task_pid_type(p, wo->wo_type) == wo->wo_pid;
 +}
  
 +static int eligible_child(struct wait_opts *wo, struct task_struct *p)
 +{
 +      if (!eligible_pid(wo, p))
 +              return 0;
        /* Wait for all children (clone and not) if __WALL is set;
         * otherwise, wait for clone children *only* if __WCLONE is
         * set; otherwise, wait for non-clone children *only*.  (Note:
            && !(wo->wo_flags & __WALL))
                return 0;
  
 -      err = security_task_wait(p);
 -      if (err)
 -              return err;
 -
        return 1;
  }
  
@@@ -1138,20 -1138,18 +1136,20 @@@ static int wait_noreap_copyout(struct w
  
        put_task_struct(p);
        infop = wo->wo_info;
 -      if (!retval)
 -              retval = put_user(SIGCHLD, &infop->si_signo);
 -      if (!retval)
 -              retval = put_user(0, &infop->si_errno);
 -      if (!retval)
 -              retval = put_user((short)why, &infop->si_code);
 -      if (!retval)
 -              retval = put_user(pid, &infop->si_pid);
 -      if (!retval)
 -              retval = put_user(uid, &infop->si_uid);
 -      if (!retval)
 -              retval = put_user(status, &infop->si_status);
 +      if (infop) {
 +              if (!retval)
 +                      retval = put_user(SIGCHLD, &infop->si_signo);
 +              if (!retval)
 +                      retval = put_user(0, &infop->si_errno);
 +              if (!retval)
 +                      retval = put_user((short)why, &infop->si_code);
 +              if (!retval)
 +                      retval = put_user(pid, &infop->si_pid);
 +              if (!retval)
 +                      retval = put_user(uid, &infop->si_uid);
 +              if (!retval)
 +                      retval = put_user(status, &infop->si_status);
 +      }
        if (!retval)
                retval = pid;
        return retval;
@@@ -1208,7 -1206,6 +1206,7 @@@ static int wait_task_zombie(struct wait
        if (likely(!traced) && likely(!task_detached(p))) {
                struct signal_struct *psig;
                struct signal_struct *sig;
 +              unsigned long maxrss;
  
                /*
                 * The resource counters for the group leader are in its
                psig->coublock +=
                        task_io_get_oublock(p) +
                        sig->oublock + sig->coublock;
 +              maxrss = max(sig->maxrss, sig->cmaxrss);
 +              if (psig->cmaxrss < maxrss)
 +                      psig->cmaxrss = maxrss;
                task_io_accounting_add(&psig->ioac, &p->ioac);
                task_io_accounting_add(&psig->ioac, &sig->ioac);
                spin_unlock_irq(&p->real_parent->sighand->siglock);
@@@ -1481,14 -1475,13 +1479,14 @@@ static int wait_task_continued(struct w
   * then ->notask_error is 0 if @p is an eligible child,
   * or another error from security_task_wait(), or still -ECHILD.
   */
 -static int wait_consider_task(struct wait_opts *wo, struct task_struct *parent,
 -                              int ptrace, struct task_struct *p)
 +static int wait_consider_task(struct wait_opts *wo, int ptrace,
 +                              struct task_struct *p)
  {
        int ret = eligible_child(wo, p);
        if (!ret)
                return ret;
  
 +      ret = security_task_wait(p);
        if (unlikely(ret < 0)) {
                /*
                 * If we have not yet seen any eligible child,
@@@ -1550,7 -1543,7 +1548,7 @@@ static int do_wait_thread(struct wait_o
                 * Do not consider detached threads.
                 */
                if (!task_detached(p)) {
 -                      int ret = wait_consider_task(wo, tsk, 0, p);
 +                      int ret = wait_consider_task(wo, 0, p);
                        if (ret)
                                return ret;
                }
@@@ -1564,7 -1557,7 +1562,7 @@@ static int ptrace_do_wait(struct wait_o
        struct task_struct *p;
  
        list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
 -              int ret = wait_consider_task(wo, tsk, 1, p);
 +              int ret = wait_consider_task(wo, 1, p);
                if (ret)
                        return ret;
        }
        return 0;
  }
  
 +static int child_wait_callback(wait_queue_t *wait, unsigned mode,
 +                              int sync, void *key)
 +{
 +      struct wait_opts *wo = container_of(wait, struct wait_opts,
 +                                              child_wait);
 +      struct task_struct *p = key;
 +
 +      if (!eligible_pid(wo, p))
 +              return 0;
 +
 +      if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
 +              return 0;
 +
 +      return default_wake_function(wait, mode, sync, key);
 +}
 +
 +void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
 +{
 +      __wake_up_sync_key(&parent->signal->wait_chldexit,
 +                              TASK_INTERRUPTIBLE, 1, p);
 +}
 +
  static long do_wait(struct wait_opts *wo)
  {
 -      DECLARE_WAITQUEUE(wait, current);
        struct task_struct *tsk;
        int retval;
  
        trace_sched_process_wait(wo->wo_pid);
  
 -      add_wait_queue(&current->signal->wait_chldexit,&wait);
 +      init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
 +      wo->child_wait.private = current;
 +      add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
  repeat:
        /*
         * If there is nothing that can match our critiera just get out.
@@@ -1644,7 -1614,32 +1642,7 @@@ notask
        }
  end:
        __set_current_state(TASK_RUNNING);
 -      remove_wait_queue(&current->signal->wait_chldexit,&wait);
 -      if (wo->wo_info) {
 -              struct siginfo __user *infop = wo->wo_info;
 -
 -              if (retval > 0)
 -                      retval = 0;
 -              else {
 -                      /*
 -                       * For a WNOHANG return, clear out all the fields
 -                       * we would set so the user can easily tell the
 -                       * difference.
 -                       */
 -                      if (!retval)
 -                              retval = put_user(0, &infop->si_signo);
 -                      if (!retval)
 -                              retval = put_user(0, &infop->si_errno);
 -                      if (!retval)
 -                              retval = put_user(0, &infop->si_code);
 -                      if (!retval)
 -                              retval = put_user(0, &infop->si_pid);
 -                      if (!retval)
 -                              retval = put_user(0, &infop->si_uid);
 -                      if (!retval)
 -                              retval = put_user(0, &infop->si_status);
 -              }
 -      }
 +      remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
        return retval;
  }
  
@@@ -1689,29 -1684,6 +1687,29 @@@ SYSCALL_DEFINE5(waitid, int, which, pid
        wo.wo_stat      = NULL;
        wo.wo_rusage    = ru;
        ret = do_wait(&wo);
 +
 +      if (ret > 0) {
 +              ret = 0;
 +      } else if (infop) {
 +              /*
 +               * For a WNOHANG return, clear out all the fields
 +               * we would set so the user can easily tell the
 +               * difference.
 +               */
 +              if (!ret)
 +                      ret = put_user(0, &infop->si_signo);
 +              if (!ret)
 +                      ret = put_user(0, &infop->si_errno);
 +              if (!ret)
 +                      ret = put_user(0, &infop->si_code);
 +              if (!ret)
 +                      ret = put_user(0, &infop->si_pid);
 +              if (!ret)
 +                      ret = put_user(0, &infop->si_uid);
 +              if (!ret)
 +                      ret = put_user(0, &infop->si_status);
 +      }
 +
        put_pid(pid);
  
        /* avoid REGPARM breakage on x86: */
diff --combined kernel/fork.c
index 266c6af6ef1b089a1c64ee96428bd153db39217c,341965b0ab1c5cbeab57482a503c6d3f33debd2f..4c20fff8c13a2caae048bc9f2b719bdaf5582dcf
@@@ -49,7 -49,6 +49,7 @@@
  #include <linux/ftrace.h>
  #include <linux/profile.h>
  #include <linux/rmap.h>
 +#include <linux/ksm.h>
  #include <linux/acct.h>
  #include <linux/tsacct_kern.h>
  #include <linux/cn_proc.h>
@@@ -62,8 -61,7 +62,8 @@@
  #include <linux/blkdev.h>
  #include <linux/fs_struct.h>
  #include <linux/magic.h>
 -#include <linux/perf_counter.h>
 +#include <linux/perf_event.h>
 +#include <linux/posix-timers.h>
  
  #include <asm/pgtable.h>
  #include <asm/pgalloc.h>
@@@ -138,17 -136,9 +138,17 @@@ struct kmem_cache *vm_area_cachep
  /* SLAB cache for mm_struct structures (tsk->mm) */
  static struct kmem_cache *mm_cachep;
  
 +static void account_kernel_stack(struct thread_info *ti, int account)
 +{
 +      struct zone *zone = page_zone(virt_to_page(ti));
 +
 +      mod_zone_page_state(zone, NR_KERNEL_STACK, account);
 +}
 +
  void free_task(struct task_struct *tsk)
  {
        prop_local_destroy_single(&tsk->dirties);
 +      account_kernel_stack(tsk->stack, -1);
        free_thread_info(tsk->stack);
        rt_mutex_debug_task_free(tsk);
        ftrace_graph_exit_task(tsk);
@@@ -263,9 -253,6 +263,9 @@@ static struct task_struct *dup_task_str
        tsk->btrace_seq = 0;
  #endif
        tsk->splice_pipe = NULL;
 +
 +      account_kernel_stack(ti, 1);
 +
        return tsk;
  
  out:
@@@ -301,9 -288,6 +301,9 @@@ static int dup_mmap(struct mm_struct *m
        rb_link = &mm->mm_rb.rb_node;
        rb_parent = NULL;
        pprev = &mm->mmap;
 +      retval = ksm_fork(mm, oldmm);
 +      if (retval)
 +              goto out;
  
        for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
                struct file *file;
@@@ -434,30 -418,22 +434,30 @@@ __setup("coredump_filter=", coredump_fi
  
  #include <linux/init_task.h>
  
 +static void mm_init_aio(struct mm_struct *mm)
 +{
 +#ifdef CONFIG_AIO
 +      spin_lock_init(&mm->ioctx_lock);
 +      INIT_HLIST_HEAD(&mm->ioctx_list);
 +#endif
 +}
 +
  static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
  {
        atomic_set(&mm->mm_users, 1);
        atomic_set(&mm->mm_count, 1);
        init_rwsem(&mm->mmap_sem);
        INIT_LIST_HEAD(&mm->mmlist);
 -      mm->flags = (current->mm) ? current->mm->flags : default_dump_filter;
 +      mm->flags = (current->mm) ?
 +              (current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
        mm->core_state = NULL;
        mm->nr_ptes = 0;
        set_mm_counter(mm, file_rss, 0);
        set_mm_counter(mm, anon_rss, 0);
        spin_lock_init(&mm->page_table_lock);
 -      spin_lock_init(&mm->ioctx_lock);
 -      INIT_HLIST_HEAD(&mm->ioctx_list);
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        mm->cached_hole_size = ~0UL;
 +      mm_init_aio(mm);
        mm_init_owner(mm, p);
  
        if (likely(!mm_alloc_pgd(mm))) {
@@@ -509,7 -485,6 +509,7 @@@ void mmput(struct mm_struct *mm
  
        if (atomic_dec_and_test(&mm->mm_users)) {
                exit_aio(mm);
 +              ksm_exit(mm);
                exit_mmap(mm);
                set_mm_exe_file(mm, NULL);
                if (!list_empty(&mm->mmlist)) {
                        spin_unlock(&mmlist_lock);
                }
                put_swap_token(mm);
 +              if (mm->binfmt)
 +                      module_put(mm->binfmt->module);
                mmdrop(mm);
        }
  }
@@@ -570,12 -543,18 +570,18 @@@ void mm_release(struct task_struct *tsk
  
        /* Get rid of any futexes when releasing the mm */
  #ifdef CONFIG_FUTEX
-       if (unlikely(tsk->robust_list))
+       if (unlikely(tsk->robust_list)) {
                exit_robust_list(tsk);
+               tsk->robust_list = NULL;
+       }
  #ifdef CONFIG_COMPAT
-       if (unlikely(tsk->compat_robust_list))
+       if (unlikely(tsk->compat_robust_list)) {
                compat_exit_robust_list(tsk);
+               tsk->compat_robust_list = NULL;
+       }
  #endif
+       if (unlikely(!list_empty(&tsk->pi_state_list)))
+               exit_pi_state_list(tsk);
  #endif
  
        /* Get rid of any cached register state */
@@@ -645,14 -624,9 +651,14 @@@ struct mm_struct *dup_mm(struct task_st
        mm->hiwater_rss = get_mm_rss(mm);
        mm->hiwater_vm = mm->total_vm;
  
 +      if (mm->binfmt && !try_module_get(mm->binfmt->module))
 +              goto free_pt;
 +
        return mm;
  
  free_pt:
 +      /* don't put binfmt in mmput, we haven't got module yet */
 +      mm->binfmt = NULL;
        mmput(mm);
  
  fail_nomem:
@@@ -820,10 -794,10 +826,10 @@@ static void posix_cpu_timers_init_group
        thread_group_cputime_init(sig);
  
        /* Expiration times and increments. */
 -      sig->it_virt_expires = cputime_zero;
 -      sig->it_virt_incr = cputime_zero;
 -      sig->it_prof_expires = cputime_zero;
 -      sig->it_prof_incr = cputime_zero;
 +      sig->it[CPUCLOCK_PROF].expires = cputime_zero;
 +      sig->it[CPUCLOCK_PROF].incr = cputime_zero;
 +      sig->it[CPUCLOCK_VIRT].expires = cputime_zero;
 +      sig->it[CPUCLOCK_VIRT].incr = cputime_zero;
  
        /* Cached expiration times. */
        sig->cputime_expires.prof_exp = cputime_zero;
@@@ -881,7 -855,6 +887,7 @@@ static int copy_signal(unsigned long cl
        sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
        sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
        sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
 +      sig->maxrss = sig->cmaxrss = 0;
        task_io_accounting_init(&sig->ioac);
        sig->sum_sched_runtime = 0;
        taskstats_tgid_init(sig);
  
        tty_audit_fork(sig);
  
 +      sig->oom_adj = current->signal->oom_adj;
 +
        return 0;
  }
  
@@@ -993,16 -964,6 +999,16 @@@ static struct task_struct *copy_process
        if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
                return ERR_PTR(-EINVAL);
  
 +      /*
 +       * Siblings of global init remain as zombies on exit since they are
 +       * not reaped by their parent (swapper). To solve this and to avoid
 +       * multi-rooted process trees, prevent global and container-inits
 +       * from creating siblings.
 +       */
 +      if ((clone_flags & CLONE_PARENT) &&
 +                              current->signal->flags & SIGNAL_UNKILLABLE)
 +              return ERR_PTR(-EINVAL);
 +
        retval = security_task_create(clone_flags);
        if (retval)
                goto fork_out;
        if (!try_module_get(task_thread_info(p)->exec_domain->module))
                goto bad_fork_cleanup_count;
  
 -      if (p->binfmt && !try_module_get(p->binfmt->module))
 -              goto bad_fork_cleanup_put_domain;
 -
        p->did_exec = 0;
        delayacct_tsk_init(p);  /* Must remain after dup_task_struct() */
        copy_flags(clone_flags, p);
  
        p->bts = NULL;
  
 +      p->stack_start = stack_start;
 +
        /* Perform scheduler related setup. Assign this task to a CPU. */
        sched_fork(p, clone_flags);
  
 -      retval = perf_counter_init_task(p);
 +      retval = perf_event_init_task(p);
        if (retval)
                goto bad_fork_cleanup_policy;
  
        write_unlock_irq(&tasklist_lock);
        proc_fork_connector(p);
        cgroup_post_fork(p);
 -      perf_counter_fork(p);
 +      perf_event_fork(p);
        return p;
  
  bad_fork_free_pid:
@@@ -1324,13 -1286,16 +1330,13 @@@ bad_fork_cleanup_semundo
  bad_fork_cleanup_audit:
        audit_free(p);
  bad_fork_cleanup_policy:
 -      perf_counter_free_task(p);
 +      perf_event_free_task(p);
  #ifdef CONFIG_NUMA
        mpol_put(p->mempolicy);
  bad_fork_cleanup_cgroup:
  #endif
        cgroup_exit(p, cgroup_callbacks_done);
        delayacct_tsk_free(p);
 -      if (p->binfmt)
 -              module_put(p->binfmt->module);
 -bad_fork_cleanup_put_domain:
        module_put(task_thread_info(p)->exec_domain->module);
  bad_fork_cleanup_count:
        atomic_dec(&p->cred->user->processes);
diff --combined kernel/futex.c
index b911adceb2c488523d0c2809049878dfde1eed27,c3bb2fce11ba2cb10bd69550f8cfc48b9638623c..4949d336d88d21edf82e51fceaa6b3960fad8790
@@@ -916,8 -916,8 +916,8 @@@ retry
        hb1 = hash_futex(&key1);
        hb2 = hash_futex(&key2);
  
-       double_lock_hb(hb1, hb2);
  retry_private:
+       double_lock_hb(hb1, hb2);
        op_ret = futex_atomic_op_inuser(op, uaddr2);
        if (unlikely(op_ret < 0)) {
  
  static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
                                struct hrtimer_sleeper *timeout)
  {
 +      /*
 +       * The task state is guaranteed to be set before another task can
 +       * wake it. set_current_state() is implemented using set_mb() and
 +       * queue_me() calls spin_unlock() upon completion, both serializing
 +       * access to the hash list and forcing another memory barrier.
 +       */
        set_current_state(TASK_INTERRUPTIBLE);
        queue_me(q, hb);
  
@@@ -2117,7 -2111,6 +2117,6 @@@ int handle_early_requeue_pi_wakeup(stru
                 * Unqueue the futex_q and determine which it was.
                 */
                plist_del(&q->list, &q->list.plist);
-               drop_futex_key_refs(&q->key);
  
                if (timeout && !timeout->task)
                        ret = -ETIMEDOUT;
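
The comment added to futex_wait_queue_me() above spells out why a wakeup
cannot be lost: the task state is published via set_current_state(), which
uses set_mb(), before queue_me() makes the waiter visible on the hash list,
and queue_me()'s spin_unlock() adds a further barrier. A rough userspace
analogue of that ordering, written with C11 atomics and pthreads rather
than the kernel primitives (illustrative only, not taken from this patch):

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_int sleeping;     /* 1 once the waiter says it is about to block */
static atomic_int queued;       /* 1 once the waiter is visible to wakers      */
static atomic_int woken;        /* stand-in for the actual wakeup              */

static void *waiter(void *unused)
{
        (void)unused;
        /* Like set_current_state(): publish the state with full ordering
         * before we can possibly be found on the "hash list". */
        atomic_store(&sleeping, 1);
        /* Like queue_me(): the release pairs with the waker's acquire, so
         * whoever sees us queued also sees sleeping == 1. */
        atomic_store_explicit(&queued, 1, memory_order_release);
        while (!atomic_load(&woken))
                ;                       /* schedule() stand-in */
        return NULL;
}

static void *waker(void *unused)
{
        (void)unused;
        while (!atomic_load_explicit(&queued, memory_order_acquire))
                ;                       /* wait until the waiter is discoverable */
        if (atomic_load(&sleeping))
                atomic_store(&woken, 1);        /* the wakeup cannot be lost */
        return NULL;
}

int main(void)
{
        pthread_t w, k;

        pthread_create(&w, NULL, waiter, NULL);
        pthread_create(&k, NULL, waker, NULL);
        pthread_join(w, NULL);
        pthread_join(k, NULL);
        puts("wakeup delivered");
        return 0;
}
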
diff --combined kernel/panic.c
index bcdef26e3332f7e197b7fd8740f697010a257bb4,bc4dcb6a389b0c7cdbeb686bc1898630c13e87b7..96b45d0b4ba50853030c295f5b0dbcf9bf33cc88
@@@ -90,6 -90,8 +90,8 @@@ NORET_TYPE void panic(const char * fmt
  
        atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
  
+       bust_spinlocks(0);
        if (!panic_blink)
                panic_blink = no_blink;
  
                mdelay(1);
                i++;
        }
-       bust_spinlocks(0);
  }
  
  EXPORT_SYMBOL(panic);
@@@ -177,7 -178,7 +178,7 @@@ static const struct tnt tnts[] = 
   *  'W' - Taint on warning.
   *  'C' - modules from drivers/staging are loaded.
   *
 - *    The string is overwritten by the next call to print_taint().
 + *    The string is overwritten by the next call to print_tainted().
   */
  const char *print_tainted(void)
  {