git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge branch 'for-3.10' into for-next
author Tejun Heo <tj@kernel.org>
Tue, 19 Mar 2013 20:49:34 +0000 (13:49 -0700)
committer Tejun Heo <tj@kernel.org>
Tue, 19 Mar 2013 20:49:34 +0000 (13:49 -0700)
kernel/workqueue.c

index 833a1526174a60a6eb89ab7920175909420dec81,e38d035bf671348511e50cfc53de0ebef8e6bc5c..36ff4f847c3c81ac9d0c916f483d2df50f1b2b09
@@@ -4137,36 -4043,127 +4043,133 @@@ static void wq_unbind_fn(struct work_st
  
                spin_unlock_irq(&pool->lock);
                mutex_unlock(&pool->manager_mutex);
 -      }
  
 -      /*
 -       * Call schedule() so that we cross rq->lock and thus can guarantee
 -       * sched callbacks see the %WORKER_UNBOUND flag.  This is necessary
 -       * as scheduler callbacks may be invoked from other cpus.
 -       */
 -      schedule();
 +              /*
 +               * Call schedule() so that we cross rq->lock and thus can
 +               * guarantee sched callbacks see the %WORKER_UNBOUND flag.
 +               * This is necessary as scheduler callbacks may be invoked
 +               * from other cpus.
 +               */
 +              schedule();
  
 -      /*
 -       * Sched callbacks are disabled now.  Zap nr_running.  After this,
 -       * nr_running stays zero and need_more_worker() and keep_working()
 -       * are always true as long as the worklist is not empty.  Pools on
 -       * @cpu now behave as unbound (in terms of concurrency management)
 -       * pools which are served by workers tied to the CPU.
 -       *
 -       * On return from this function, the current worker would trigger
 -       * unbound chain execution of pending work items if other workers
 -       * didn't already.
 -       */
 -      for_each_cpu_worker_pool(pool, cpu)
 +              /*
 +               * Sched callbacks are disabled now.  Zap nr_running.
 +               * After this, nr_running stays zero and need_more_worker()
 +               * and keep_working() are always true as long as the
 +               * worklist is not empty.  This pool now behaves as an
 +               * unbound (in terms of concurrency management) pool which
 +               * is served by workers tied to the pool.
 +               */
                atomic_set(&pool->nr_running, 0);
 +
 +              /*
 +               * With concurrency management just turned off, a busy
 +               * worker blocking could lead to lengthy stalls.  Kick off
 +               * unbound chain execution of currently pending work items.
 +               */
 +              spin_lock_irq(&pool->lock);
 +              wake_up_worker(pool);
 +              spin_unlock_irq(&pool->lock);
 +      }
  }
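
The unbind path above works because zapping nr_running makes need_more_worker() and keep_working() evaluate true whenever the worklist is non-empty, so the woken worker keeps draining pending work without concurrency management. Below is a minimal userspace sketch of that check, compilable with any C11 compiler; pool_model, its fields and the scenario are simplified stand-ins invented for illustration, not the kernel's definitions from kernel/workqueue.c.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* simplified stand-in for the kernel's worker_pool bookkeeping */
struct pool_model {
	atomic_int nr_running;	/* workers currently counted as running */
	int nr_pending;		/* work items sitting on the worklist */
};

/* mirrors the idea behind need_more_worker(): more help is needed iff
 * work is pending and nobody is counted as running */
static bool need_more_worker(struct pool_model *pool)
{
	return pool->nr_pending > 0 &&
	       atomic_load(&pool->nr_running) == 0;
}

int main(void)
{
	struct pool_model pool;

	pool.nr_pending = 3;			/* three work items queued */
	atomic_init(&pool.nr_running, 1);	/* one bound worker running */
	printf("bound and busy: need_more_worker() = %d\n",
	       need_more_worker(&pool));

	/*
	 * wq_unbind_fn() zaps nr_running; from here on the check stays
	 * true as long as the worklist is non-empty, so waking one worker
	 * keeps the pending chain executing even though the pool is now
	 * effectively unbound.
	 */
	atomic_store(&pool.nr_running, 0);
	printf("after unbind:   need_more_worker() = %d\n",
	       need_more_worker(&pool));
	return 0;
}

The first printf reports 0 (a running worker suppresses the check); the second reports 1, which is why the hunk follows the atomic_set() with a wake_up_worker() to kick off execution of whatever is still queued.
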
  
+ /**
+  * rebind_workers - rebind all workers of a pool to the associated CPU
+  * @pool: pool of interest
+  *
+  * @pool->cpu is coming online.  Rebind all workers to the CPU.
+  */
+ static void rebind_workers(struct worker_pool *pool)
+ {
+       struct worker *worker;
+       int wi;
+
+       lockdep_assert_held(&pool->manager_mutex);
+
+       /*
+        * Restore CPU affinity of all workers.  As all idle workers should
+        * be on the run-queue of the associated CPU before any local
+        * wake-ups for concurrency management happen, restore CPU affinity
+        * of all workers first and then clear UNBOUND.  As we're called
+        * from CPU_ONLINE, the following shouldn't fail.
+        */
+       for_each_pool_worker(worker, wi, pool)
+               WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
+                                                 pool->attrs->cpumask) < 0);
+
+       spin_lock_irq(&pool->lock);
+
+       for_each_pool_worker(worker, wi, pool) {
+               unsigned int worker_flags = worker->flags;
+
+               /*
+                * A bound idle worker should actually be on the runqueue
+                * of the associated CPU for local wake-ups targeting it to
+                * work.  Kick all idle workers so that they migrate to the
+                * associated CPU.  Doing this in the same loop as
+                * replacing UNBOUND with REBOUND is safe as no worker will
+                * be bound before @pool->lock is released.
+                */
+               if (worker_flags & WORKER_IDLE)
+                       wake_up_process(worker->task);
+
+               /*
+                * We want to clear UNBOUND but can't directly call
+                * worker_clr_flags() or adjust nr_running.  Atomically
+                * replace UNBOUND with another NOT_RUNNING flag REBOUND.
+                * @worker will clear REBOUND using worker_clr_flags() when
+                * it initiates the next execution cycle thus restoring
+                * concurrency management.  Note that when or whether
+                * @worker clears REBOUND doesn't affect correctness.
+                *
+                * ACCESS_ONCE() is necessary because @worker->flags may be
+                * tested without holding any lock in
+                * wq_worker_waking_up().  Without it, NOT_RUNNING test may
+                * fail incorrectly leading to premature concurrency
+                * management operations.
+                */
+               WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
+               worker_flags |= WORKER_REBOUND;
+               worker_flags &= ~WORKER_UNBOUND;
+               ACCESS_ONCE(worker->flags) = worker_flags;
+       }
+
+       spin_unlock_irq(&pool->lock);
+ }
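
The loop above swaps WORKER_UNBOUND for WORKER_REBOUND by building the new flag word locally and publishing it with a single ACCESS_ONCE() store, so the lockless NOT_RUNNING test in wq_worker_waking_up() can never observe a window where neither flag is set. Here is a small userspace sketch of that pattern; the bit values and worker_model struct are invented for the example, and C11 relaxed atomics stand in for ACCESS_ONCE().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define WORKER_UNBOUND		(1U << 0)	/* made-up bit values */
#define WORKER_REBOUND		(1U << 1)
#define WORKER_NOT_RUNNING	(WORKER_UNBOUND | WORKER_REBOUND)

struct worker_model {
	atomic_uint flags;	/* written under a lock, read locklessly */
};

/* lockless reader: analogue of the NOT_RUNNING test done without pool->lock */
static bool worker_not_running(struct worker_model *w)
{
	return atomic_load_explicit(&w->flags, memory_order_relaxed) &
	       WORKER_NOT_RUNNING;
}

int main(void)
{
	struct worker_model w;
	unsigned int flags;

	atomic_init(&w.flags, WORKER_UNBOUND);	/* worker starts out unbound */

	/*
	 * Build the replacement word first, then publish it with one
	 * store; clearing UNBOUND and setting REBOUND as two separate
	 * writes would open a window where the reader above sees neither.
	 */
	flags = atomic_load(&w.flags);
	flags |= WORKER_REBOUND;
	flags &= ~WORKER_UNBOUND;
	atomic_store_explicit(&w.flags, flags, memory_order_relaxed);

	printf("still NOT_RUNNING after the hand-off: %d\n",
	       worker_not_running(&w));
	return 0;
}

The worker itself later drops REBOUND through worker_clr_flags(), which re-enables concurrency management; as the comment in the hunk notes, when exactly that happens does not affect correctness.
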
+ /**
+  * restore_unbound_workers_cpumask - restore cpumask of unbound workers
+  * @pool: unbound pool of interest
+  * @cpu: the CPU which is coming up
+  *
+  * An unbound pool may end up with a cpumask which doesn't have any online
+  * CPUs.  When a worker of such a pool gets scheduled, the scheduler resets
+  * its cpus_allowed.  If @cpu is in @pool's cpumask which didn't have any
+  * online CPU before, cpus_allowed of all its workers should be restored.
+  */
+ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
+ {
+       static cpumask_t cpumask;
+       struct worker *worker;
+       int wi;
+
+       lockdep_assert_held(&pool->manager_mutex);
+
+       /* is @cpu allowed for @pool? */
+       if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
+               return;
+
+       /* is @cpu the only online CPU? */
+       cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
+       if (cpumask_weight(&cpumask) != 1)
+               return;
+
+       /* as we're called from CPU_ONLINE, the following shouldn't fail */
+       for_each_pool_worker(worker, wi, pool)
+               WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
+                                                 pool->attrs->cpumask) < 0);
+ }
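
restore_unbound_workers_cpumask() only acts when the CPU coming online is part of the pool's cpumask and is the only CPU from that mask which is currently online. Below is a userspace analogue of that decision using glibc's cpu_set_t macros; the hard-coded masks are invented for the example and sched_setaffinity() on the calling thread merely stands in for the kernel's set_cpus_allowed_ptr() on each worker.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static void maybe_restore_affinity(cpu_set_t *pool_mask,
				   cpu_set_t *online_mask, int cpu)
{
	cpu_set_t tmp;

	/* is @cpu allowed for the pool at all? */
	if (!CPU_ISSET(cpu, pool_mask))
		return;

	/* is @cpu the only allowed CPU that is online? */
	CPU_AND(&tmp, pool_mask, online_mask);
	if (CPU_COUNT(&tmp) != 1)
		return;

	/* restore the full pool mask (here: for the calling thread) */
	if (sched_setaffinity(0, sizeof(*pool_mask), pool_mask))
		perror("sched_setaffinity");
	else
		printf("affinity restored because CPU %d is coming up\n", cpu);
}

int main(void)
{
	cpu_set_t pool_mask, online_mask;

	CPU_ZERO(&pool_mask);		/* pretend the pool allows CPUs 0-1 */
	CPU_SET(0, &pool_mask);
	CPU_SET(1, &pool_mask);

	CPU_ZERO(&online_mask);		/* pretend CPU 0, just now up, is the
					 * only online CPU so far */
	CPU_SET(0, &online_mask);

	maybe_restore_affinity(&pool_mask, &online_mask, 0);
	return 0;
}

If the pool's mask already contained another online CPU, the scheduler never had to break the workers' affinity, so the early return on CPU_COUNT() != 1 matches the "didn't have any online CPU before" condition in the kerneldoc above.
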
  /*
   * Workqueues should be brought up before normal priority CPU notifiers.
   * This will be registered high priority CPU notifier.