Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
author     Linus Torvalds <torvalds@linux-foundation.org>
           Tue, 7 Sep 2010 21:08:17 +0000 (14:08 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Tue, 7 Sep 2010 21:08:17 +0000 (14:08 -0700)
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: use zalloc_cpumask_var() for gcwq->mayday_mask
  workqueue: fix GCWQ_DISASSOCIATED initialization
  workqueue: Add a workqueue chapter to the tracepoint docbook
  workqueue: fix cwq->nr_active underflow
  workqueue: improve destroy_workqueue() debuggability
  workqueue: mark lock acquisition on worker_maybe_bind_and_lock()
  workqueue: annotate lock context change
  workqueue: free rescuer on destroy_workqueue

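The "fix cwq->nr_active underflow" change is easier to see outside the diff below: a work item parked on cwq->delayed_works never incremented cwq->nr_active, so retiring it (completion, or cancellation via try_to_grab_pending()) must not decrement the counter either. The patch records that state in a new WORK_STRUCT_DELAYED flag and passes it to cwq_dec_nr_in_flight(). The following standalone userspace model shows the accounting rule the flag enforces; all names here (model_cwq, model_queue, model_retire) are illustrative, not kernel symbols.

/*
 * Standalone model of the accounting rule behind
 * "workqueue: fix cwq->nr_active underflow".
 * Build with: cc -Wall model.c
 */
#include <stdbool.h>
#include <stdio.h>

struct model_cwq {
	int nr_active;   /* works counted against the limit */
	int max_active;  /* activation limit */
};

/* Queue a work; returns true if it had to be parked as "delayed".
 * Mirrors __queue_work(): only works that fit under max_active bump
 * nr_active, the rest are flagged WORK_STRUCT_DELAYED instead. */
static bool model_queue(struct model_cwq *cwq)
{
	if (cwq->nr_active < cwq->max_active) {
		cwq->nr_active++;
		return false;   /* activated immediately */
	}
	return true;            /* delayed: nr_active untouched */
}

/* Retire a work; mirrors cwq_dec_nr_in_flight(cwq, color, delayed):
 * only works that were actually activated give back an active slot. */
static void model_retire(struct model_cwq *cwq, bool delayed)
{
	if (!delayed)
		cwq->nr_active--;
}

int main(void)
{
	struct model_cwq cwq = { .nr_active = 0, .max_active = 1 };

	bool first  = model_queue(&cwq);  /* fits: nr_active -> 1 */
	bool second = model_queue(&cwq);  /* over the limit: delayed */

	/* Cancelling the delayed work (the try_to_grab_pending() path)
	 * must not touch nr_active.  The pre-fix code decremented
	 * unconditionally here, which is the underflow. */
	model_retire(&cwq, second);
	model_retire(&cwq, first);        /* the active work completes */

	printf("nr_active = %d\n", cwq.nr_active);  /* 0 with the fix, -1 without */
	return 0;
}

With the unconditional decrement that the patch removes, the cancellation step in this model would drive nr_active negative, which in the kernel permanently strands works on the delayed list.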
kernel/workqueue.c

diff --combined kernel/workqueue.c
index 8bd600c020e5cdf5f2454681bc2e89fcc99c47d0,785542976b0079b92c9dce5c364d9a305dd9a07d..727f24e563aef326b8eba951d2a31a9aa864d32b
@@@ -35,9 -35,6 +35,9 @@@
  #include <linux/lockdep.h>
  #include <linux/idr.h>
  
 +#define CREATE_TRACE_POINTS
 +#include <trace/events/workqueue.h>
 +
  #include "workqueue_sched.h"
  
  enum {
@@@ -90,7 -87,8 +90,8 @@@
  /*
   * Structure fields follow one of the following exclusion rules.
   *
-  * I: Set during initialization and read-only afterwards.
+  * I: Modifiable by initialization/destruction paths and read-only for
+  *    everyone else.
   *
   * P: Preemption protected.  Disabling preemption is enough and should
   *    only be modified and accessed from the local cpu.
@@@ -198,7 -196,7 +199,7 @@@ typedef cpumask_var_t mayday_mask_t
        cpumask_test_and_set_cpu((cpu), (mask))
  #define mayday_clear_cpu(cpu, mask)           cpumask_clear_cpu((cpu), (mask))
  #define for_each_mayday_cpu(cpu, mask)                for_each_cpu((cpu), (mask))
- #define alloc_mayday_mask(maskp, gfp)         alloc_cpumask_var((maskp), (gfp))
+ #define alloc_mayday_mask(maskp, gfp)         zalloc_cpumask_var((maskp), (gfp))
  #define free_mayday_mask(mask)                        free_cpumask_var((mask))
  #else
  typedef unsigned long mayday_mask_t;
@@@ -943,10 -941,14 +944,14 @@@ static void __queue_work(unsigned int c
        struct global_cwq *gcwq;
        struct cpu_workqueue_struct *cwq;
        struct list_head *worklist;
+       unsigned int work_flags;
        unsigned long flags;
  
        debug_work_activate(work);
  
+       if (WARN_ON_ONCE(wq->flags & WQ_DYING))
+               return;
        /* determine gcwq to use */
        if (!(wq->flags & WQ_UNBOUND)) {
                struct global_cwq *last_gcwq;
        BUG_ON(!list_empty(&work->entry));
  
        cwq->nr_in_flight[cwq->work_color]++;
+       work_flags = work_color_to_flags(cwq->work_color);
  
        if (likely(cwq->nr_active < cwq->max_active)) {
                cwq->nr_active++;
                worklist = gcwq_determine_ins_pos(gcwq, cwq);
-       } else
+       } else {
+               work_flags |= WORK_STRUCT_DELAYED;
                worklist = &cwq->delayed_works;
+       }
  
-       insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
+       insert_work(cwq, work, worklist, work_flags);
  
        spin_unlock_irqrestore(&gcwq->lock, flags);
  }
@@@ -1215,6 -1220,7 +1223,7 @@@ static void worker_leave_idle(struct wo
   * bound), %false if offline.
   */
  static bool worker_maybe_bind_and_lock(struct worker *worker)
+ __acquires(&gcwq->lock)
  {
        struct global_cwq *gcwq = worker->gcwq;
        struct task_struct *task = worker->task;
@@@ -1488,6 -1494,8 +1497,8 @@@ static void gcwq_mayday_timeout(unsigne
   * otherwise.
   */
  static bool maybe_create_worker(struct global_cwq *gcwq)
+ __releases(&gcwq->lock)
+ __acquires(&gcwq->lock)
  {
        if (!need_to_create_worker(gcwq))
                return false;
@@@ -1662,6 -1670,7 +1673,7 @@@ static void cwq_activate_first_delayed(
        struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
  
        move_linked_works(work, pos, NULL);
+       __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
        cwq->nr_active++;
  }
  
   * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
   * @cwq: cwq of interest
   * @color: color of work which left the queue
+  * @delayed: for a delayed work
   *
   * A work either has completed or is removed from pending queue,
   * decrement nr_in_flight of its cwq and handle workqueue flushing.
   * CONTEXT:
   * spin_lock_irq(gcwq->lock).
   */
- static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
+ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
+                                bool delayed)
  {
        /* ignore uncolored works */
        if (color == WORK_NO_COLOR)
                return;
  
        cwq->nr_in_flight[color]--;
-       cwq->nr_active--;
  
-       if (!list_empty(&cwq->delayed_works)) {
-               /* one down, submit a delayed one */
-               if (cwq->nr_active < cwq->max_active)
-                       cwq_activate_first_delayed(cwq);
+       if (!delayed) {
+               cwq->nr_active--;
+               if (!list_empty(&cwq->delayed_works)) {
+                       /* one down, submit a delayed one */
+                       if (cwq->nr_active < cwq->max_active)
+                               cwq_activate_first_delayed(cwq);
+               }
        }
  
        /* is flush in progress and are we at the flushing tip? */
   * spin_lock_irq(gcwq->lock) which is released and regrabbed.
   */
  static void process_one_work(struct worker *worker, struct work_struct *work)
+ __releases(&gcwq->lock)
+ __acquires(&gcwq->lock)
  {
        struct cpu_workqueue_struct *cwq = get_work_cwq(work);
        struct global_cwq *gcwq = cwq->gcwq;
        work_clear_pending(work);
        lock_map_acquire(&cwq->wq->lockdep_map);
        lock_map_acquire(&lockdep_map);
 +      trace_workqueue_execute_start(work);
        f(work);
 +      /*
 +       * While we must be careful to not use "work" after this, the trace
 +       * point will only record its address.
 +       */
 +      trace_workqueue_execute_end(work);
        lock_map_release(&lockdep_map);
        lock_map_release(&cwq->wq->lockdep_map);
  
        hlist_del_init(&worker->hentry);
        worker->current_work = NULL;
        worker->current_cwq = NULL;
-       cwq_dec_nr_in_flight(cwq, work_color);
+       cwq_dec_nr_in_flight(cwq, work_color, false);
  }
  
  /**
@@@ -2388,7 -2397,8 +2406,8 @@@ static int try_to_grab_pending(struct w
                        debug_work_deactivate(work);
                        list_del_init(&work->entry);
                        cwq_dec_nr_in_flight(get_work_cwq(work),
-                                            get_work_color(work));
+                               get_work_color(work),
+                               *work_data_bits(work) & WORK_STRUCT_DELAYED);
                        ret = 1;
                }
        }
@@@ -2791,7 -2801,6 +2810,6 @@@ struct workqueue_struct *__alloc_workqu
                if (IS_ERR(rescuer->task))
                        goto err;
  
-               wq->rescuer = rescuer;
                rescuer->task->flags |= PF_THREAD_BOUND;
                wake_up_process(rescuer->task);
        }
@@@ -2833,6 -2842,7 +2851,7 @@@ void destroy_workqueue(struct workqueue
  {
        unsigned int cpu;
  
+       wq->flags |= WQ_DYING;
        flush_workqueue(wq);
  
        /*
        if (wq->flags & WQ_RESCUER) {
                kthread_stop(wq->rescuer->task);
                free_mayday_mask(wq->mayday_mask);
+               kfree(wq->rescuer);
        }
  
        free_cwqs(wq);
@@@ -3239,6 -3250,8 +3259,8 @@@ static int __cpuinit trustee_thread(voi
   * multiple times.  To be used by cpu_callback.
   */
  static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
+ __releases(&gcwq->lock)
+ __acquires(&gcwq->lock)
  {
        if (!(gcwq->trustee_state == state ||
              gcwq->trustee_state == TRUSTEE_DONE)) {
@@@ -3545,8 -3558,7 +3567,7 @@@ static int __init init_workqueues(void
                spin_lock_init(&gcwq->lock);
                INIT_LIST_HEAD(&gcwq->worklist);
                gcwq->cpu = cpu;
-               if (cpu == WORK_CPU_UNBOUND)
-                       gcwq->flags |= GCWQ_DISASSOCIATED;
+               gcwq->flags |= GCWQ_DISASSOCIATED;
  
                INIT_LIST_HEAD(&gcwq->idle_list);
                for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
                struct global_cwq *gcwq = get_gcwq(cpu);
                struct worker *worker;
  
+               if (cpu != WORK_CPU_UNBOUND)
+                       gcwq->flags &= ~GCWQ_DISASSOCIATED;
                worker = create_worker(gcwq, true);
                BUG_ON(!worker);
                spin_lock_irq(&gcwq->lock);
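For reference, the __acquires()/__releases() markers added in the hunks above are sparse annotations: when the tree is checked with `make C=1`, they tell sparse that a function intentionally changes the lock context, silencing "context imbalance" warnings for functions such as worker_maybe_bind_and_lock() that return with gcwq->lock held, or maybe_create_worker(), which drops and retakes it. A minimal standalone sketch follows, using a pthread mutex in place of the gcwq spinlock and hypothetical function names; the two macro definitions mirror the kernel's.

/*
 * Standalone sketch of the sparse lock-context annotations.
 * Build with: cc -Wall sketch.c -pthread
 */
#include <pthread.h>

#ifdef __CHECKER__  /* defined only while sparse analyses the file */
#define __acquires(x)	__attribute__((context(x, 0, 1)))
#define __releases(x)	__attribute__((context(x, 1, 0)))
#else
#define __acquires(x)   /* expands to nothing for the compiler */
#define __releases(x)
#endif

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns with demo_lock held, the same intentional imbalance that
 * worker_maybe_bind_and_lock() has with gcwq->lock; the annotation
 * stops sparse from flagging it. */
static void grab_demo_lock(void) __acquires(&demo_lock)
{
	pthread_mutex_lock(&demo_lock);
}

static void drop_demo_lock(void) __releases(&demo_lock)
{
	pthread_mutex_unlock(&demo_lock);
}

int main(void)
{
	grab_demo_lock();
	/* ... critical section ... */
	drop_demo_lock();
	return 0;
}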