/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;
	int should_stop;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	const char *name;
	struct list_head list;	/* Empty if single thread */
	int freezeable;		/* Freeze threads during suspend */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
/* optimization, we could use cpu_possible_map */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return list_empty(&wq->list);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work, void *wq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline void *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work, int tail)
{
	set_wq_data(work, cwq);
	if (tail)
		list_add_tail(&work->entry, &cwq->worklist);
	else
		list_add(&work->entry, &cwq->worklist);
	wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, 1);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU it was submitted on, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		if (unlikely(is_single_threaded(wq)))
			cpu = singlethread_cpu;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
		ret = 1;
	}
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

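/*
 * Usage sketch (illustrative only; "my_wq", "my_work" and "my_work_fn" are
 * hypothetical caller-side names, not part of this file):
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		... runs in process context on a worker thread, may sleep ...
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	if (!queue_work(my_wq, &my_work))
 *		... the work was already pending, nothing was queued ...
 */
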
void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct workqueue_struct *wq = get_wq_data(&dwork->work);
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;

	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	timer_stats_timer_set_start_info(timer);
	if (delay == 0)
		return queue_work(wq, work);

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

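/*
 * Usage sketch (illustrative only; "my_wq", "my_dwork" and "my_timeout_fn"
 * are hypothetical caller-side names):
 *
 *	static DECLARE_DELAYED_WORK(my_dwork, my_timeout_fn);
 *
 *	queue_delayed_work(my_wq, &my_dwork, HZ);	... runs ~1 second later ...
 *
 * The handler receives &my_dwork.work and can use container_of() to get back
 * to the surrounding delayed_work or driver structure.
 */
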
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer_on(timer, cpu);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

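/*
 * Same as the sketch above, but the timer (and hence the initial queueing)
 * is pinned to a specific CPU (illustrative; assumes CPU 1 exists):
 *
 *	queue_delayed_work_on(1, my_wq, &my_dwork, 5 * HZ);
 */
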
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
			work_release(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

/*
 * NOTE: the caller must not touch *cwq if this func returns true
 */
static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
{
	int should_stop = cwq->should_stop;

	if (unlikely(should_stop)) {
		spin_lock_irq(&cwq->lock);
		should_stop = cwq->should_stop && list_empty(&cwq->worklist);
		if (should_stop)
			cwq->thread = NULL;
		spin_unlock_irq(&cwq->lock);
	}

	return should_stop;
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);
	struct k_sigaction sa;
	sigset_t blocked;

	if (!cwq->wq->freezeable)
		current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/*
	 * We inherited MPOL_INTERLEAVE from the booting kernel.
	 * Set MPOL_DEFAULT to insure node local allocations.
	 */
	numa_default_policy();

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	for (;;) {
		if (cwq->wq->freezeable)
			try_to_freeze();

		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!cwq->should_stop && list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		if (cwq_should_stop(cwq))
			break;
		run_workqueue(cwq);
	}
	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
					struct wq_barrier *barr, int tail)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, tail);
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		struct wq_barrier barr;
		int active = 0;

		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, 1);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	might_sleep();

	if (is_single_threaded(wq))
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
	else {
		int cpu;

		for_each_cpu_mask(cpu, cpu_populated_map)
			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
	}
}
EXPORT_SYMBOL_GPL(flush_workqueue);

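/*
 * Typical shutdown sketch (illustrative; "my_wq" and "my_dev->closing" are
 * hypothetical caller-side names): make sure no new work can be queued, then
 * drain what is already there before freeing the data the handlers touch:
 *
 *	my_dev->closing = 1;		... no more submissions after this ...
 *	flush_workqueue(my_wq);
 *	destroy_workqueue(my_wq);
 */
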
static void wait_on_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

/**
 * flush_work - block until a work_struct's callback has terminated
 * @wq: the workqueue on which the work is queued
 * @work: the work which is to be flushed
 *
 * flush_work() will attempt to cancel the work if it is queued.  If the work's
 * callback appears to be running, flush_work() will block until it has
 * completed.
 *
 * flush_work() is designed to be used when the caller is tearing down data
 * structures which the callback function operates upon.  It is expected that,
 * prior to calling flush_work(), the caller has arranged for the work to not
 * be requeued.
 */
void flush_work(struct workqueue_struct *wq, struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;

	might_sleep();

	cwq = get_wq_data(work);
	/* Was it ever queued ? */
	if (!cwq)
		return;

	/*
	 * This work can't be re-queued, no need to re-check that
	 * get_wq_data() is still the same when we take cwq->lock.
	 */
	spin_lock_irq(&cwq->lock);
	list_del_init(&work->entry);
	work_release(work);
	spin_unlock_irq(&cwq->lock);

	if (is_single_threaded(wq))
		wait_on_work(per_cpu_ptr(wq->cpu_wq, singlethread_cpu), work);
	else {
		int cpu;

		for_each_cpu_mask(cpu, cpu_populated_map)
			wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
	}
}
EXPORT_SYMBOL_GPL(flush_work);

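/*
 * Teardown sketch (illustrative; "my_wq", "my_work" and "my_dev" are
 * hypothetical caller-side names). The caller must first guarantee that the
 * work won't be requeued, then:
 *
 *	flush_work(my_wq, &my_work);	... dequeues it, or waits for the handler ...
 *	kfree(my_dev);			... now safe, the handler can no longer run ...
 */
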
static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

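/*
 * Usage sketch (illustrative; "my_irq_work", "my_irq_bottom_half" and
 * "my_irq_handler" are hypothetical): defer non-atomic processing from an
 * interrupt handler onto keventd:
 *
 *	static DECLARE_WORK(my_irq_work, my_irq_bottom_half);
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev)
 *	{
 *		... ack the hardware ...
 *		schedule_work(&my_irq_work);	... safe from atomic context ...
 *		return IRQ_HANDLED;
 *	}
 */
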
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

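/*
 * Usage sketch (illustrative; "my_poll" and "my_poll_fn" are hypothetical):
 * a self-rearming poll that runs roughly once a second on keventd:
 *
 *	static void my_poll_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_poll, my_poll_fn);
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		... poll the hardware ...
 *		schedule_delayed_work(&my_poll, HZ);
 *	}
 *
 * See cancel_rearming_delayed_work() below for how to stop such a work.
 */
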
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	preempt_disable();		/* CPU hotplug */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	preempt_enable();
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}

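/*
 * Usage sketch (illustrative; "drain_local_cache" is a hypothetical handler):
 * run the same function once on every online CPU, in that CPU's keventd
 * thread, and wait for all of them to finish:
 *
 *	static void drain_local_cache(struct work_struct *unused)
 *	{
 *		... operates on this CPU's per-cpu data ...
 *	}
 *
 *	err = schedule_on_each_cpu(drain_local_cache);
 */
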
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

void flush_work_keventd(struct work_struct *work)
{
	flush_work(keventd_wq, work);
}
EXPORT_SYMBOL(flush_work_keventd);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct delayed_work *dwork)
{
	/* Was it ever queued ? */
	if (!get_wq_data(&dwork->work))
		return;

	while (!cancel_delayed_work(dwork))
		flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);

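/*
 * Teardown sketch for the self-rearming "my_poll" example above
 * (illustrative): this guarantees neither the timer nor the handler is still
 * running on return, even though the handler requeues itself:
 *
 *	cancel_rearming_delayed_work(&my_poll);
 *
 * For a delayed work on a private workqueue, pass the workqueue explicitly:
 *
 *	cancel_rearming_delayed_workqueue(my_wq, &my_dwork);
 */
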
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);

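/*
 * Usage sketch (illustrative; "my_release_fn" and "my_dev->ew" are
 * hypothetical caller-side names): run a routine that may sleep, from a path
 * that may or may not be in interrupt context:
 *
 *	if (execute_in_process_context(my_release_fn, &my_dev->ew))
 *		... deferred to keventd, not yet complete ...
 *	else
 *		... ran synchronously, already done ...
 */
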
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;
	cwq->should_stop = 0;
	if (!is_single_threaded(wq))
		kthread_bind(p, cpu);

	if (is_single_threaded(wq) || cpu_online(cpu))
		wake_up_process(p);

	return 0;
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	wq->freezeable = freezeable;

	if (singlethread) {
		INIT_LIST_HEAD(&wq->list);
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
	} else {
		mutex_lock(&workqueue_mutex);
		list_add(&wq->list, &workqueues);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
		}
		mutex_unlock(&workqueue_mutex);
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct wq_barrier barr;
	int alive = 0;

	spin_lock_irq(&cwq->lock);
	if (cwq->thread != NULL) {
		insert_wq_barrier(cwq, &barr, 1);
		cwq->should_stop = 1;
		alive = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (alive) {
		wait_for_completion(&barr.done);

		while (unlikely(cwq->thread != NULL))
			cpu_relax();
		/*
		 * Wait until cwq->thread unlocks cwq->lock,
		 * it won't touch *cwq after that.
		 */
		smp_rmb();
		spin_unlock_wait(&cwq->lock);
	}
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	struct cpu_workqueue_struct *cwq;

	if (is_single_threaded(wq)) {
		cwq = per_cpu_ptr(wq->cpu_wq, singlethread_cpu);
		cleanup_workqueue_thread(cwq, singlethread_cpu);
	} else {
		int cpu;

		mutex_lock(&workqueue_mutex);
		list_del(&wq->list);
		mutex_unlock(&workqueue_mutex);

		for_each_cpu_mask(cpu, cpu_populated_map) {
			cwq = per_cpu_ptr(wq->cpu_wq, cpu);
			cleanup_workqueue_thread(cwq, cpu);
		}
	}

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

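/*
 * Lifecycle sketch (illustrative; "my_wq" and "my_work" are hypothetical
 * caller-side names). create_workqueue() spawns one worker per CPU; the
 * create_singlethread_workqueue() variant spawns a single worker bound to
 * singlethread_cpu:
 *
 *	my_wq = create_workqueue("mydrv");
 *	if (!my_wq)
 *		... fail ...
 *	queue_work(my_wq, &my_work);
 *	...
 *	destroy_workqueue(my_wq);	... drains pending work, then frees ...
 */
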
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_LOCK_ACQUIRE:
		mutex_lock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_LOCK_RELEASE:
		mutex_unlock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}

	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue for %i failed\n", cpu);
			return NOTIFY_BAD;

		case CPU_ONLINE:
			wake_up_process(cwq->thread);
			break;

		case CPU_UP_CANCELED:
			if (cwq->thread)
				wake_up_process(cwq->thread);
		case CPU_DEAD:
			cleanup_workqueue_thread(cwq, cpu);
			break;
		}
	}

	return NOTIFY_OK;
}

void init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}