X-Git-Url: https://git.karo-electronics.de/?a=blobdiff_plain;f=kernel%2Fworkqueue.c;h=565cf7a1febda94b88582c6e9326d782fb29f96c;hb=8126a044f9e686f4ecf95e32fd89ad1dd48b4183;hp=82c4fa70595cce5c929b8e431047d882749d0227;hpb=94bc2be31a01a3055ec94176e595dfe208e92d3b;p=karo-tx-linux.git

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 82c4fa70595c..565cf7a1febd 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -27,6 +27,7 @@
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 #include <linux/kthread.h>
+#include <linux/hardirq.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
@@ -147,7 +148,7 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
 	return ret;
 }
 
-static inline void run_workqueue(struct cpu_workqueue_struct *cwq)
+static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
 	unsigned long flags;
 
@@ -427,22 +428,34 @@ int schedule_delayed_work_on(int cpu,
 	return ret;
 }
 
-int schedule_on_each_cpu(void (*func) (void *info), void *info)
+/**
+ * schedule_on_each_cpu - call a function on each online CPU from keventd
+ * @func: the function to call
+ * @info: a pointer to pass to func()
+ *
+ * Returns zero on success.
+ * Returns -ve errno on failure.
+ *
+ * Appears to be racy against CPU hotplug.
+ *
+ * schedule_on_each_cpu() is very slow.
+ */
+int schedule_on_each_cpu(void (*func)(void *info), void *info)
 {
 	int cpu;
-	struct work_struct *work;
-
-	work = kmalloc(NR_CPUS * sizeof(struct work_struct), GFP_KERNEL);
+	struct work_struct *works;
 
-	if (!work)
+	works = alloc_percpu(struct work_struct);
+	if (!works)
 		return -ENOMEM;
+
 	for_each_online_cpu(cpu) {
-		INIT_WORK(work + cpu, func, info);
+		INIT_WORK(per_cpu_ptr(works, cpu), func, info);
 		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
-				work + cpu);
+			     per_cpu_ptr(works, cpu));
 	}
 	flush_workqueue(keventd_wq);
-	kfree(work);
+	free_percpu(works);
 	return 0;
 }
 
@@ -476,6 +489,34 @@ void cancel_rearming_delayed_work(struct work_struct *work)
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_work);
 
+/**
+ * execute_in_process_context - reliably execute the routine with user context
+ * @fn: the function to execute
+ * @data: data to pass to the function
+ * @ew: guaranteed storage for the execute work structure (must
+ *      be available when the work executes)
+ *
+ * Executes the function immediately if process context is available,
+ * otherwise schedules the function for delayed execution.
+ *
+ * Returns:	0 - function was executed
+ *		1 - function was scheduled for execution
+ */
+int execute_in_process_context(void (*fn)(void *data), void *data,
+			       struct execute_work *ew)
+{
+	if (!in_interrupt()) {
+		fn(data);
+		return 0;
+	}
+
+	INIT_WORK(&ew->work, fn, data);
+	schedule_work(&ew->work);
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(execute_in_process_context);
+
 int keventd_up(void)
 {
 	return keventd_wq != NULL;
@@ -502,11 +543,11 @@ int current_is_keventd(void)
 static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
 {
 	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
-	LIST_HEAD(list);
+	struct list_head list;
 	struct work_struct *work;
 
 	spin_lock_irq(&cwq->lock);
-	list_splice_init(&cwq->worklist, &list);
+	list_replace_init(&cwq->worklist, &list);
 
 	while (!list_empty(&list)) {
 		printk("Taking work for %s\n", wq->name);
@@ -518,7 +559,7 @@ static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
 }
 
 /* We're holding the cpucontrol mutex here */
-static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
+static int workqueue_cpu_callback(struct notifier_block *nfb,
 					  unsigned long action,
 					  void *hcpu)
 {
@@ -549,6 +590,8 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 
 	case CPU_UP_CANCELED:
 		list_for_each_entry(wq, &workqueues, list) {
+			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
+				continue;
 			/* Unbind so it can run. */
 			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
 				     any_online_cpu(cpu_online_map));
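Usage note (editorial, not part of the diff): a minimal sketch of how the reworked schedule_on_each_cpu() above is meant to be called. It is written against the 2.6.17-era prototype shown in the hunk, where the callback takes a single void *info argument and the call sleeps in flush_workqueue() until every online CPU's keventd has run it. The names count_cpu(), visited and count_online_cpus_via_keventd() are hypothetical, used only for illustration.

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>

/* Runs once on every online CPU, from that CPU's keventd thread. */
static void count_cpu(void *info)
{
        atomic_t *visited = info;

        atomic_inc(visited);
        printk(KERN_INFO "count_cpu ran on CPU %d\n", smp_processor_id());
}

/* Must be called from process context; may sleep. */
static int count_online_cpus_via_keventd(void)
{
        atomic_t visited = ATOMIC_INIT(0);
        int ret;

        ret = schedule_on_each_cpu(count_cpu, &visited);
        if (ret)
                return ret;     /* -ENOMEM if the per-cpu allocation failed */

        return atomic_read(&visited);
}

Because schedule_on_each_cpu() returns only after all the queued work items have completed, the on-stack counter remains valid for the whole call.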
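A second editorial sketch, this time for the execute_in_process_context() interface introduced by the diff, showing the intended calling pattern for a teardown path that may be entered from interrupt context. The struct foo_device, foo_release() and foo_put_last_ref() names are hypothetical; only execute_in_process_context() and struct execute_work come from the patch (struct execute_work itself is declared by the matching include/linux/workqueue.h change, which this blobdiff does not show).

#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical object; the embedded execute_work provides the storage
 * that execute_in_process_context() needs when it has to defer. */
struct foo_device {
        struct execute_work ew;
        /* ... driver state ... */
};

/* May sleep, so it must run in process context. */
static void foo_release(void *data)
{
        struct foo_device *dev = data;

        /* sleeping cleanup would go here */
        kfree(dev);
}

/* Drops the last reference; callers may be in interrupt context. */
static void foo_put_last_ref(struct foo_device *dev)
{
        /*
         * Runs foo_release(dev) immediately when process context is
         * available (return value 0), otherwise queues it on keventd
         * via dev->ew and returns 1; either way it runs exactly once.
         */
        execute_in_process_context(foo_release, dev, &dev->ew);
}

Since the deferred case returns before fn() has run, both ew and data must stay valid until the work executes; that is why the sketch embeds the execute_work in the object being released instead of placing it on the stack.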