From: Tejun Heo <tj@kernel.org>
Date: Thu, 6 Sep 2012 19:51:58 +0000 (-0700)
Subject: Merge branch 'for-3.6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj...
X-Git-Tag: next-20120907~24^2~1
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=bc1b50b2ed57766383c09b90ddbc8e51de6c8ebc;p=karo-tx-linux.git

Merge branch 'for-3.6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq.git

This merge is necessary because the hotplug fixes from for-3.6-fixes
warrant further cleanups on top.  The merge generates a trivial
conflict in gcwq_associate() between the following two commits.

  96e65306b8 "workqueue: UNBOUND -> REBIND morphing in rebind_workers() should be atomic"
  e2b6a6d570 "workqueue: use system_highpri_wq for highpri workers in rebind_workers()"

Both add local variables to the same block.  Any order works.

Signed-off-by: Tejun Heo <tj@kernel.org>
---

bc1b50b2ed57766383c09b90ddbc8e51de6c8ebc
diff --cc kernel/workqueue.c
index 039d0fae171a,74487ef9963a..3787d3194d37
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@@ -1725,46 -1432,65 +1738,75 @@@ retry
 		goto retry;
 	}
 
-	/*
-	 * All idle workers are rebound and waiting for %WORKER_REBIND to
-	 * be cleared inside idle_worker_rebind().  Clear and release.
-	 * Clearing %WORKER_REBIND from this foreign context is safe
-	 * because these workers are still guaranteed to be idle.
-	 */
-	for_each_worker_pool(pool, gcwq)
-		list_for_each_entry(worker, &pool->idle_list, entry)
-			worker->flags &= ~WORKER_REBIND;
-
-	wake_up_all(&gcwq->rebind_hold);
-
-	/* rebind busy workers */
+	/* all idle workers are rebound, rebind busy workers */
 	for_each_busy_worker(worker, i, pos, gcwq) {
-		struct work_struct *rebind_work = &worker->rebind_work;
+		unsigned long worker_flags = worker->flags;
+		struct work_struct *rebind_work = &worker->rebind_work;
+		struct workqueue_struct *wq;
 
-		/* morph UNBOUND to REBIND */
-		worker->flags &= ~WORKER_UNBOUND;
-		worker->flags |= WORKER_REBIND;
+		/* morph UNBOUND to REBIND atomically */
+		worker_flags &= ~WORKER_UNBOUND;
+		worker_flags |= WORKER_REBIND;
+		ACCESS_ONCE(worker->flags) = worker_flags;
 
 		if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
 				     work_data_bits(rebind_work)))
 			continue;
 
-		/* wq doesn't matter, use the default one */
-		debug_work_activate(rebind_work);
-		insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
-			    worker->scheduled.next,
-			    work_color_to_flags(WORK_NO_COLOR));
+		debug_work_activate(rebind_work);
+
+		/*
+		 * wq doesn't really matter but let's keep @worker->pool
+		 * and @cwq->pool consistent for sanity.
+		 */
+		if (worker_pool_pri(worker->pool))
+			wq = system_highpri_wq;
+		else
+			wq = system_wq;
+
+		insert_work(get_cwq(gcwq->cpu, wq), rebind_work,
+			    worker->scheduled.next,
+			    work_color_to_flags(WORK_NO_COLOR));
 	}
+
+	/*
+	 * At this point, each pool is guaranteed to have at least one idle
+	 * worker and all idle workers are waiting for WORKER_REBIND to
+	 * clear.  Release management before releasing idle workers;
+	 * otherwise, they can all go become busy as we're holding the
+	 * manager_mutexes, which can lead to deadlock as we don't actually
+	 * create new workers.
+	 */
+	gcwq_release_management(gcwq);
+
+	/*
+	 * Clear %WORKER_REBIND and release.  Clearing it from this foreign
+	 * context is safe because these workers are still guaranteed to be
+	 * idle.
+	 *
+	 * We need to make sure all idle workers passed WORKER_REBIND wait
+	 * in idle_worker_rebind() before returning; otherwise, workers can
+	 * get stuck at the wait if hotplug cycle repeats.
+	 */
+	idle_rebind.cnt = 1;
+	INIT_COMPLETION(idle_rebind.done);
+
+	for_each_worker_pool(pool, gcwq) {
+		WARN_ON_ONCE(list_empty(&pool->idle_list));
+		list_for_each_entry(worker, &pool->idle_list, entry) {
+			worker->flags &= ~WORKER_REBIND;
+			idle_rebind.cnt++;
+		}
+	}
+
+	wake_up_all(&gcwq->rebind_hold);
+
+	if (--idle_rebind.cnt) {
+		spin_unlock_irq(&gcwq->lock);
+		wait_for_completion(&idle_rebind.done);
+	} else {
+		spin_unlock_irq(&gcwq->lock);
+	}
 }
 
 static struct worker *alloc_worker(void)
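
The heart of 96e65306b8, visible in the hunk above, is a simple publication pattern: build the new
flag word in a local variable, then write it back with a single ACCESS_ONCE() store so a concurrently
running worker can never observe WORKER_UNBOUND already cleared but WORKER_REBIND not yet set.  The
following is a minimal, self-contained userspace sketch of that pattern only, not kernel code;
ACCESS_ONCE() is redefined locally and the flag bit positions are illustrative assumptions.

/*
 * Userspace sketch of the "morph flags atomically" pattern: compute the
 * final flag word in a local, then publish it with one volatile store.
 * ACCESS_ONCE() and the flag values below are stand-ins for the kernel's.
 */
#include <stdio.h>

#define ACCESS_ONCE(x)	(*(volatile __typeof__(x) *)&(x))

#define WORKER_REBIND	(1UL << 5)	/* illustrative bit positions */
#define WORKER_UNBOUND	(1UL << 7)

struct worker {
	unsigned long flags;
};

static void morph_unbound_to_rebind(struct worker *worker)
{
	unsigned long worker_flags = worker->flags;

	worker_flags &= ~WORKER_UNBOUND;		/* build the final value ... */
	worker_flags |= WORKER_REBIND;
	ACCESS_ONCE(worker->flags) = worker_flags;	/* ... publish it in one store */
}

int main(void)
{
	struct worker w = { .flags = WORKER_UNBOUND };

	morph_unbound_to_rebind(&w);
	printf("flags: %#lx\n", w.flags);	/* REBIND set, UNBOUND clear */
	return 0;
}

The replaced two-step update (clear one bit, then set the other, as two separate stores to
worker->flags) leaves a window in which neither flag describes the worker's real state; collapsing
the update into a single store is what the commit title means by making the morph atomic.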