};
static int worker_thread(void *__worker);
+static void gcwq_claim_management(struct global_cwq *gcwq);
+static void gcwq_release_management(struct global_cwq *gcwq);
static int worker_pool_pri(struct worker_pool *pool)
{
}
/**
- * rebind_workers - rebind all workers of a gcwq to the associated CPU
+ * gcwq_associate - (re)associate a gcwq to its CPU and rebind its workers
* @gcwq: gcwq of interest
*
- * @gcwq->cpu is coming online. Rebind all workers to the CPU. Rebinding
- * is different for idle and busy ones.
+ * @gcwq->cpu is coming online. Clear %GCWQ_DISASSOCIATED and rebind all
+ * workers to the CPU. Rebinding is different for idle and busy ones.
*
* The idle ones should be rebound synchronously and idle rebinding should
 * be complete before any worker starts executing work items with
 * concurrency management enabled.
 *
 * On return, all workers are guaranteed to either be bound or have rebind
* work item scheduled.
*/
-static void rebind_workers(struct global_cwq *gcwq)
- __releases(&gcwq->lock) __acquires(&gcwq->lock)
+static void gcwq_associate(struct global_cwq *gcwq)
{
struct idle_rebind idle_rebind;
struct worker_pool *pool;
struct hlist_node *pos;
int i;
- lockdep_assert_held(&gcwq->lock);
+ gcwq_claim_management(gcwq);
+ spin_lock_irq(&gcwq->lock);
- for_each_worker_pool(pool, gcwq)
- lockdep_assert_held(&pool->manager_mutex);
+ gcwq->flags &= ~GCWQ_DISASSOCIATED;
/*
* Rebind idle workers. Interlocked both ways. We wait for
if (--idle_rebind.cnt) {
spin_unlock_irq(&gcwq->lock);
wait_for_completion(&idle_rebind.done);
- spin_lock_irq(&gcwq->lock);
+ } else {
+ spin_unlock_irq(&gcwq->lock);
}
+
+ gcwq_release_management(gcwq);
}
static struct worker *alloc_worker(void)
case CPU_DOWN_FAILED:
case CPU_ONLINE:
- gcwq_claim_management(gcwq);
- spin_lock_irq(&gcwq->lock);
- gcwq->flags &= ~GCWQ_DISASSOCIATED;
- rebind_workers(gcwq);
- spin_unlock_irq(&gcwq->lock);
- gcwq_release_management(gcwq);
+ gcwq_associate(gcwq);
break;
}
return NOTIFY_OK;