@@ … @@
 struct worker_pool {
 	spinlock_t		lock;		/* the pool lock */
 	int			cpu;		/* I: the associated cpu */
+	int			node;		/* I: the associated node ID */
 	int			id;		/* I: pool ID */
 	unsigned int		flags;		/* X: flags */
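The new node field caches the NUMA node the pool is associated with. Going by the locking-rule legend at the top of workqueue.c, the I: annotation marks fields that are set during initialization and read-only afterwards.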
@@ … @@
 static struct worker *create_worker(struct worker_pool *pool)
 {
 	struct worker *worker = NULL;
-	int node = pool->cpu >= 0 ? cpu_to_node(pool->cpu) : NUMA_NO_NODE;
 	int id = -1;
 	char id_buf[16];
@@ … @@ static struct worker *create_worker(struct worker_pool *pool)
 	else
 		snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
 
-	worker->task = kthread_create_on_node(worker_thread, worker, node,
+	worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
 					      "kworker/%s", id_buf);
 	if (IS_ERR(worker->task))
 		goto fail;
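Before this change, create_worker() derived the node from pool->cpu, so every unbound pool fell back to NUMA_NO_NODE. Using the precomputed pool->node instead lets kthread_create_on_node() place the worker's task_struct and stack on the pool's node for node-affine unbound pools as well; per-cpu pools behave exactly as before.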
@@ … @@ static int init_worker_pool(struct worker_pool *pool)
 	spin_lock_init(&pool->lock);
 	pool->id = -1;
 	pool->cpu = -1;
+	pool->node = NUMA_NO_NODE;
 	pool->flags |= POOL_DISASSOCIATED;
 	INIT_LIST_HEAD(&pool->worklist);
 	INIT_LIST_HEAD(&pool->idle_list);
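init_worker_pool() now defaults node to NUMA_NO_NODE, i.e. no placement preference; the paths below override it whenever a pool can be tied to a specific node.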
@@ … @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 {
 	u32 hash = wqattrs_hash(attrs);
 	struct worker_pool *pool;
+	int node;
 
 	lockdep_assert_held(&wq_pool_mutex);
@@ … @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 	lockdep_set_subclass(&pool->lock, 1);	/* see put_pwq() */
 	copy_workqueue_attrs(pool->attrs, attrs);
 
+	/* if cpumask is contained inside a NUMA node, we belong to that node */
+	if (wq_numa_enabled) {
+		for_each_node(node) {
+			if (cpumask_subset(pool->attrs->cpumask,
+					   wq_numa_possible_cpumask[node])) {
+				pool->node = node;
+				break;
+			}
+		}
+	}
+
 	if (worker_pool_assign_id(pool) < 0)
 		goto fail;
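The containment test above binds an unbound pool to a node only when the pool's cpumask is a subset of that node's possible-CPU mask; a cpumask spanning several nodes leaves pool->node at the NUMA_NO_NODE default from init_worker_pool(). A minimal userspace sketch of the same logic, with made-up bitmask helpers and per-node masks standing in for the kernel's cpumask API:

#include <stdio.h>

#define NUMA_NO_NODE	(-1)
#define NR_NODES	2

/* hypothetical per-node possible-CPU masks: node 0 = CPUs 0-3, node 1 = CPUs 4-7 */
static const unsigned long node_cpumask[NR_NODES] = { 0x0fUL, 0xf0UL };

/* stand-in for cpumask_subset(): every bit of mask must also be set in super */
static int mask_subset(unsigned long mask, unsigned long super)
{
	return (mask & ~super) == 0;
}

/* stand-in for the node-determination loop in get_unbound_pool() */
static int pool_node(unsigned long pool_cpumask)
{
	int node;

	for (node = 0; node < NR_NODES; node++)
		if (mask_subset(pool_cpumask, node_cpumask[node]))
			return node;
	return NUMA_NO_NODE;	/* cpumask spans nodes: no node affinity */
}

int main(void)
{
	printf("%d\n", pool_node(0x0cUL));	/* CPUs 2-3 -> 0 */
	printf("%d\n", pool_node(0x30UL));	/* CPUs 4-5 -> 1 */
	printf("%d\n", pool_node(0x18UL));	/* CPUs 3-4 -> -1 */
	return 0;
}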
@@ … @@ static int __init init_workqueues(void)
 			pool->cpu = cpu;
 			cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
 			pool->attrs->nice = std_nice[i++];
+			pool->node = cpu_to_node(cpu);
 
 			/* alloc pool ID */
 			mutex_lock(&wq_pool_mutex);
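Per-cpu pools need no containment check: each pool serves exactly one CPU, so cpu_to_node() on that CPU is always the right node.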