LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
return cpt == CFS_CPT_ANY ?
- cpus_weight(*cptab->ctb_cpumask) :
- cpus_weight(*cptab->ctb_parts[cpt].cpt_cpumask);
+ cpumask_weight(cptab->ctb_cpumask) :
+ cpumask_weight(cptab->ctb_parts[cpt].cpt_cpumask);
}
EXPORT_SYMBOL(cfs_cpt_weight);
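As a side note, the conversion here is from the legacy helpers that take a cpumask_t by value (cpus_weight(), cpu_isset(), cpu_set(), ...) to the current helpers that take a const struct cpumask * (cpumask_weight(), cpumask_test_cpu(), cpumask_set_cpu(), ...). A minimal sketch of the pointer-based calling convention, using a hypothetical helper name and cpumask_var_t for the scratch mask:

#include <linux/cpumask.h>
#include <linux/slab.h>

/* Hypothetical helper, not part of the patch: count how many CPUs in
 * @mask are currently online.  All helpers used here take pointers,
 * so the mask is never dereferenced at the call site. */
static int mask_online_weight(const struct cpumask *mask)
{
	cpumask_var_t tmp;
	int weight;

	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(tmp, mask, cpu_online_mask);
	weight = cpumask_weight(tmp);
	free_cpumask_var(tmp);

	return weight;
}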
LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
return cpt == CFS_CPT_ANY ?
- any_online_cpu(*cptab->ctb_cpumask) != NR_CPUS :
- any_online_cpu(*cptab->ctb_parts[cpt].cpt_cpumask) != NR_CPUS;
+ cpumask_any_and(cptab->ctb_cpumask, cpu_online_mask) < nr_cpu_ids :
+ cpumask_any_and(cptab->ctb_parts[cpt].cpt_cpumask, cpu_online_mask) < nr_cpu_ids;
}
EXPORT_SYMBOL(cfs_cpt_online);
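The cfs_cpt_online() check above relies on the convention that a search over an empty intersection returns a value >= nr_cpu_ids. Since any_online_cpu() is itself one of the obsolete helpers this conversion removes, the modern idiom is cpumask_any_and() against cpu_online_mask, sketched below with a hypothetical predicate name (cpumask_intersects() is a more direct alternative):

#include <linux/cpumask.h>

/* Hypothetical predicate, for illustration only: true if at least one
 * CPU in @mask is online.  cpumask_any_and() returns a CPU number
 * below nr_cpu_ids on success, or >= nr_cpu_ids when the intersection
 * with cpu_online_mask is empty. */
static bool mask_has_online_cpu(const struct cpumask *mask)
{
	return cpumask_any_and(mask, cpu_online_mask) < nr_cpu_ids;
	/* equivalent: return cpumask_intersects(mask, cpu_online_mask); */
}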
LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts);
- if (cpu < 0 || cpu >= NR_CPUS || !cpu_online(cpu)) {
+ if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_online(cpu)) {
CDEBUG(D_INFO, "CPU %d is invalid or it's offline\n", cpu);
return 0;
}
cptab->ctb_cpu2cpt[cpu] = cpt;
- LASSERT(!cpu_isset(cpu, *cptab->ctb_cpumask));
- LASSERT(!cpu_isset(cpu, *cptab->ctb_parts[cpt].cpt_cpumask));
+ LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_cpumask));
+ LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask));
- cpu_set(cpu, *cptab->ctb_cpumask);
- cpu_set(cpu, *cptab->ctb_parts[cpt].cpt_cpumask);
+ cpumask_set_cpu(cpu, cptab->ctb_cpumask);
+ cpumask_set_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask);
node = cpu_to_node(cpu);
LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
- if (cpu < 0 || cpu >= NR_CPUS) {
+ if (cpu < 0 || cpu >= nr_cpu_ids) {
CDEBUG(D_INFO, "Invalid CPU id %d\n", cpu);
return;
}
return;
}
- LASSERT(cpu_isset(cpu, *cptab->ctb_parts[cpt].cpt_cpumask));
- LASSERT(cpu_isset(cpu, *cptab->ctb_cpumask));
+ LASSERT(cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask));
+ LASSERT(cpumask_test_cpu(cpu, cptab->ctb_cpumask));
- cpu_clear(cpu, *cptab->ctb_parts[cpt].cpt_cpumask);
- cpu_clear(cpu, *cptab->ctb_cpumask);
+ cpumask_clear_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask);
+ cpumask_clear_cpu(cpu, cptab->ctb_cpumask);
cptab->ctb_cpu2cpt[cpu] = -1;
node = cpu_to_node(cpu);
{
int i;
- if (cpus_weight(*mask) == 0 || any_online_cpu(*mask) == NR_CPUS) {
+ if (cpumask_weight(mask) == 0 || cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) {
CDEBUG(D_INFO, "No online CPU is found in the CPU mask for CPU partition %d\n",
cpt);
return 0;
int
cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu)
{
- LASSERT(cpu >= 0 && cpu < NR_CPUS);
+ LASSERT(cpu >= 0 && cpu < nr_cpu_ids);
return cptab->ctb_cpu2cpt[cpu];
}
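A brief note on the bound used in cfs_cpt_of_cpu() and in the checks above: nr_cpu_ids is the runtime number of possible CPU ids and never exceeds the compile-time NR_CPUS, so it is the tighter, correct upper bound for indexing tables such as ctb_cpu2cpt that are sized per possible CPU. A trivial sketch of that check, with a hypothetical function name:

#include <linux/cpumask.h>

/* Hypothetical range check mirroring the LASSERT above: valid CPU ids
 * run from 0 to nr_cpu_ids - 1 on the running system. */
static bool cpu_id_is_valid(int cpu)
{
	return cpu >= 0 && cpu < nr_cpu_ids;
}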
nodemask = cptab->ctb_parts[cpt].cpt_nodemask;
}
- if (any_online_cpu(*cpumask) == NR_CPUS) {
+ if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) {
CERROR("No online CPU found in CPU partition %d, did someone do CPU hotplug on system? You might need to reload Lustre modules to keep system working well.\n",
cpt);
return -EINVAL;
}
for_each_online_cpu(i) {
- if (cpu_isset(i, *cpumask))
+ if (cpumask_test_cpu(i, cpumask))
continue;
rc = set_cpus_allowed_ptr(current, cpumask);
LASSERT(number > 0);
- if (number >= cpus_weight(*node)) {
- while (!cpus_empty(*node)) {
- cpu = first_cpu(*node);
+ if (number >= cpumask_weight(node)) {
+ while (!cpumask_empty(node)) {
+ cpu = cpumask_first(node);
rc = cfs_cpt_set_cpu(cptab, cpt, cpu);
if (!rc)
return -EINVAL;
- cpu_clear(cpu, *node);
+ cpumask_clear_cpu(cpu, node);
}
return 0;
}
goto out;
}
- while (!cpus_empty(*node)) {
- cpu = first_cpu(*node);
+ while (!cpumask_empty(node)) {
+ cpu = cpumask_first(node);
/* get cpumask for cores in the same socket */
cfs_cpu_core_siblings(cpu, socket);
- cpus_and(*socket, *socket, *node);
+ cpumask_and(socket, socket, node);
- LASSERT(!cpus_empty(*socket));
+ LASSERT(!cpumask_empty(socket));
- while (!cpus_empty(*socket)) {
+ while (!cpumask_empty(socket)) {
int i;
/* get cpumask for hts in the same core */
cfs_cpu_ht_siblings(cpu, core);
- cpus_and(*core, *core, *node);
+ cpumask_and(core, core, node);
- LASSERT(!cpus_empty(*core));
+ LASSERT(!cpumask_empty(core));
- for_each_cpu_mask(i, *core) {
+ for_each_cpu(i, core) {
- cpu_clear(i, *socket);
- cpu_clear(i, *node);
+ cpumask_clear_cpu(i, socket);
+ cpumask_clear_cpu(i, node);
rc = cfs_cpt_set_cpu(cptab, cpt, i);
if (!rc) {
if (--number == 0)
goto out;
}
- cpu = first_cpu(*socket);
+ cpu = cpumask_first(socket);
}
}
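The sibling walk above builds per-socket and per-core masks through the Lustre cfs_cpu_core_siblings()/cfs_cpu_ht_siblings() wrappers. As a rough sketch of the same topology walk with plain mainline helpers (hypothetical function, assuming topology_core_cpumask() is what the wrapper resolves to):

#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/printk.h>

/* Hypothetical walk over CPUs that share a physical socket with @cpu,
 * using for_each_cpu() now that the mask is handled by pointer. */
static void walk_core_siblings(int cpu)
{
	int i;

	for_each_cpu(i, topology_core_cpumask(cpu))
		pr_debug("cpu%d is on the same socket as cpu%d\n", i, cpu);
}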
for_each_online_node(i) {
cfs_node_to_cpumask(i, mask);
- while (!cpus_empty(*mask)) {
+ while (!cpumask_empty(mask)) {
struct cfs_cpu_partition *part;
int n;
part = &cptab->ctb_parts[cpt];
- n = num - cpus_weight(*part->cpt_cpumask);
+ n = num - cpumask_weight(part->cpt_cpumask);
LASSERT(n > 0);
rc = cfs_cpt_choose_ncpus(cptab, cpt, mask, n);
if (rc < 0)
goto failed;
- LASSERT(num >= cpus_weight(*part->cpt_cpumask));
- if (num == cpus_weight(*part->cpt_cpumask))
+ LASSERT(num >= cpumask_weight(part->cpt_cpumask));
+ if (num == cpumask_weight(part->cpt_cpumask))
cpt++;
}
}
if (cpt != ncpt ||
- num != cpus_weight(*cptab->ctb_parts[ncpt - 1].cpt_cpumask)) {
+ num != cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask)) {
CERROR("Expect %d(%d) CPU partitions but got %d(%d), CPU hotplug/unplug while setting?\n",
cptab->ctb_nparts, num, cpt,
- cpus_weight(*cptab->ctb_parts[ncpt - 1].cpt_cpumask));
+ cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask));
goto failed;
}
return NULL;
}
- high = node ? MAX_NUMNODES - 1 : NR_CPUS - 1;
+ high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1;
cptab = cfs_cpt_table_alloc(ncpt);
if (cptab == NULL) {