static struct irqaction irq2 = {
.handler = timer_interrupt,
.flags = IRQF_SHARED | IRQF_DISABLED,
- .mask = CPU_MASK_NONE,
.name = "timer",
};
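
The hunks of this kind throughout the patch all make the same mechanical
change: the cpumask field of struct irqaction was only ever written, never
read by the generic IRQ code, so each initializer of it is deleted (whether
CPU_MASK_NONE, CPU_MASK_CPU0 or a cpumask_of_cpu() value) ahead of removing
the field itself. A minimal sketch of the resulting shape, with a
hypothetical handler and EXAMPLE_IRQ number:

#include <linux/interrupt.h>

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

/* No .mask member: only fields the IRQ core actually consumes remain. */
static struct irqaction example_irqaction = {
        .handler = example_interrupt,
        .flags   = IRQF_DISABLED,
        .name    = "example",
};

/* Registered at init time with setup_irq(EXAMPLE_IRQ, &example_irqaction). */
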
static struct irqaction irq_ipi = {
.handler = crisv32_ipi_interrupt,
.flags = IRQF_DISABLED,
- .mask = CPU_MASK_NONE,
.name = "ipi",
};
static struct irqaction irq_timer = {
.handler = timer_interrupt,
.flags = IRQF_SHARED | IRQF_DISABLED,
- .mask = CPU_MASK_NONE,
.name = "timer"
};
[0] = {
.handler = fpga_interrupt,
.flags = IRQF_DISABLED | IRQF_SHARED,
- .mask = CPU_MASK_NONE,
.name = "fpga.0",
.dev_id = (void *) 0x0028UL,
},
[1] = {
.handler = fpga_interrupt,
.flags = IRQF_DISABLED | IRQF_SHARED,
- .mask = CPU_MASK_NONE,
.name = "fpga.1",
.dev_id = (void *) 0x0050UL,
},
[2] = {
.handler = fpga_interrupt,
.flags = IRQF_DISABLED | IRQF_SHARED,
- .mask = CPU_MASK_NONE,
.name = "fpga.2",
.dev_id = (void *) 0x1c00UL,
},
[3] = {
.handler = fpga_interrupt,
.flags = IRQF_DISABLED | IRQF_SHARED,
- .mask = CPU_MASK_NONE,
.name = "fpga.3",
.dev_id = (void *) 0x6386UL,
}
[0] = {
.handler = fpga_interrupt,
.flags = IRQF_DISABLED,
- .mask = CPU_MASK_NONE,
.name = "fpga.0",
.dev_id = (void *) 0x0700UL,
}
[0] = {
.handler = mb93493_interrupt,
.flags = IRQF_DISABLED | IRQF_SHARED,
- .mask = CPU_MASK_NONE,
.name = "mb93493.0",
.dev_id = (void *) __addr_MB93493_IQSR(0),
},
[1] = {
.handler = mb93493_interrupt,
.flags = IRQF_DISABLED | IRQF_SHARED,
- .mask = CPU_MASK_NONE,
.name = "mb93493.1",
.dev_id = (void *) __addr_MB93493_IQSR(1),
}
static struct irqaction timer_irq = {
.handler = timer_interrupt,
.flags = IRQF_DISABLED,
- .mask = CPU_MASK_NONE,
.name = "timer",
};
.name = "itu",
.handler = timer_interrupt,
.flags = IRQF_DISABLED | IRQF_TIMER,
- .mask = CPU_MASK_NONE,
};
static const int __initdata divide_rate[] = {1, 2, 4, 8};
.name = "timer-16",
.handler = timer_interrupt,
.flags = IRQF_DISABLED | IRQF_TIMER,
- .mask = CPU_MASK_NONE,
};
static const int __initdata divide_rate[] = {1, 2, 4, 8};
.name = "timer-8",
.handler = timer_interrupt,
.flags = IRQF_DISABLED | IRQF_TIMER,
- .mask = CPU_MASK_NONE,
};
static const int __initdata divide_rate[] = {8, 64, 8192};
.name = "tpu",
.handler = timer_interrupt,
.flags = IRQF_DISABLED | IRQF_TIMER,
- .mask = CPU_MASK_NONE,
};
static const int __initdata divide_rate[] = {
*/
#define parent_node(nid) (nid)
-/*
- * Returns the number of the first CPU on Node 'node'.
- */
-#define node_to_first_cpu(node) (cpumask_first(cpumask_of_node(node)))
-
/*
* Determines the node for a given pci bus
*/
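
With node_to_first_cpu() removed from each architecture's topology.h, the
few callers open-code the lookup, as the for_each_online_node() hunk further
down shows. A hedged sketch of the replacement idiom (the helper is
hypothetical; cpumask_first() returns nr_cpu_ids for an empty mask):

#include <linux/cpumask.h>
#include <linux/topology.h>

static int example_first_cpu_on_node(int nid)
{
        return cpumask_first(cpumask_of_node(nid));
}
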
static struct irqaction irq0 = {
.handler = timer_interrupt,
.flags = IRQF_DISABLED,
- .mask = CPU_MASK_NONE,
.name = "MFT2",
};
static struct irqaction cascade = {
.handler = no_action,
- .mask = CPU_MASK_NONE,
.name = "cascade",
};
static struct irqaction irq_cascade = {
.handler = no_action,
.flags = 0,
- .mask = CPU_MASK_NONE,
.name = "cascade",
.dev_id = NULL,
.next = NULL,
#define parent_node(node) (node)
#define node_to_cpumask(node) (hub_data(node)->h_cpus)
#define cpumask_of_node(node) (&hub_data(node)->h_cpus)
-#define node_to_first_cpu(node) (cpumask_first(cpumask_of_node(node)))
struct pci_bus;
extern int pcibus_to_node(struct pci_bus *);
static struct irqaction r4030_timer_irqaction = {
.handler = r4030_timer_interrupt,
.flags = IRQF_DISABLED,
- .mask = CPU_MASK_CPU0,
.name = "R4030 timer",
};
action->handler = sibyte_counter_handler;
action->flags = IRQF_DISABLED | IRQF_PERCPU;
- action->mask = cpumask_of_cpu(cpu);
action->name = name;
action->dev_id = cd;
action->handler = sibyte_counter_handler;
action->flags = IRQF_DISABLED | IRQF_PERCPU;
- action->mask = cpumask_of_cpu(cpu);
action->name = name;
action->dev_id = cd;
static struct irqaction irq0 = {
.handler = timer_interrupt,
.flags = IRQF_DISABLED | IRQF_NOBALANCING,
- .mask = CPU_MASK_NONE,
.name = "timer"
};
cd->min_delta_ns = clockevent_delta2ns(0xF, cd);
clockevents_register_device(cd);
- irq0.mask = cpumask_of_cpu(cpu);
setup_irq(0, &irq0);
}
*/
static struct irqaction irq2 = {
.handler = no_action,
- .mask = CPU_MASK_NONE,
.name = "cascade",
};
static struct irqaction cascade = {
.handler = no_action,
- .mask = CPU_MASK_NONE,
.name = "cascade",
};
static struct irqaction cascade_irqaction = {
.handler = no_action,
- .mask = CPU_MASK_NONE,
.name = "cascade",
};
if (i == 1000) {
for_each_online_node(node)
if (NODEPDA(node)->dump_count == 0) {
- cpu = node_to_first_cpu(node);
+ cpu = cpumask_first(cpumask_of_node(node));
for (n = 0; n < CNODE_NUM_CPUS(node); cpu++, n++) {
CPUMASK_SETB(nmied_cpus, cpu);
/*
struct irqaction memerr_irq = {
.handler = crime_memerr_intr,
.flags = IRQF_DISABLED,
- .mask = CPU_MASK_NONE,
.name = "CRIME memory error",
};
struct irqaction cpuerr_irq = {
.handler = crime_cpuerr_intr,
.flags = IRQF_DISABLED,
- .mask = CPU_MASK_NONE,
.name = "CRIME CPU error",
};
* IRQ2 is the cascade interrupt to the second interrupt controller
*/
static struct irqaction sni_rm200_irq2 = {
- no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL
+ .handler = no_action,
+ .name = "cascade",
};
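
The sni_rm200_irq2 hunk is the one conversion here that is more than a
deletion: the old positional initializer depends on member order, so dropping
the mask member would silently shift "cascade" and the trailing NULLs into
the wrong fields. C99 designated initializers match members by name and leave
anything unnamed (.flags, .dev_id, .next) zero-initialized, so the entry
survives future layout changes.
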
static struct resource sni_rm200_pic1_resource = {
static struct irqaction cascade_irqaction = {
.handler = no_action,
- .mask = CPU_MASK_NONE,
.name = "cascade",
};
static struct irqaction timer_irq = {
.handler = timer_interrupt,
.flags = IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER,
- .mask = CPU_MASK_NONE,
.name = "timer",
};
#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])
-static inline int node_to_first_cpu(int node)
-{
- return cpumask_first(cpumask_of_node(node));
-}
-
int of_node_to_nid(struct device_node *device);
struct pci_bus;
static struct irqaction mpc85xxcds_8259_irqaction = {
.handler = mpc85xx_8259_cascade_action,
.flags = IRQF_SHARED,
- .mask = CPU_MASK_NONE,
.name = "8259 cascade",
};
#endif /* PPC_I8259 */
static struct irqaction tbint_irqaction = {
.handler = timebase_interrupt,
- .mask = CPU_MASK_NONE,
.name = "tbint",
};
#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_XMON)
static struct irqaction xmon_irqaction = {
.handler = xmon_irq,
- .mask = CPU_MASK_NONE,
.name = "XMON break",
};
#endif
static struct irqaction xmon_action = {
.handler = xmon_irq,
.flags = 0,
- .mask = CPU_MASK_NONE,
.name = "NMI - XMON"
};
#endif
static struct irqaction gatwick_cascade_action = {
.handler = gatwick_action,
.flags = IRQF_DISABLED,
- .mask = CPU_MASK_NONE,
.name = "cascade",
};
static struct irqaction psurge_irqaction = {
.handler = psurge_primary_intr,
.flags = IRQF_DISABLED,
- .mask = CPU_MASK_NONE,
.name = "primary IPI",
};
static struct irqaction cpm_error_irqaction = {
.handler = cpm_error_interrupt,
- .mask = CPU_MASK_NONE,
.name = "error",
};
#define node_to_cpumask(node) ((void)node, cpu_online_map)
#define cpumask_of_node(node) ((void)node, cpu_online_mask)
-#define node_to_first_cpu(node) ((void)(node),0)
#define pcibus_to_node(bus) ((void)(bus), -1)
#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \
static struct irqaction irq0 = {
.handler = timer_interrupt,
.flags = IRQF_DISABLED,
- .mask = CPU_MASK_NONE,
.name = "timer",
};
.name = "timer",
.handler = cmt_timer_interrupt,
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
- .mask = CPU_MASK_NONE,
};
static void cmt_clk_init(struct clk *clk)
.name = "timer",
.handler = mtu2_timer_interrupt,
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
- .mask = CPU_MASK_NONE,
};
static unsigned int divisors[] = { 1, 4, 16, 64, 1, 1, 256 };
.name = "periodic/oneshot timer",
.handler = tmu_timer_interrupt,
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
- .mask = CPU_MASK_NONE,
};
static void __init tmu_clk_init(struct clk *clk)
#define node_to_cpumask_ptr_next(v, node) \
v = &(numa_cpumask_lookup_table[node])
-static inline int node_to_first_cpu(int node)
-{
- return cpumask_first(cpumask_of_node(node));
-}
-
struct pci_bus;
#ifdef CONFIG_PCI
extern int pcibus_to_node(struct pci_bus *pbus);
flush_cache_all();
action->flags = irqflags;
- cpus_clear(action->mask);
action->name = devname;
action->dev_id = NULL;
action->next = NULL;
action->handler = handler;
action->flags = irqflags;
- cpus_clear(action->mask);
action->name = devname;
action->next = NULL;
action->dev_id = dev_id;
action->handler = handler;
action->flags = irqflags;
- cpus_clear(action->mask);
action->name = devname;
action->next = NULL;
action->dev_id = dev_id;
{
return cpu_online_map;
}
-static inline int node_to_first_cpu(int node)
-{
- return first_cpu(cpu_online_map);
-}
static inline void setup_node_to_cpumask_map(void) { }
#include <asm-generic/topology.h>
-#ifdef CONFIG_NUMA
-/* Returns the number of the first CPU on Node 'node'. */
-static inline int node_to_first_cpu(int node)
-{
- return cpumask_first(cpumask_of_node(node));
-}
-#endif
-
extern cpumask_t cpu_coregroup_map(int cpu);
extern const struct cpumask *cpu_coregroup_mask(int cpu);
*/
static struct irqaction fpu_irq = {
.handler = math_error_irq,
- .mask = CPU_MASK_NONE,
.name = "fpu",
};
*/
static struct irqaction irq2 = {
.handler = no_action,
- .mask = CPU_MASK_NONE,
.name = "cascade",
};
static struct irqaction irq2 = {
.handler = no_action,
- .mask = CPU_MASK_NONE,
.name = "cascade",
};
DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
static struct irqaction mfgptirq = {
.handler = mfgpt_tick,
.flags = IRQF_DISABLED | IRQF_NOBALANCING,
- .mask = CPU_MASK_NONE,
.name = "mfgpt-timer"
};
static struct irqaction irq0 = {
.handler = timer_interrupt,
.flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER,
- .mask = CPU_MASK_NONE,
.name = "timer"
};
static struct irqaction irq0 = {
.handler = timer_interrupt,
.flags = IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING | IRQF_TIMER,
- .mask = CPU_MASK_NONE,
.name = "timer"
};
if (!hpet_enable())
setup_pit_timer();
- irq0.mask = cpumask_of_cpu(0);
setup_irq(0, &irq0);
}
.name = "vmi-timer",
.handler = vmi_timer_interrupt,
.flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
- .mask = CPU_MASK_ALL,
};
static void __devinit vmi_time_init_clockevent(void)
#define print_cpus_func(type) \
static ssize_t print_cpus_##type(struct sysdev_class *class, char *buf) \
{ \
- return print_cpus_map(buf, &cpu_##type##_map); \
+ return print_cpus_map(buf, cpu_##type##_mask); \
} \
static struct sysdev_class_attribute attr_##type##_map = \
_SYSDEV_CLASS_ATTR(type, 0444, print_cpus_##type, NULL)
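
The print_cpus_##type macro switches from taking the address of the
cpu_##type##_map variables (cpumask_t values) to the cpu_##type##_mask
pointers (const struct cpumask *), the pattern the rest of the series
follows. A hedged sketch of the pointer-based accessors in use; the helper
is hypothetical:

#include <linux/kernel.h>
#include <linux/cpumask.h>

static void example_report_cpus(void)
{
        /* cpu_online_mask and friends are 'const struct cpumask *'. */
        pr_info("%d online out of %d possible CPUs\n",
                cpumask_weight(cpu_online_mask),
                cpumask_weight(cpu_possible_mask));
}
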
return 0;
}
-void __exit buffer_sync_cleanup(void)
+void buffer_sync_cleanup(void)
{
free_cpumask_var(marked_cpus);
}
}
EXPORT_SYMBOL(seq_bitmap);
-int seq_bitmap_list(struct seq_file *m, unsigned long *bits,
+int seq_bitmap_list(struct seq_file *m, const unsigned long *bits,
unsigned int nr_bits)
{
if (m->count < m->size) {
#ifndef cpumask_of_node
#define cpumask_of_node(node) ((void)node, cpu_online_mask)
#endif
-#ifndef node_to_first_cpu
-#define node_to_first_cpu(node) ((void)(node),0)
-#endif
#ifndef pcibus_to_node
#define pcibus_to_node(bus) ((void)(bus), -1)
#endif
-#ifndef pcibus_to_cpumask
-#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \
- CPU_MASK_ALL : \
- node_to_cpumask(pcibus_to_node(bus)) \
- )
-#endif
-
#ifndef cpumask_of_pcibus
#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
cpu_all_mask : \
static inline void cpuset_cpus_allowed(struct task_struct *p,
struct cpumask *mask)
{
- *mask = cpu_possible_map;
+ cpumask_copy(mask, cpu_possible_mask);
}
static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
struct cpumask *mask)
{
- *mask = cpu_possible_map;
+ cpumask_copy(mask, cpu_possible_mask);
}
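
The cpuset stubs above replace whole-struct assignment of cpu_possible_map
with cpumask_copy() from cpu_possible_mask. The difference matters once
CONFIG_CPUMASK_OFFSTACK makes masks nr_cpu_ids bits long: cpumask_copy()
copies only the valid bits, where a cpumask_t struct assignment always moves
the full NR_CPUS-sized object. Sketch, with a hypothetical caller-supplied
mask:

#include <linux/cpumask.h>

static void example_allow_all_possible(struct cpumask *mask)
{
        cpumask_copy(mask, cpu_possible_mask);
}
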
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
unsigned int nr_bits);
static inline int seq_cpumask(struct seq_file *m, const struct cpumask *mask)
{
- return seq_bitmap(m, mask->bits, nr_cpu_ids);
+ return seq_bitmap(m, cpumask_bits(mask), nr_cpu_ids);
}
static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask)
return seq_bitmap(m, mask->bits, MAX_NUMNODES);
}
-int seq_bitmap_list(struct seq_file *m, unsigned long *bits,
+int seq_bitmap_list(struct seq_file *m, const unsigned long *bits,
unsigned int nr_bits);
-static inline int seq_cpumask_list(struct seq_file *m, cpumask_t *mask)
+static inline int seq_cpumask_list(struct seq_file *m,
+ const struct cpumask *mask)
{
- return seq_bitmap_list(m, mask->bits, NR_CPUS);
+ return seq_bitmap_list(m, cpumask_bits(mask), nr_cpu_ids);
}
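
seq_cpumask_list() now takes a const struct cpumask * and walks only
nr_cpu_ids bits rather than the compile-time NR_CPUS, matching seq_cpumask()
above it. A hedged usage sketch with a hypothetical seq_file show routine:

#include <linux/seq_file.h>
#include <linux/cpumask.h>

static int example_show(struct seq_file *m, void *v)
{
        seq_cpumask_list(m, cpu_online_mask);   /* prints e.g. "0-3,5" */
        seq_putc(m, '\n');
        return 0;
}
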
static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
* Mark the current CPU as a possible migration target.
* The others will be handled by cpu_up()/cpu_down().
*/
- cpu = smp_processor_id();
- cpu_set(cpu, cpu_active_map);
+ set_cpu_active(smp_processor_id(), true);
/* FIXME: This should be done in userspace --RR */
for_each_present_cpu(cpu) {
/*
* init can run on any cpu.
*/
- set_cpus_allowed_ptr(current, CPU_MASK_ALL_PTR);
+ set_cpus_allowed_ptr(current, cpu_all_mask);
/*
* Tell the world that we're going to be the grim
* reaper of innocent orphaned children.
goto out;
}
- cpu_clear(cpu, cpu_active_map);
+ set_cpu_active(cpu, false);
/*
* Make sure all the cpus did the reschedule and are not
err = _cpu_down(cpu, 0);
if (cpu_online(cpu))
- cpu_set(cpu, cpu_active_map);
+ set_cpu_active(cpu, true);
out:
cpu_maps_update_done();
goto out_notify;
BUG_ON(!cpu_online(cpu));
- cpu_set(cpu, cpu_active_map);
+ set_cpu_active(cpu, true);
/* Now call notifier in preparation. */
raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);
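
These kernel/cpu.c hunks replace open-coded cpu_set()/cpu_clear() on
cpu_active_map with the set_cpu_active() accessor, so the map itself can
eventually become private to the cpumask code. A sketch of the accessor pair
(set_cpu_active() and cpu_active() are the real API; the helper is
hypothetical):

#include <linux/cpumask.h>

/* Take a CPU out of the scheduler's active set and confirm it left. */
static bool example_deactivate(int cpu)
{
        set_cpu_active(cpu, false);
        return !cpu_active(cpu);
}
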
mm->free_area_cache = oldmm->mmap_base;
mm->cached_hole_size = ~0UL;
mm->map_count = 0;
- cpus_clear(mm->cpu_vm_mask);
+ cpumask_clear(mm_cpumask(mm));
mm->mm_rb = RB_ROOT;
rb_link = &mm->mm_rb.rb_node;
rb_parent = NULL;
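
fork() now clears the new mm's CPU mask through the mm_cpumask() accessor
instead of touching mm->cpu_vm_mask directly, again hiding the storage so it
can later become variable-sized. Sketch, with a hypothetical TLB-tracking
helper:

#include <linux/mm_types.h>
#include <linux/cpumask.h>

static void example_note_mm_cpu(struct mm_struct *mm, int cpu)
{
        cpumask_set_cpu(cpu, mm_cpumask(mm));   /* this mm is live on cpu */
}
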
}
/* We can run anywhere, unlike our parent keventd(). */
- set_cpus_allowed_ptr(current, CPU_MASK_ALL_PTR);
+ set_cpus_allowed_ptr(current, cpu_all_mask);
/*
* Our parent is keventd, which runs with elevated scheduling priority.
*/
sched_setscheduler(create->result, SCHED_NORMAL, ¶m);
set_user_nice(create->result, KTHREAD_NICE_LEVEL);
- set_cpus_allowed_ptr(create->result, CPU_MASK_ALL_PTR);
+ set_cpus_allowed_ptr(create->result, cpu_all_mask);
}
complete(&create->done);
}
set_task_comm(tsk, "kthreadd");
ignore_signals(tsk);
set_user_nice(tsk, KTHREAD_NICE_LEVEL);
- set_cpus_allowed_ptr(tsk, CPU_MASK_ALL_PTR);
+ set_cpus_allowed_ptr(tsk, cpu_all_mask);
current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_timers = 0;
static struct list_head rcu_torture_removed;
+static cpumask_var_t shuffle_tmp_mask;
static int stutter_pause_test = 0;
*/
static void rcu_torture_shuffle_tasks(void)
{
- cpumask_t tmp_mask;
int i;
- cpus_setall(tmp_mask);
+ cpumask_setall(shuffle_tmp_mask);
get_online_cpus();
/* No point in shuffling if there is only one online CPU (ex: UP) */
}
if (rcu_idle_cpu != -1)
- cpu_clear(rcu_idle_cpu, tmp_mask);
+ cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);
- set_cpus_allowed_ptr(current, &tmp_mask);
+ set_cpus_allowed_ptr(current, shuffle_tmp_mask);
if (reader_tasks) {
for (i = 0; i < nrealreaders; i++)
if (reader_tasks[i])
set_cpus_allowed_ptr(reader_tasks[i],
- &tmp_mask);
+ shuffle_tmp_mask);
}
if (fakewriter_tasks) {
for (i = 0; i < nfakewriters; i++)
if (fakewriter_tasks[i])
set_cpus_allowed_ptr(fakewriter_tasks[i],
- &tmp_mask);
+ shuffle_tmp_mask);
}
if (writer_task)
- set_cpus_allowed_ptr(writer_task, &tmp_mask);
+ set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);
if (stats_task)
- set_cpus_allowed_ptr(stats_task, &tmp_mask);
+ set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);
if (rcu_idle_cpu == -1)
rcu_idle_cpu = num_online_cpus() - 1;
if (shuffler_task) {
VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
kthread_stop(shuffler_task);
+ free_cpumask_var(shuffle_tmp_mask);
}
shuffler_task = NULL;
}
if (test_no_idle_hz) {
rcu_idle_cpu = num_online_cpus() - 1;
+
+ if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
+ firsterr = -ENOMEM;
+ VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask");
+ goto unwind;
+ }
+
/* Create the shuffler thread */
shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
"rcu_torture_shuffle");
if (IS_ERR(shuffler_task)) {
+ free_cpumask_var(shuffle_tmp_mask);
firsterr = PTR_ERR(shuffler_task);
VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
shuffler_task = NULL;
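
The rcutorture hunks are the template for retiring on-stack cpumask_t
variables: a file-scope cpumask_var_t, alloc_cpumask_var() at init with an
-ENOMEM unwind, and free_cpumask_var() on every exit path. With
CONFIG_CPUMASK_OFFSTACK=y the storage is heap-allocated; otherwise the calls
compile down to an on-stack array and no-ops. A self-contained sketch of the
pattern (function name hypothetical):

#include <linux/cpumask.h>
#include <linux/slab.h>

static int example_mask_user(void)
{
        cpumask_var_t mask;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;         /* mirrors the unwind path above */

        cpumask_setall(mask);
        cpumask_clear_cpu(0, mask);     /* e.g. leave one CPU untouched */

        free_cpumask_var(mask);
        return 0;
}
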
#ifdef CONFIG_SMP
int cpupri_find(struct cpupri *cp,
- struct task_struct *p, cpumask_t *lowest_mask);
+ struct task_struct *p, struct cpumask *lowest_mask);
void cpupri_set(struct cpupri *cp, int cpu, int pri);
int cpupri_init(struct cpupri *cp, bool bootmem);
void cpupri_cleanup(struct cpupri *cp);
static int refcount;
static struct workqueue_struct *stop_machine_wq;
static struct stop_machine_data active, idle;
-static const cpumask_t *active_cpus;
+static const struct cpumask *active_cpus;
static void *stop_machine_work;
static void set_state(enum stopmachine_state newstate)
might_sleep();
lock_map_acquire(&wq->lockdep_map);
lock_map_release(&wq->lockdep_map);
- for_each_cpu_mask_nr(cpu, *cpu_map)
+ for_each_cpu(cpu, cpu_map)
flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
wq = cwq->wq;
cpu_map = wq_cpu_map(wq);
- for_each_cpu_mask_nr(cpu, *cpu_map)
+ for_each_cpu(cpu, cpu_map)
wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
list_del(&wq->list);
spin_unlock(&workqueue_lock);
- for_each_cpu_mask_nr(cpu, *cpu_map)
+ for_each_cpu(cpu, cpu_map)
cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
cpu_maps_update_done();
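
All three workqueue loops move from for_each_cpu_mask_nr(cpu, *cpu_map),
which dereferences the mask, to for_each_cpu(cpu, cpu_map), which takes the
pointer directly and iterates only up to nr_cpu_ids. Sketch of the iterator
(the counting helper is hypothetical and equivalent to cpumask_weight()):

#include <linux/cpumask.h>

static int example_count_cpus(const struct cpumask *mask)
{
        int cpu, count = 0;

        for_each_cpu(cpu, mask)
                count++;
        return count;
}
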
{
if (unlikely(!__pdata))
return;
- __percpu_depopulate_mask(__pdata, &cpu_possible_map);
+ __percpu_depopulate_mask(__pdata, cpu_possible_mask);
kfree(__percpu_disguise(__pdata));
}
EXPORT_SYMBOL_GPL(free_percpu);
/*
* Some configs put our parent kthread in a limited cpuset,
- * which kthread() overrides, forcing cpus_allowed == CPU_MASK_ALL.
+ * which kthread() overrides, forcing cpus_allowed == cpu_all_mask.
* Our needs are more modest - cut back to our cpuset's cpus_allowed.
* This is needed as pdflush threads are dynamically created and destroyed.
* The boot-time pdflush threads are easily placed w/o these 2 lines.
memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
- for_each_cpu_mask_nr(cpu, *cpumask) {
+ for_each_cpu(cpu, cpumask) {
struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
switch (m->mode) {
case SVC_POOL_PERCPU:
{
- set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
+ set_cpus_allowed_ptr(task, cpumask_of(node));
break;
}
case SVC_POOL_PERNODE:
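
Finally, &cpumask_of_cpu(cpu) gives way to cpumask_of(cpu), which returns a
const struct cpumask * for a single CPU without materializing a full
cpumask_t. A hedged sketch (set_cpus_allowed_ptr() is the real API; the
wrapper is hypothetical):

#include <linux/sched.h>
#include <linux/cpumask.h>

static int example_pin_task(struct task_struct *task, int cpu)
{
        return set_cpus_allowed_ptr(task, cpumask_of(cpu));
}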