Keep the status flags in sync until the last abuser is gone.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
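
The conversion scheme: the deprecated desc->status bit is mirrored as IRQD_AFFINITY_SET in irq_data.state_use_accessors, and every set/clear below updates both words. Converted call sites then use the new accessor instead of peeking at desc->status. A minimal sketch, assuming a valid Linux IRQ number (demo_affinity_pinned is a hypothetical name, not part of the patch):

#include <linux/irq.h>

/* Hypothetical example: query the new state bit through the accessor
 * this patch introduces, instead of testing desc->status directly. */
static bool demo_affinity_pinned(unsigned int irq)
{
        struct irq_data *d = irq_get_irq_data(irq);

        return d && irqd_affinity_was_set(d);
}
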
#define IRQ_MASKED 0x00002000 /* DEPRECATED */
/* DEPRECATED use irqd_is_setaffinity_pending() instead */
#define IRQ_MOVE_PENDING 0x00004000
+#define IRQ_AFFINITY_SET 0x02000000 /* DEPRECATED */
#endif
#define IRQ_LEVEL 0x00008000 /* IRQ level triggered */
#define IRQ_WAKEUP 0x00100000 /* IRQ triggers system wakeup */
#define IRQ_NO_BALANCING 0x00400000 /* IRQ is excluded from balancing */
#define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */
-#define IRQ_AFFINITY_SET 0x02000000 /* IRQ affinity was set from userspace*/
#define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */
#define IRQF_MODIFY_MASK \
* IRQD_SETAFFINITY_PENDING - Affinity setting is pending
* IRQD_NO_BALANCING - Balancing disabled for this IRQ
* IRQD_PER_CPU - Interrupt is per cpu
+ * IRQD_AFFINITY_SET - Interrupt affinity was set
*/
enum {
/* Bits 0-7 reserved for the trigger type, to be used later */
IRQD_SETAFFINITY_PENDING = (1 << 8),
IRQD_NO_BALANCING = (1 << 10),
IRQD_PER_CPU = (1 << 11),
+ IRQD_AFFINITY_SET = (1 << 12),
};
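
Bits 0-7 of the state word stay free, presumably so the trigger type can be mirrored there later; the dedicated state bits therefore start at (1 << 8), and IRQD_AFFINITY_SET lands on bit 12. A stand-alone sanity check of that layout (the DEMO_* enum just copies the values above):

#include <assert.h>

/* Illustrative copies of the IRQD_* values defined above. */
enum {
        DEMO_SETAFFINITY_PENDING = (1 << 8),
        DEMO_NO_BALANCING        = (1 << 10),
        DEMO_PER_CPU             = (1 << 11),
        DEMO_AFFINITY_SET        = (1 << 12),
};

int main(void)
{
        unsigned int all = DEMO_SETAFFINITY_PENDING | DEMO_NO_BALANCING |
                           DEMO_PER_CPU | DEMO_AFFINITY_SET;

        assert(!(all & 0xff));  /* bits 0-7 stay free for the type */
        return 0;
}
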
static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
{
return d->state_use_accessors & IRQD_SETAFFINITY_PENDING;
}

static inline bool irqd_can_balance(struct irq_data *d)
{
return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING));
}
+static inline bool irqd_affinity_was_set(struct irq_data *d)
+{
+ return d->state_use_accessors & IRQD_AFFINITY_SET;
+}
+
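
A hypothetical consumer of the new accessor: affinity-spreading logic that backs off when user space has pinned the interrupt. demo_spread_irq and its policy are illustrative only; the patch itself does not add such a caller:

#include <linux/interrupt.h>
#include <linux/irqdesc.h>

/* Hypothetical policy: only spread interrupts that user space has not
 * pinned explicitly. */
static void demo_spread_irq(struct irq_desc *desc, const struct cpumask *hint)
{
        if (irqd_affinity_was_set(&desc->irq_data))
                return;         /* respect the explicit user setting */

        irq_set_affinity(desc->irq_data.irq, hint);
}
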
/**
* struct irq_chip - hardware interrupt chip descriptor
*
static inline void irq_compat_clr_move_pending(struct irq_desc *desc)
{
desc->status &= ~IRQ_MOVE_PENDING;
}
+static inline void irq_compat_set_affinity(struct irq_desc *desc)
+{
+ desc->status |= IRQ_AFFINITY_SET;
+}
+
+static inline void irq_compat_clr_affinity(struct irq_desc *desc)
+{
+ desc->status &= ~IRQ_AFFINITY_SET;
+}
#else
static inline void irq_compat_set_progress(struct irq_desc *desc) { }
static inline void irq_compat_clr_progress(struct irq_desc *desc) { }
static inline void irq_compat_clr_masked(struct irq_desc *desc) { }
static inline void irq_compat_set_move_pending(struct irq_desc *desc) { }
static inline void irq_compat_clr_move_pending(struct irq_desc *desc) { }
+static inline void irq_compat_set_affinity(struct irq_desc *desc) { }
+static inline void irq_compat_clr_affinity(struct irq_desc *desc) { }
#endif
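
This follows the compat pattern of the whole series: while the old status word is still alive the helpers keep it in sync, and under CONFIG_GENERIC_HARDIRQS_NO_COMPAT they collapse to empty inline stubs that the compiler discards. The same compile-away technique in stand-alone form (widget and KEEP_LEGACY are made-up names):

#include <stdio.h>

struct widget { unsigned int legacy_flags; };

#ifdef KEEP_LEGACY
/* Transitional build: keep the deprecated word in sync. */
static inline void widget_compat_mark(struct widget *w)
{
        w->legacy_flags |= 0x1;
}
#else
/* Converted build: the helper is an empty stub and vanishes. */
static inline void widget_compat_mark(struct widget *w) { }
#endif

int main(void)
{
        struct widget w = { 0 };

        widget_compat_mark(&w); /* zero cost once KEEP_LEGACY is gone */
        printf("legacy_flags=%#x\n", w.legacy_flags);
        return 0;
}
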
static inline void irqd_set(struct irq_data *d, unsigned int mask)
{
d->state_use_accessors |= mask;
}
+static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)
+{
+ return d->state_use_accessors & mask;
+}
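
irqd_has_set() is the test-side twin of irqd_set()/irqd_clear(); it does a plain unlocked read, so callers normally run under desc->lock. A hypothetical debug use, assuming code inside kernel/irq/ where these private helpers are visible (demo_warn_if_pending is not part of the patch):

#include <linux/irq.h>
#include "internals.h"  /* assumed home of the private irqd_* helpers */

/* Hypothetical debugging aid: complain if an affinity update is still
 * queued at a point where none should be pending. */
static void demo_warn_if_pending(struct irq_data *d)
{
        WARN_ON_ONCE(irqd_has_set(d, IRQD_SETAFFINITY_PENDING));
}
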
kref_get(&desc->affinity_notify->kref);
schedule_work(&desc->affinity_notify->work);
}
- desc->status |= IRQ_AFFINITY_SET;
+ irq_compat_set_affinity(desc);
+ irqd_set(&desc->irq_data, IRQD_AFFINITY_SET);
raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
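
This hunk is the tail of the core affinity setter: the flag is recorded under desc->lock, so the deprecated bit and the new bit flip together as seen by any other desc->lock holder. Drivers are unaffected; a call like the following still ends up here (demo_pin_irq is a hypothetical wrapper):

#include <linux/interrupt.h>

/* Hypothetical caller: pin an interrupt to one CPU; on success the
 * core records IRQD_AFFINITY_SET (and, for now, IRQ_AFFINITY_SET). */
static int demo_pin_irq(unsigned int irq, int cpu)
{
        return irq_set_affinity(irq, cpumask_of(cpu));
}
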
* Preserve a userspace affinity setup, but make sure that
* one of the targets is online.
*/
- if (desc->status & (IRQ_AFFINITY_SET)) {
+ if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
if (cpumask_intersects(desc->irq_data.affinity,
cpu_online_mask))
set = desc->irq_data.affinity;
- else
- desc->status &= ~IRQ_AFFINITY_SET;
+ else {
+ irq_compat_clr_affinity(desc);
+ irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
+ }
}
cpumask_and(mask, cpu_online_mask, set);
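
The fixup above preserves a user-space affinity only while it still intersects the online CPUs; otherwise both flag words are cleared and the default mask wins. The decision in stand-alone form, on plain bitmasks (pick_mask is illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Returns the CPU mask to program; clears *pinned when the remembered
 * user setting no longer intersects the online mask, mirroring the
 * IRQD_AFFINITY_SET clear in the hunk above. */
static unsigned long pick_mask(unsigned long user_set, bool *pinned,
                               unsigned long online, unsigned long def)
{
        unsigned long set = def;

        if (*pinned) {
                if (user_set & online)
                        set = user_set;
                else
                        *pinned = false;        /* stale pin, fall back */
        }
        return set & online;
}

int main(void)
{
        bool pinned = true;

        /* CPU 2 went offline (online mask 0x3): the pin is dropped. */
        printf("%#lx pinned=%d\n", pick_mask(0x4, &pinned, 0x3, 0xf), pinned);

        pinned = true;
        /* CPU 1 still online: the user mask survives. */
        printf("%#lx pinned=%d\n", pick_mask(0x2, &pinned, 0x3, 0xf), pinned);
        return 0;
}
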
#undef IRQ_PER_CPU
#define IRQ_PER_CPU GOT_YOU_MORON
#undef IRQ_NO_BALANCING
#define IRQ_NO_BALANCING GOT_YOU_MORON
+#undef IRQ_AFFINITY_SET
+#define IRQ_AFFINITY_SET GOT_YOU_MORON
#undef IRQF_MODIFY_MASK
#define IRQF_MODIFY_MASK GOT_YOU_MORON
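
With the core converted, the old flag names are poisoned for kernel/irq/: each now expands to the undeclared identifier GOT_YOU_MORON, so any leftover use fails the build with an unmistakable error. Illustration (this snippet intentionally does not compile):

/* Illustration only: this translation unit will NOT build, which is
 * exactly the point of the poisoning above. */
#define IRQ_AFFINITY_SET GOT_YOU_MORON

static int stale_user(unsigned int status)
{
        return status & IRQ_AFFINITY_SET; /* error: 'GOT_YOU_MORON' undeclared */
}
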