return -EINVAL;
mask = 0xff << shift;
- bit = 1 << (cpu + shift);
+ bit = 1 << (cpu_logical_map(cpu) + shift);
- spin_lock(&irq_controller_lock);
+ raw_spin_lock(&irq_controller_lock);
val = readl_relaxed(reg) & ~mask;
writel_relaxed(val | bit, reg);
- spin_unlock(&irq_controller_lock);
+ raw_spin_unlock(&irq_controller_lock);
return IRQ_SET_MASK_OK;
}
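
The switch to raw_spin_lock() above matters on PREEMPT_RT, where a
plain spinlock_t becomes a sleeping lock; irq-chip callbacks run with
interrupts hard-disabled and need a lock that always spins.  A minimal
sketch of the same read-modify-write pattern under a raw spinlock
(demo_lock and demo_set_target are illustrative names, not the real
GIC driver):

#include <linux/spinlock.h>
#include <linux/io.h>

static DEFINE_RAW_SPINLOCK(demo_lock);

/*
 * Update one field of a shared MMIO register atomically with respect
 * to other CPUs.  raw_spinlock_t never sleeps, even on PREEMPT_RT,
 * so this is safe from atomic context such as irq-chip callbacks.
 */
static void demo_set_target(void __iomem *reg, u32 mask, u32 bit)
{
	u32 val;

	raw_spin_lock(&demo_lock);
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	raw_spin_unlock(&demo_lock);
}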
oops_enter();
- spin_lock_irq(&die_lock);
+ raw_spin_lock_irq(&die_lock);
console_verbose();
bust_spinlocks(1);
+ if (!user_mode(regs))
+ report_bug(regs->ARM_pc, regs);
ret = __die(str, err, thread, regs);
if (regs && kexec_should_crash(thread->task))
	crash_kexec(regs);
}
}
+#ifdef CONFIG_GENERIC_BUG
+
+int is_valid_bugaddr(unsigned long pc)
+{
+#ifdef CONFIG_THUMB2_KERNEL
+ unsigned short bkpt;
+#else
+ unsigned long bkpt;
+#endif
+
+ if (probe_kernel_address((unsigned *)pc, bkpt))
+ return 0;
+
+ return bkpt == BUG_INSTR_VALUE;
+}
+
+#endif
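
For context on how is_valid_bugaddr() is used: with CONFIG_GENERIC_BUG,
BUG() compiles to a trapping instruction (BUG_INSTR_VALUE here) and the
call site is recorded in the __bug_table section.  When the trap fires,
the generic report_bug() first asks the architecture, via
is_valid_bugaddr(), whether the faulting PC really holds that opcode,
and only then looks the address up in the bug table to print the
file/line report.  A rough sketch of the dispatch; handle_trap() is a
hypothetical stand-in for the real ARM undefined-instruction path:

#include <linux/bug.h>
#include <asm/ptrace.h>

/* Hypothetical wrapper, not the actual ARM trap entry point. */
static void handle_trap(struct pt_regs *regs)
{
	if (!user_mode(regs)) {
		/*
		 * report_bug() returns BUG_TRAP_TYPE_NONE for an
		 * unknown address, BUG_TRAP_TYPE_WARN for a WARN()
		 * site that has been reported and can be skipped,
		 * and BUG_TRAP_TYPE_BUG for a real BUG().
		 */
		if (report_bug(regs->ARM_pc, regs) == BUG_TRAP_TYPE_WARN) {
			regs->ARM_pc += 4;	/* step over the trap */
			return;
		}
	}
	/* A genuine BUG() or unknown instruction falls through to die(). */
}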
+
static LIST_HEAD(undef_hook);
- static DEFINE_SPINLOCK(undef_lock);
+ static DEFINE_RAW_SPINLOCK(undef_lock);
void register_undef_hook(struct undef_hook *hook)
{
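
The hook list is walked from the undefined-instruction trap, which is
why undef_lock above becomes a raw spinlock.  A hedged example of
installing a hook (the fields follow ARM's struct undef_hook; the
opcode and handler are made up for illustration):

#include <asm/traps.h>
#include <asm/ptrace.h>

/*
 * Hypothetical handler: claim one exact opcode, skip it, continue.
 * Returning 0 tells the trap code the instruction was handled.
 */
static int demo_undef_fn(struct pt_regs *regs, unsigned int instr)
{
	regs->ARM_pc += 4;
	return 0;
}

static struct undef_hook demo_hook = {
	.instr_mask	= 0xffffffff,	/* match every bit of the opcode */
	.instr_val	= 0xe7f000f0,	/* illustrative undefined encoding */
	.cpsr_mask	= MODE_MASK,	/* only when trapped in SVC mode */
	.cpsr_val	= SVC_MODE,
	.fn		= demo_undef_fn,
};

/* From init code: register_undef_hook(&demo_hook); */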
obj-$(CONFIG_IOMMU_API) += iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
- obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
- obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o
+ obj-$(CONFIG_DMAR_TABLE) += dmar.o
+ obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
+ obj-$(CONFIG_IRQ_REMAP) += intr_remapping.o
+obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
+obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o
+obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
return ACCESS_ONCE(head->first) == NULL;
}
- void llist_add(struct llist_node *new, struct llist_head *head);
- void llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
- struct llist_head *head);
- struct llist_node *llist_del_first(struct llist_head *head);
- struct llist_node *llist_del_all(struct llist_head *head);
+ static inline struct llist_node *llist_next(struct llist_node *node)
+ {
+ return node->next;
+ }
+
+ /**
+ * llist_add - add a new entry
+ * @new: new entry to be added
+ * @head: the head for your lock-less list
+ *
+ * Returns true if the list was empty prior to adding this entry.
+ */
+ static inline bool llist_add(struct llist_node *new, struct llist_head *head)
+ {
+ struct llist_node *entry, *old_entry;
+
+ entry = head->first;
+ for (;;) {
+ old_entry = entry;
+ new->next = entry;
+ entry = cmpxchg(&head->first, old_entry, new);
+ if (entry == old_entry)
+ break;
+ }
+
+ return old_entry == NULL;
+ }
+
+ /**
+ * llist_del_all - delete all entries from lock-less list
+ * @head: the head of lock-less list to delete all entries
+ *
+ * If the list is empty, return NULL; otherwise delete all entries
+ * and return a pointer to the first entry.  Entries are returned in
+ * reverse order of addition, newest first.
+ */
+ static inline struct llist_node *llist_del_all(struct llist_head *head)
+ {
+ return xchg(&head->first, NULL);
+ }
+
+extern bool llist_add_batch(struct llist_node *new_first,
+			    struct llist_node *new_last,
+			    struct llist_head *head);
+extern struct llist_node *llist_del_first(struct llist_head *head);
+
#endif /* LLIST_H */
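
Taken together, the inline llist_add()/llist_del_all() pair forms a
lock-free LIFO: producers push through the cmpxchg() retry loop, and a
consumer snapshots the whole list with a single xchg().  A small usage
sketch, assuming an embedded llist_node (struct demo_item and the
demo_* helpers are illustrative):

#include <linux/llist.h>
#include <linux/slab.h>

struct demo_item {
	int value;
	struct llist_node node;		/* embedded lock-less list hook */
};

static LLIST_HEAD(demo_list);

/* Producer: may be called concurrently from any context, no lock. */
static void demo_push(struct demo_item *item)
{
	llist_add(&item->node, &demo_list);
}

/*
 * Consumer: detach everything in one atomic step, then walk the
 * private snapshot.  Entries come back newest-first, as documented
 * for llist_del_all().
 */
static void demo_drain(void)
{
	struct llist_node *n = llist_del_all(&demo_list);

	while (n) {
		struct demo_item *item =
			llist_entry(n, struct demo_item, node);

		n = llist_next(n);
		kfree(item);
	}
}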