# define might_resched() do { } while (0)
#endif
- #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
+ #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
void __might_sleep(const char *file, int line, int preempt_offset);
/**
* might_sleep - annotation for functions that can sleep
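A hedged sketch of what the annotation buys (foo_dev and foo_wait_ready() are made-up names, not from this patch): a function that may block calls might_sleep() up front, so with CONFIG_DEBUG_ATOMIC_SLEEP any call chain reaching it from atomic context is flagged at the annotation rather than at the eventual schedule():

	/* Illustrative only: foo_dev and foo_wait_ready() are made up. */
	struct foo_dev {
		wait_queue_head_t	wq;
		bool			ready;
	};

	static int foo_wait_ready(struct foo_dev *dev)
	{
		might_sleep();	/* flags atomic-context callers right here */

		return wait_event_interruptible(dev->wq, dev->ready);
	}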
#ifdef __CHECKER__
#define BUILD_BUG_ON_NOT_POWER_OF_2(n)
-#define BUILD_BUG_ON_ZERO(e)
-#define BUILD_BUG_ON_NULL(e)
+#define BUILD_BUG_ON_ZERO(e) (0)
+#define BUILD_BUG_ON_NULL(e) ((void*)0)
#define BUILD_BUG_ON(condition)
#else /* __CHECKER__ */
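These stubs must still expand to a value because both macros are expressions, meant to be embedded inside other expressions; the kernel's own ARRAY_SIZE() is the classic user:

	#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))

Under sparse (__CHECKER__) the old empty definitions made such expressions syntactically invalid, which is what the (0) and ((void*)0) bodies above fix.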
#ifdef CONFIG_TRACING
/* state flags for use by tracers */
unsigned long trace;
- /* bitmask of trace recursion */
+ /* bitmask and counter of trace recursion */
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
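As a rough illustration of "bitmask and counter" (the names below are made up, not the tracer's real macros): the low bits of a single word can count recursion depth while the high bits remain individual flags:

	/* Illustrative layout only. */
	#define DEPTH_MASK	0xfUL		/* bits 0-3: recursion depth */
	#define IN_NMI_FLAG	(1UL << 4)	/* bits 4+: one flag per context */

	static inline bool too_deep(unsigned long word)
	{
		return (word & DEPTH_MASK) >= DEPTH_MASK;
	}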
#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
if (unlikely(atomic_dec_and_test(&mm->mm_count)))
__mmdrop(mm);
}
-extern int mm_init_cpumask(struct mm_struct *mm, struct mm_struct *oldmm);
/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
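mm_struct carries two counters: mm_users pins the address space (released with mmput()), while mm_count pins the structure itself (released with mmdrop(), shown above). A hedged sketch of the mm_count side, with the elided work marked:

	struct mm_struct *mm = current->mm;

	atomic_inc(&mm->mm_count);	/* pin the struct, not the mappings */
	/* ... the mappings may be torn down via mmput() meanwhile ... */
	mmdrop(mm);			/* __mmdrop() frees it on the final count */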
extern int __cond_resched_lock(spinlock_t *lock);
- #ifdef CONFIG_PREEMPT
+ #ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
#else
#define PREEMPT_LOCK_OFFSET 0
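PREEMPT_LOCK_OFFSET is the preempt count a task is expected to have while holding exactly one spinlock; keying it on CONFIG_PREEMPT_COUNT rather than CONFIG_PREEMPT makes it correct on any kernel that maintains a preempt count. cond_resched_lock() compares against it so a lock holder can yield safely. A sketch with illustrative names (foo_lock, more_work(), do_one_unit()):

	spin_lock(&foo_lock);
	while (more_work()) {
		do_one_unit();
		/* may drop foo_lock, schedule, and re-take the lock */
		cond_resched_lock(&foo_lock);
	}
	spin_unlock(&foo_lock);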
/*
 * Return the group to which this task belongs.
*
- * We use task_subsys_state_check() and extend the RCU verification
- * with lockdep_is_held(&p->pi_lock) because cpu_cgroup_attach()
- * holds that lock for each task it moves into the cgroup. Therefore
- * by holding that lock, we pin the task to the current cgroup.
+ * We use task_subsys_state_check() and extend the RCU verification with
+ * p->pi_lock and rq->lock because cpu_cgroup_attach() holds those locks for
+ * each task it moves into the cgroup. Therefore by holding either of those
+ * locks, we pin the task to the current cgroup.
*/
static inline struct task_group *task_group(struct task_struct *p)
{
struct cgroup_subsys_state *css;
css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
- lockdep_is_held(&p->pi_lock));
+ lockdep_is_held(&p->pi_lock) ||
+ lockdep_is_held(&task_rq(p)->lock));
tg = container_of(css, struct task_group, css);
return autogroup_task_group(p, tg);
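In other words, any caller that wants a stable answer from task_group() must hold one of the two locks named in the check; a hedged sketch of the reader side:

	/* Sketch: pin p's cgroup by taking one of the two accepted locks. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	tg = task_group(p);	/* satisfies the lockdep condition above */
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);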
!(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
#ifdef CONFIG_LOCKDEP
+ /*
+ * The caller should hold either p->pi_lock or rq->lock when changing
+ * a task's CPU: ->pi_lock for waking tasks, rq->lock for runnable tasks.
+ *
+ * sched_move_task() holds both and thus holding either pins the cgroup,
+ * see set_task_rq().
+ *
+ * Furthermore, all task_rq users should acquire both locks, see
+ * task_rq_lock().
+ */
WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
lockdep_is_held(&task_rq(p)->lock)));
#endif
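The "see task_rq_lock()" reference is to the helper that takes both locks in order; roughly, as a close sketch of this era's kernel/sched.c, it retries because the task can migrate between reading task_rq() and locking it:

	static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
		__acquires(p->pi_lock)
		__acquires(rq->lock)
	{
		struct rq *rq;

		for (;;) {
			raw_spin_lock_irqsave(&p->pi_lock, *flags);
			rq = task_rq(p);
			raw_spin_lock(&rq->lock);
			if (likely(rq == task_rq(p)))
				return rq;
			raw_spin_unlock(&rq->lock);
			raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
		}
	}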
#if defined(CONFIG_SMP)
p->on_cpu = 0;
#endif
- #ifdef CONFIG_PREEMPT
+ #ifdef CONFIG_PREEMPT_COUNT
/* Want to start with kernel preemption disabled. */
task_thread_info(p)->preempt_count = 1;
#endif
static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
- bool ret = false;
-
- rcu_read_lock();
if (lock->owner != owner)
- goto fail;
+ return false;
/*
 * Ensure we emit the owner->on_cpu dereference _after_ checking that
 * lock->owner still matches owner. If that fails, owner might point to
 * freed memory; if it still matches, the caller's rcu_read_lock() keeps
 * that memory valid.
 */
barrier();
- ret = owner->on_cpu;
-fail:
- rcu_read_unlock();
-
- return ret;
+ return owner->on_cpu;
}
/*
if (!sched_feat(OWNER_SPIN))
return 0;
+ rcu_read_lock();
while (owner_running(lock, owner)) {
if (need_resched())
- return 0;
+ break;
arch_mutex_cpu_relax();
}
+ rcu_read_unlock();
/*
- * If the owner changed to another task there is likely
- * heavy contention, stop spinning.
+ * We break out of the loop above on need_resched() and when the
+ * owner changes, which is a sign of heavy contention. Return
+ * success only when lock->owner is NULL.
*/
- if (lock->owner)
- return 0;
-
- return 1;
+ return lock->owner == NULL;
}
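For context, a simplified sketch of the caller driving this helper (the optimistic-spin loop in __mutex_lock_common; debug and error handling omitted):

	for (;;) {
		struct task_struct *owner;

		/* wait for the owner to release the lock or go to sleep */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		if (atomic_cmpxchg(&lock->count, 1, 0) == 1)
			return 0;	/* took the lock while spinning */

		if (!owner && (need_resched() || rt_task(current)))
			break;

		arch_mutex_cpu_relax();
	}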
#endif
scheduler_running = 1;
}
- #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
+ #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
void __might_sleep(const char *file, int line, int preempt_offset)
{
- #ifdef in_atomic
static unsigned long prev_jiffy; /* ratelimiting */
if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
if (irqs_disabled())
print_irqtrace_events(current);
dump_stack();
- #endif
}
EXPORT_SYMBOL(__might_sleep);
#endif
Enables hooks into the interrupt enabling and disabling paths for
either tracing or lock debugging.
- config DEBUG_SPINLOCK_SLEEP
- bool "Spinlock debugging: sleep-inside-spinlock checking"
+ config DEBUG_ATOMIC_SLEEP
+ bool "Sleep inside atomic section checking"
+ select PREEMPT_COUNT
depends on DEBUG_KERNEL
help
If you say Y here, various routines which may sleep will become very
- noisy if they are called with a spinlock held.
+ noisy if they are called inside an atomic section: with a spinlock
+ held, inside an RCU read-side critical section, in a preempt-disabled
+ section, in interrupt context, and so on.
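Illustrative examples of what the widened check catches (foo_lock and foo_mutex are made-up names); the RCU case is exactly why the option selects PREEMPT_COUNT, so that read-side critical sections show up in the preempt count even on !CONFIG_PREEMPT kernels:

	spin_lock(&foo_lock);
	mutex_lock(&foo_mutex);		/* mutex_lock() calls might_sleep() */
	mutex_unlock(&foo_mutex);
	spin_unlock(&foo_lock);

	rcu_read_lock();
	buf = kmalloc(64, GFP_KERNEL);	/* GFP_KERNEL allocations may sleep */
	rcu_read_unlock();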
config DEBUG_LOCKING_API_SELFTESTS
bool "Locking API boot-time self-tests"
bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
depends on BUG
depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \
- FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300
+ FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 || TILE
default y
help
Say Y here to make BUG() panics output the file name and line number