/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

#define SPU_TIMESLICE	(HZ)

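/*
 * The scheduler keeps one FIFO runqueue per priority level for contexts
 * that are waiting for an SPU, plus a bitmap with one bit per non-empty
 * list so that sched_find_first_bit() can locate the highest-priority
 * waiter in constant time.  Contexts that currently own an SPU sit on a
 * per-node active list, protected by a per-node mutex.
 */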
struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	struct list_head active_list[MAX_NUMNODES];
	struct mutex active_mutex[MAX_NUMNODES];
};

static struct spu_prio_array *spu_prio;
static struct workqueue_struct *spu_sched_wq;

static inline int node_allowed(int node)
{
	cpumask_t mask;

	if (!nr_cpus_node(node))
		return 0;
	mask = node_to_cpumask(node);
	if (!cpus_intersects(mask, current->cpus_allowed))
		return 0;
	return 1;
}

void spu_start_tick(struct spu_context *ctx)
{
	if (ctx->policy == SCHED_RR) {
		/*
		 * Make sure the exiting bit is cleared.
		 */
		clear_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
		mb();
		queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE);
	}
}

void spu_stop_tick(struct spu_context *ctx)
{
	if (ctx->policy == SCHED_RR) {
		/*
		 * The tick work may normally rearm itself; setting this
		 * flag makes sure it does not rearm anymore.
		 */
		set_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
		mb();
		cancel_delayed_work(&ctx->sched_work);
	}
}

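/*
 * The scheduler tick below implements the SPU_TIMESLICE round-robin:
 * spu_sched_tick() runs as delayed work, preempts the context when a
 * context of equal or higher priority is waiting on the runqueue, and
 * otherwise rearms itself through spu_start_tick() until spu_stop_tick()
 * sets SPU_SCHED_EXITING.
 */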
void spu_sched_tick(struct work_struct *work)
{
	struct spu_context *ctx =
		container_of(work, struct spu_context, sched_work.work);
	struct spu *spu;
	int preempted = 0;

	/*
	 * If this context is being stopped, avoid rescheduling from the
	 * scheduler tick because we would block on the state_mutex.
	 * The caller will yield the spu later on anyway.
	 */
	if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
		return;

	mutex_lock(&ctx->state_mutex);
	spu = ctx->spu;
	if (spu) {
		int best = sched_find_first_bit(spu_prio->bitmap);
		if (best <= ctx->prio) {
			spu_deactivate(ctx);
			preempted = 1;
		}
	}
	mutex_unlock(&ctx->state_mutex);

	if (preempted) {
		/*
		 * We need to break out of the wait loop in spu_run manually
		 * to ensure this context gets put on the runqueue again
		 * ASAP.
		 */
		wake_up(&ctx->stop_wq);
	} else
		spu_start_tick(ctx);
}

/**
 * spu_add_to_active_list - add spu to active list
 * @spu: spu to add to the active list
 */
static void spu_add_to_active_list(struct spu *spu)
{
	mutex_lock(&spu_prio->active_mutex[spu->node]);
	list_add_tail(&spu->list, &spu_prio->active_list[spu->node]);
	mutex_unlock(&spu_prio->active_mutex[spu->node]);
}

/**
 * spu_remove_from_active_list - remove spu from active list
 * @spu: spu to remove from the active list
 */
static void spu_remove_from_active_list(struct spu *spu)
{
	int node = spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	list_del_init(&spu->list);
	mutex_unlock(&spu_prio->active_mutex[node]);
}

static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
	blocking_notifier_call_chain(&spu_switch_notifier,
				     ctx ? ctx->object_id : 0, spu);
}

int spu_switch_event_register(struct notifier_block *n)
{
	return blocking_notifier_chain_register(&spu_switch_notifier, n);
}

int spu_switch_event_unregister(struct notifier_block *n)
{
	return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}

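/*
 * Minimal sketch (not part of the scheduler) of how a consumer such as a
 * profiler could subscribe to context-switch events.  The example_*
 * identifiers are hypothetical and only illustrate the notifier protocol.
 */
#if 0
static int example_spu_switch(struct notifier_block *nb,
			      unsigned long object_id, void *data)
{
	struct spu *spu = data;

	/* object_id is ctx->object_id, or 0 when the spu goes idle */
	pr_debug("SPU %d now runs context %lu\n", spu->number, object_id);
	return NOTIFY_OK;
}

static struct notifier_block example_spu_switch_nb = {
	.notifier_call = example_spu_switch,
};

/* ... spu_switch_event_register(&example_spu_switch_nb); ... */
#endif
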
/**
 * spu_bind_context - bind spu context to physical spu
 * @spu: physical spu to bind to
 * @ctx: context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
		 spu->number, spu->node);
	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu_associate_mm(spu, ctx->owner);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spu->dma_callback = spufs_dma_callback;
	mb();
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	spu_switch_notify(spu, ctx);
	spu_add_to_active_list(spu);
	ctx->state = SPU_STATE_RUNNABLE;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu: physical spu to unbind from
 * @ctx: context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
		 spu->pid, spu->number, spu->node);

	spu_remove_from_active_list(spu);
	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->dma_callback = NULL;
	spu_associate_mm(spu, NULL);
	spu->pid = 0;
	ctx->ops = &spu_backing_ops;
	ctx->spu = NULL;
	spu->flags = 0;
	spu->ctx = NULL;
}

/**
 * __spu_add_to_rq - add a context to the runqueue
 * @ctx: context to add
 *
 * Must be called with spu_prio->runq_lock held.
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	list_add_tail(&ctx->rq, &spu_prio->runq[prio]);
	set_bit(prio, spu_prio->bitmap);
}

static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq))
		list_del_init(&ctx->rq);
	if (list_empty(&spu_prio->runq[prio]))
		clear_bit(prio, spu_prio->bitmap);
}

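/**
 * spu_prio_wait - sleep until this context is handed a spu
 * @ctx: context that could not get a spu immediately
 *
 * Puts @ctx on the runqueue and sleeps on ctx->stop_wq until woken,
 * typically by spu_reschedule() once a spu becomes free, or until a
 * signal arrives.  ctx->state_mutex is dropped while sleeping and
 * re-acquired before returning; the context is off the runqueue again
 * in all cases when this function returns.
 */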
static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}

/**
 * spu_reschedule - try to find a runnable context for a spu
 * @spu: spu available
 *
 * This function is called whenever a spu becomes idle.  It looks for the
 * most suitable runnable spu context and schedules it for execution.
 */
static void spu_reschedule(struct spu *spu)
{
	int best;

	spu_free(spu);

	spin_lock(&spu_prio->runq_lock);
	best = sched_find_first_bit(spu_prio->bitmap);
	if (best < MAX_PRIO) {
		struct list_head *rq = &spu_prio->runq[best];
		struct spu_context *ctx;

		BUG_ON(list_empty(rq));

		ctx = list_entry(rq->next, struct spu_context, rq);
		__spu_del_from_rq(ctx);
		wake_up(&ctx->stop_wq);
	}
	spin_unlock(&spu_prio->runq_lock);
}

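/**
 * spu_get_idle - try to find an idle spu for a context
 * @ctx: spu context to schedule
 *
 * Walks the NUMA nodes starting with the one the current cpu lives on
 * and returns the first idle spu found on a node the task is allowed to
 * run on, or NULL if every allowed node is fully busy.
 */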
static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu = NULL;
	int node = cpu_to_node(raw_smp_processor_id());
	int n;

	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(node))
			continue;
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}
	return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx: candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(node))
			continue;

		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry(spu, &spu_prio->active_list[node], list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp->rt_priority < ctx->rt_priority &&
			    (!victim || tmp->rt_priority < victim->rt_priority))
				victim = spu->ctx;
		}
		mutex_unlock(&spu_prio->active_mutex[node]);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				victim = NULL;
				goto restart;
			}
			spu_unbind_context(spu, victim);
			mutex_unlock(&victim->state_mutex);
			/*
			 * We need to break out of the wait loop in spu_run
			 * manually to ensure this context gets put on the
			 * runqueue again ASAP.
			 */
			wake_up(&victim->stop_wq);
			return spu;
		}
	}

	return NULL;
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx: spu context to schedule
 * @flags: flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available,
 * add the context to the runqueue so it gets woken up once an spu
 * becomes available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	if (ctx->spu)
		return 0;

	do {
		struct spu *spu;

		spu = spu_get_idle(ctx);
		/*
		 * If this is a realtime thread we try to get it running by
		 * preempting a lower priority thread.
		 */
		if (!spu && ctx->rt_priority)
			spu = find_victim(ctx);
		if (spu) {
			spu_bind_context(spu, ctx);
			return 0;
		}

		spu_prio_wait(ctx);
	} while (!signal_pending(current));

	return -ERESTARTSYS;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx: spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	struct spu *spu = ctx->spu;

	if (spu) {
		spu_unbind_context(spu, ctx);
		spu_reschedule(spu);
	}
}

/**
 * spu_yield - yield a physical spu if others are waiting
 * @ctx: spu context to yield
 *
 * Check if there is a higher priority context waiting and if so
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	struct spu *spu;
	int need_yield = 0;

	if (mutex_trylock(&ctx->state_mutex)) {
		if ((spu = ctx->spu) != NULL) {
			int best = sched_find_first_bit(spu_prio->bitmap);
			if (best < MAX_PRIO) {
				pr_debug("%s: yielding SPU %d NODE %d\n",
					 __FUNCTION__, spu->number, spu->node);
				spu_deactivate(ctx);
				need_yield = 1;
			}
		}
		mutex_unlock(&ctx->state_mutex);
	}
	if (unlikely(need_yield))
		yield();
}

int __init spu_sched_init(void)
{
	int i;

	spu_sched_wq = create_singlethread_workqueue("spusched");
	if (!spu_sched_wq)
		return 1;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio) {
		printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
		       __FUNCTION__);
		destroy_workqueue(spu_sched_wq);
		return 1;
	}
	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	/*
	 * Bit MAX_PRIO stays set so that sched_find_first_bit() returns
	 * MAX_PRIO when the runqueue is empty.
	 */
	__set_bit(MAX_PRIO, spu_prio->bitmap);
	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&spu_prio->active_mutex[i]);
		INIT_LIST_HEAD(&spu_prio->active_list[i]);
	}
	spin_lock_init(&spu_prio->runq_lock);
	return 0;
}

void __exit spu_sched_exit(void)
{
	struct spu *spu, *tmp;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
					 list) {
			list_del_init(&spu->list);
			spu_free(spu);
		}
		mutex_unlock(&spu_prio->active_mutex[node]);
	}
	kfree(spu_prio);
	destroy_workqueue(spu_sched_wq);
}