/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
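
/*
 * Slab for scheduler fences, shared by all scheduler instances and
 * refcounted in amd_sched_init()/amd_sched_fini() so that the last
 * scheduler to go away destroys it.
 */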
struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq		The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (amd_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}
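
	/*
	 * Nothing ready after the last selected entity; scan the list again
	 * from the head, stopping once we are back at rq->current_entity,
	 * so entities are served round robin.
	 */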
	list_for_each_entry(entity, &rq->entities, list) {
		if (amd_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity used by scheduler when submitting to a HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->queue_lock);
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
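	/*
	 * Two fence contexts: one for the scheduled fences and one for the
	 * finished fences handed out with each job.
	 */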
	entity->fence_context = fence_context_alloc(2);

	return 0;
}

/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity is initialized, false otherwise.
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity has no unscheduled jobs left.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Check if entity is ready
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
	if (kfifo_is_empty(&entity->job_queue))
		return false;

	if (ACCESS_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini; consume the
	 * existing queued ones.
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}

static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
	amd_sched_wakeup(entity->sched);
}
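
/*
 * Same as amd_sched_entity_wakeup(), but without kicking the scheduler;
 * used when we only wait for a dependency to be scheduled, not finished.
 */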
static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
}
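
/*
 * Returns true if a callback was installed and the job has to wait for
 * the dependency, false if the dependency could be resolved right away.
 */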
static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct fence *fence = entity->dependency;
	struct amd_sched_fence *s_fence;

	if (fence->context == entity->fence_context) {
		/* We can ignore fences from ourself */
		fence_put(entity->dependency);
		return false;
	}

	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {
		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled.
		 */
		fence = fence_get(&s_fence->scheduled);
		fence_put(entity->dependency);
		entity->dependency = fence;
		if (!fence_add_callback(fence, &entity->cb,
					amd_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		fence_put(fence);
		return false;
	}

	if (!fence_add_callback(entity->dependency, &entity->cb,
				amd_sched_entity_wakeup))
		return true;

	fence_put(entity->dependency);
	return false;
}

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;
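
	/*
	 * Walk the job's dependencies; stop at the first one that isn't
	 * resolved yet and leave the job in the queue until the installed
	 * callback wakes us up again.
	 */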
	while ((entity->dependency = sched->ops->dependency(sched_job)))
		if (amd_sched_entity_add_dependency_cb(entity))
			return NULL;

	return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job	The pointer to job required to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_gpu_scheduler *sched = sched_job->sched;
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			 sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		amd_sched_rq_add_entity(entity->rq, entity);
		amd_sched_wakeup(sched);
	}
	return added;
}

/*
 * amd_sched_job_finish is called after the hw fence has signaled; it
 * removes the job from the ring mirror list, re-arms the TDR timer for
 * the next pending job and frees the finished one.
 */
static void amd_sched_job_finish(struct work_struct *work)
{
	struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
						   finish_work);
	struct amd_gpu_scheduler *sched = s_job->sched;

	/* remove job from ring_mirror_list */
	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		struct amd_sched_job *next;

		spin_unlock(&sched->job_list_lock);
		cancel_delayed_work_sync(&s_job->work_tdr);
		spin_lock(&sched->job_list_lock);

		/* queue TDR for next job */
		next = list_first_entry_or_null(&sched->ring_mirror_list,
						struct amd_sched_job, node);

		if (next)
			schedule_delayed_work(&next->work_tdr, sched->timeout);
	}
	spin_unlock(&sched->job_list_lock);
	sched->ops->free_job(s_job);
}

static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
						 finish_cb);

	schedule_work(&job->finish_work);
}
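
/*
 * Track the job on the ring mirror list and start the timeout handler if
 * this job is the oldest one in flight and a finite timeout is configured.
 */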
static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
	struct amd_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    list_first_entry_or_null(&sched->ring_mirror_list,
				     struct amd_sched_job, node) == s_job)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
	spin_unlock(&sched->job_list_lock);
}

static void amd_sched_job_timedout(struct work_struct *work)
{
	struct amd_sched_job *job = container_of(work, struct amd_sched_job,
						 work_tdr.work);

	job->sched->ops->timedout_job(job);
}
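
/*
 * Disconnect all jobs on the mirror list from their hw fences before a
 * reset, so amd_sched_process_job() isn't called for them while the reset
 * is in progress; amd_sched_job_recovery() resubmits them afterwards.
 */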
void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
		if (fence_remove_callback(s_job->s_fence->parent,
					  &s_job->s_fence->cb)) {
			fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
		}
	}
	atomic_set(&sched->hw_rq_count, 0);
	spin_unlock(&sched->job_list_lock);
}
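
/* Resubmit all jobs that were pending at reset time to the hw ring */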
void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job;
	int r;

	spin_lock(&sched->job_list_lock);
	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
					 struct amd_sched_job, node);
	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);

	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
		struct amd_sched_fence *s_fence = s_job->s_fence;
		struct fence *fence = sched->ops->run_job(s_job);

		atomic_inc(&sched->hw_rq_count);
		if (fence) {
			s_fence->parent = fence_get(fence);
			r = fence_add_callback(fence, &s_fence->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}
	}
	spin_unlock(&sched->job_list_lock);
}

/**
 * Submit a job to the job queue
 *
 * @sched_job	The pointer to job required to submit
 *
 * Blocks until the job could be pushed into the entity's queue.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;

	trace_amd_sched_job(sched_job);
	fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
			   amd_sched_job_finish_cb);
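
	/*
	 * If the entity's kfifo is full, amd_sched_entity_in() fails and we
	 * sleep on job_scheduled until the scheduler has consumed a job.
	 */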
	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
}

/* init a sched_job with basic fields */
int amd_sched_job_init(struct amd_sched_job *job,
		       struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *entity,
		       void *owner)
{
	job->sched = sched;
	job->s_entity = entity;
	job->s_fence = amd_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;

	INIT_WORK(&job->finish_work, amd_sched_job_finish);
	INIT_LIST_HEAD(&job->node);
	INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);

	return 0;
}

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *entity;
	int i;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++) {
		entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_finished(s_fence);

	trace_amd_sched_process_job(s_fence);
	fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity = NULL;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
					 (!amd_sched_blocked(sched) &&
					  (entity = amd_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (!entity)
			continue;

		sched_job = amd_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		amd_sched_job_begin(sched_job);

		fence = sched->ops->run_job(sched_job);
		amd_sched_fence_scheduled(s_fence);
		if (fence) {
			s_fence->parent = fence_get(fence);
			r = fence_add_callback(fence, &s_fence->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}
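
		/*
		 * Only now remove the job from the entity's kfifo, so a
		 * blocked amd_sched_entity_push_job() can make progress.
		 */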
		count = kfifo_out(&entity->job_queue, &sched_job,
				  sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched		The pointer to the scheduler
 * @ops			The backend operations for this scheduler.
 * @hw_submission	Max number of hw submissions in flight.
 * @timeout		Job timeout in jiffies, or MAX_SCHEDULE_TIMEOUT to
 *			disable timeout handling.
 * @name		Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   const struct amd_sched_backend_ops *ops,
		   unsigned hw_submission, long timeout, const char *name)
{
	int i;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++)
		amd_sched_rq_init(&sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->ring_mirror_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
		sched_fence_slab = kmem_cache_create(
			"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!sched_fence_slab)
			return -ENOMEM;
	}

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}

/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
	if (atomic_dec_and_test(&sched_fence_slab_ref))
		kmem_cache_destroy(sched_fence_slab);
}