/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}
static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}
static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}
/**
 * Select next job from a specified run queue with round robin policy.
 * Return NULL if nothing is available.
 */
static struct amd_sched_job *
amd_sched_rq_select_job(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;
	struct amd_sched_job *sched_job;

	spin_lock(&rq->lock);

	/* Resume the round robin after the last served entity */
	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			sched_job = amd_sched_entity_pop_job(entity);
			if (sched_job) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return sched_job;
			}
		}
	}

	/* Wrap around, stopping after one full round */
	list_for_each_entry(entity, &rq->entities, list) {
		sched_job = amd_sched_entity_pop_job(entity);
		if (sched_job) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return sched_job;
		}
		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);
	return NULL;
}
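
/*
 * Worked example of the policy above (illustrative, not part of the
 * driver): with entities A, B and C queued in that order and
 * rq->current_entity == B, the first loop tries only C; the second loop
 * then tries A and stops once it is back at B, so a single call never
 * scans an entity twice and a run queue whose entities are all idle
 * returns NULL instead of looping forever.
 */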
/**
 * Init a context entity used by the scheduler when submitting to the HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->queue_lock);
	/* The job queue stores one pointer per queued job */
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = fence_context_alloc(1);

	/* Add the entity to the run queue */
	amd_sched_rq_add_entity(rq, entity);

	return 0;
}
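
/*
 * Usage sketch (illustrative; "my_ctx" and the queue depth of 16 are
 * assumptions, not part of this file): a client typically embeds one
 * entity per context and picks the run queue by desired priority.
 *
 *	struct my_ctx {
 *		struct amd_sched_entity entity;
 *	};
 *
 *	static int my_ctx_init(struct amd_gpu_scheduler *sched,
 *			       struct my_ctx *ctx)
 *	{
 *		return amd_sched_entity_init(sched, &ctx->entity,
 *					     &sched->sched_rq, 16);
 *	}
 *
 * Passing &sched->kernel_rq instead would give the context the higher
 * kernel priority in amd_sched_select_job().
 */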
/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity is initialized, false otherwise.
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}
/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb();
	return kfifo_is_empty(&entity->job_queue);
}
/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini, consume
	 * existing queued IBs
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}
static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);

	/* The dependency signaled, drop it and poke the scheduler */
	entity->dependency = NULL;
	fence_put(f);
	amd_sched_wakeup(entity->sched);
}
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	/* Still waiting for a dependency to signal */
	if (ACCESS_ONCE(entity->dependency))
		return NULL;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job))) {
		/* If the fence is already signaled, drop it and check the
		 * next one; otherwise wait for the wakeup callback. */
		if (fence_add_callback(entity->dependency, &entity->cb,
				       amd_sched_entity_wakeup))
			fence_put(entity->dependency);
		else
			return NULL;
	}

	return sched_job;
}
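
/*
 * Sketch of a backend dependency() hook (hypothetical; "my_job" and
 * my_next_unsignaled_fence() are illustrative names): the hook hands back
 * the next fence the job still waits on, or NULL once the job is
 * runnable, which is what terminates the while loop above.
 *
 *	static struct fence *my_dependency(struct amd_sched_job *sched_job)
 *	{
 *		struct my_job *job = container_of(sched_job, struct my_job,
 *						  base);
 *
 *		return my_next_unsignaled_fence(job);
 *	}
 */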
/**
 * Helper to submit a job to the job queue
 *
 * @sched_job	The pointer to the job required to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			 sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first)
		amd_sched_wakeup(sched_job->sched);

	return added;
}
/**
 * Submit a job to the job queue
 *
 * @sched_job	The pointer to the job required to submit
 *
 * Returns 0 for success, negative error code otherwise.
 */
int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;
	struct amd_sched_fence *fence = amd_sched_fence_create(
		entity, sched_job->owner);

	if (!fence)
		return -ENOMEM;

	fence_get(&fence->base);
	sched_job->s_fence = fence;

	/* Block until the entity's job queue accepts the job */
	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
	trace_amd_sched_job(sched_job);
	return 0;
}
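
/*
 * Submission sketch (illustrative; the job setup is driver specific and
 * hypothetical here): the caller fills in the scheduler fields before
 * pushing, and must be prepared to sleep since amd_sched_entity_in()
 * fails while the entity's job queue is full.
 *
 *	job->s_entity = &ctx->entity;
 *	job->sched = sched;
 *	job->owner = client;
 *	r = amd_sched_entity_push_job(job);
 *	if (r)
 *		return r;
 */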
/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}
/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}
/**
 * Select next job to run, or NULL if the hw can't take more.
 */
static struct amd_sched_job *
amd_sched_select_job(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *sched_job;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
	if (sched_job == NULL)
		sched_job = amd_sched_rq_select_job(&sched->sched_rq);

	return sched_job;
}
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;

	/* The hw retired a job: free a slot and signal the scheduler fence */
	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_signal(s_fence);
	fence_put(&s_fence->base);
	wake_up_interruptible(&sched->wake_up_worker);
}
static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
			kthread_should_stop() ||
			(sched_job = amd_sched_select_job(sched)));

		if (!sched_job)
			continue;

		entity = sched_job->s_entity;
		s_fence = sched_job->s_fence;
		atomic_inc(&sched->hw_rq_count);
		fence = sched->ops->run_job(sched_job);
		if (fence) {
			r = fence_add_callback(fence, &s_fence->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				/* Fence already signaled, complete inline */
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		count = kfifo_out(&entity->job_queue, &sched_job,
				  sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}
/**
 * Init a gpu scheduler instance
 *
 * @sched		The pointer to the scheduler
 * @ops			The backend operations for this scheduler.
 * @hw_submission	Number of hw submissions that can be in flight.
 * @name		Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   struct amd_sched_backend_ops *ops,
		   unsigned hw_submission, const char *name)
{
	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	amd_sched_rq_init(&sched->sched_rq);
	amd_sched_rq_init(&sched->kernel_rq);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	atomic_set(&sched->hw_rq_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}
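
/*
 * Wiring sketch (illustrative; "my_dependency", "my_run_job" and the ring
 * structure are assumptions, and only the .dependency and .run_job hooks
 * are exercised in this file): a driver supplies the backend ops and one
 * scheduler instance per hardware ring.
 *
 *	static struct amd_sched_backend_ops my_ops = {
 *		.dependency = my_dependency,
 *		.run_job = my_run_job,
 *	};
 *
 *	r = amd_sched_init(&ring->sched, &my_ops, 4, "my_ring");
 *
 * Here hw_submission = 4 caps the number of jobs in flight; once
 * hw_rq_count reaches it, amd_sched_ready() keeps the worker asleep until
 * amd_sched_process_job() retires a job.
 */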
/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	kthread_stop(sched->thread);
}
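
/*
 * Teardown ordering sketch (illustrative): entities must be finished
 * before the scheduler itself, because amd_sched_entity_fini() sleeps on
 * sched->job_scheduled, which only the scheduler thread wakes.
 *
 *	amd_sched_entity_fini(&ring->sched, &ctx->entity);
 *	amd_sched_fini(&ring->sched);
 */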