struct amd_sched_entity *entity,
void *owner, struct fence **fence)
{
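+ /* node used to put this job on the scheduler's ring_mirror_list */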
+ INIT_LIST_HEAD(&job->node);
job->sched = sched;
job->s_entity = entity;
job->s_fence = amd_sched_fence_create(entity, owner);
if (!job->s_fence)
return -ENOMEM;
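+ /* back-pointer so the job can be unlinked from the mirror list when its fence signals */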
+ job->s_fence->s_job = job;
+
if (fence)
*fence = &job->s_fence->base;
return 0;
unsigned long flags;
atomic_dec(&sched->hw_rq_count);
+
+ /* remove job from ring_mirror_list */
+ spin_lock_irqsave(&sched->job_list_lock, flags);
+ list_del_init(&s_fence->s_job->node);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
amd_sched_fence_signal(s_fence);
if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
cancel_delayed_work(&s_fence->dwork);
}
atomic_inc(&sched->hw_rq_count);
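+ /* mirror the job on ring_mirror_list before it is handed to the hardware */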
+ amd_sched_job_pre_schedule(sched, sched_job);
fence = sched->ops->run_job(sched_job);
amd_sched_fence_scheduled(s_fence);
if (fence) {
init_waitqueue_head(&sched->wake_up_worker);
init_waitqueue_head(&sched->job_scheduled);
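+ /* jobs handed to the hardware ring stay on this list until their fences signal */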
+ INIT_LIST_HEAD(&sched->ring_mirror_list);
+ spin_lock_init(&sched->job_list_lock);
atomic_set(&sched->hw_rq_count, 0);
if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
sched_fence_slab = kmem_cache_create(
void *owner;
struct delayed_work dwork;
struct list_head list;
+ struct amd_sched_job *s_job; /* job this fence belongs to */
};
struct amd_sched_job {
bool use_sched; /* true if the job goes to scheduler */
struct fence_cb cb_free_job;
struct work_struct work_free_job;
+ struct list_head node; /* entry in the scheduler's ring_mirror_list */
};
extern const struct fence_ops amd_sched_fence_ops;
struct list_head fence_list;
spinlock_t fence_list_lock;
struct task_struct *thread;
+ struct list_head ring_mirror_list; /* mirrors the jobs currently on the hardware ring */
+ spinlock_t job_list_lock; /* protects ring_mirror_list */
};
int amd_sched_job_init(struct amd_sched_job *job,
struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity,
void *owner, struct fence **fence);
+void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched,
+ struct amd_sched_job *s_job);
#endif
FENCE_TRACE(&fence->base, "was already signaled\n");
}
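+/* add the job to the ring mirror list just before it is pushed to the hardware ring */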
+void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched,
+ struct amd_sched_job *s_job)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sched->job_list_lock, flags);
+ list_add_tail(&s_job->node, &sched->ring_mirror_list);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+}
+
void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence)
{
struct fence_cb *cur, *tmp;