git.karo-electronics.de Git - linux-beck.git/commitdiff
drm/amdgpu: block scheduler when gpu reset
authorChunming Zhou <David1.Zhou@amd.com>
Sun, 12 Jun 2016 07:41:58 +0000 (15:41 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Thu, 7 Jul 2016 18:54:43 +0000 (14:54 -0400)
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c

index ae801e9cec94a9016c7a3fb5cd0559e9119dfff3..a7a84286a06ff09f7d5ad6917e49dd9ebd1d879e 100644 (file)
@@ -25,6 +25,7 @@
  *          Alex Deucher
  *          Jerome Glisse
  */
+#include <linux/kthread.h>
 #include <linux/console.h>
 #include <linux/slab.h>
 #include <linux/debugfs.h>
@@ -1895,6 +1896,14 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 
        atomic_inc(&adev->gpu_reset_counter);
 
+       /* block scheduler */
+       for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+               struct amdgpu_ring *ring = adev->rings[i];
+
+               if (!ring)
+                       continue;
+               kthread_park(ring->sched.thread);
+       }
        /* block TTM */
        resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
 
@@ -1928,7 +1937,7 @@ retry:
                        struct amdgpu_ring *ring = adev->rings[i];
                        if (!ring)
                                continue;
-
+                       kthread_unpark(ring->sched.thread);
                        amdgpu_ring_restore(ring, ring_sizes[i], ring_data[i]);
                        ring_sizes[i] = 0;
                        ring_data[i] = NULL;
@@ -1946,8 +1955,10 @@ retry:
        } else {
                amdgpu_fence_driver_force_completion(adev);
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-                       if (adev->rings[i])
+                       if (adev->rings[i]) {
+                               kthread_unpark(adev->rings[i]->sched.thread);
                                kfree(ring_data[i]);
+                       }
                }
        }
 
index b1d49c5d8e44f9f10a7b48fb8d5cc1fb7952e046..60f58f76a1ebed4923e7d3094070b35609144ccd 100644 (file)
@@ -476,6 +476,16 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
        wake_up_interruptible(&sched->wake_up_worker);
 }
 
+static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
+{
+       if (kthread_should_park()) {
+               kthread_parkme();
+               return true;
+       }
+
+       return false;
+}
+
 static int amd_sched_main(void *param)
 {
        struct sched_param sparam = {.sched_priority = 1};
@@ -485,14 +495,15 @@ static int amd_sched_main(void *param)
        sched_setscheduler(current, SCHED_FIFO, &sparam);
 
        while (!kthread_should_stop()) {
-               struct amd_sched_entity *entity;
+               struct amd_sched_entity *entity = NULL;
                struct amd_sched_fence *s_fence;
                struct amd_sched_job *sched_job;
                struct fence *fence;
 
                wait_event_interruptible(sched->wake_up_worker,
-                       (entity = amd_sched_select_entity(sched)) ||
-                       kthread_should_stop());
+                                        (!amd_sched_blocked(sched) &&
+                                         (entity = amd_sched_select_entity(sched))) ||
+                                        kthread_should_stop());
 
                if (!entity)
                        continue;