drm/amdgpu: stop leaking the ctx id into the scheduler v2
author     Christian König <christian.koenig@amd.com>
           Tue, 4 Aug 2015 14:58:36 +0000 (16:58 +0200)
committer  Alex Deucher <alexander.deucher@amd.com>
           Mon, 17 Aug 2015 20:51:01 +0000 (16:51 -0400)
IDs are for the IOCTL ABI only.

v2: remove tgid as well

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
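
The diff below only shows the delta, so here is a minimal standalone C sketch of the resulting API shape after the patch. It is not kernel code: amd_run_queue, amd_gpu_scheduler and amd_sched_entity are left opaque, the entity body is reduced to a single illustrative jobs field, the init body is a stand-in for the real kfifo and sequence-counter setup, and main() plus the job count of 32 are invented for the example. Only the prototype (no context_id parameter, no tgid field) and the pointer-based identification mirror the patch.

/*
 * Standalone sketch (hypothetical, not the kernel code): models the effect
 * of this patch on the scheduler entity API.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct amd_run_queue;          /* opaque in this sketch */
struct amd_gpu_scheduler;      /* opaque in this sketch */
struct amd_sched_entity;       /* opaque in this sketch */

/* After the patch the entity no longer carries an IOCTL-visible identity. */
struct amd_context_entity {
	uint32_t jobs;         /* stand-in for the real job_queue kfifo    */
	/* pid_t    tgid;        -- removed in v2                          */
	/* uint32_t context_id;  -- removed                                */
};

/* New prototype: the context_id parameter is gone. */
static int amd_context_entity_init(struct amd_gpu_scheduler *sched,
				   struct amd_context_entity *entity,
				   struct amd_sched_entity *parent,
				   struct amd_run_queue *rq,
				   uint32_t jobs)
{
	(void)sched; (void)parent; (void)rq;
	memset(entity, 0, sizeof(*entity));
	entity->jobs = jobs;
	return 0;
}

int main(void)
{
	struct amd_context_entity entity;

	/* Call site shape as in amdgpu_ctx_alloc(): no ctx id is passed. */
	if (amd_context_entity_init(NULL, &entity, NULL, NULL, 32))
		return 1;

	/* Entities are now identified by pointer in debug output. */
	printf("entity %p initialised with %u jobs\n",
	       (void *)&entity, (unsigned)entity.jobs);
	return 0;
}

In the real driver the call in amdgpu_ctx_alloc() passes amdgpu_sched_jobs rather than a literal, and the pointer-based reporting corresponds to the %u-to-%p change in the DRM_INFO message in amd_context_entity_fini() below.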

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 3c353375b228079ec9f5fb7de1a0459d38c151b1..c2290ae20312b18798042fcf589531fd3355ae8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -50,8 +50,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
 
 static void amdgpu_ctx_init(struct amdgpu_device *adev,
                            struct amdgpu_fpriv *fpriv,
-                           struct amdgpu_ctx *ctx,
-                           uint32_t id)
+                           struct amdgpu_ctx *ctx)
 {
        int i;
        memset(ctx, 0, sizeof(*ctx));
@@ -81,7 +80,7 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
                        return r;
                }
                *id = (uint32_t)r;
-               amdgpu_ctx_init(adev, fpriv, ctx, *id);
+               amdgpu_ctx_init(adev, fpriv, ctx);
                mutex_unlock(&mgr->lock);
        } else {
                if (adev->kernel_ctx) {
@@ -89,8 +88,7 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
                        kfree(ctx);
                        return 0;
                }
-               *id = AMD_KERNEL_CONTEXT_ID;
-               amdgpu_ctx_init(adev, fpriv, ctx, *id);
+               amdgpu_ctx_init(adev, fpriv, ctx);
 
                adev->kernel_ctx = ctx;
        }
@@ -105,8 +103,7 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
                                rq = &adev->rings[i]->scheduler->kernel_rq;
                        r = amd_context_entity_init(adev->rings[i]->scheduler,
                                                    &ctx->rings[i].c_entity,
-                                                   NULL, rq, *id,
-                                                   amdgpu_sched_jobs);
+                                                   NULL, rq, amdgpu_sched_jobs);
                        if (r)
                                break;
                }
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 4ad1825e713e09abe7b22366d91ba2a9a5e81db8..b9aa572980d2a2710dd28bf97ab86a5986c2b420 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -172,7 +172,7 @@ exit:
  * @entity     The pointer to a valid amd_context_entity
  * @parent     The parent entity of this amd_context_entity
  * @rq         The run queue this entity belongs
- * @context_id The context id for this entity
+ * @kernel     If this is an entity for the kernel
  * @jobs       The max number of jobs in the job queue
  *
  * return 0 if succeed. negative error code on failure
@@ -181,7 +181,6 @@ int amd_context_entity_init(struct amd_gpu_scheduler *sched,
                            struct amd_context_entity *entity,
                            struct amd_sched_entity *parent,
                            struct amd_run_queue *rq,
-                           uint32_t context_id,
                            uint32_t jobs)
 {
        uint64_t seq_ring = 0;
@@ -203,9 +202,6 @@ int amd_context_entity_init(struct amd_gpu_scheduler *sched,
                return -EINVAL;
 
        spin_lock_init(&entity->queue_lock);
-       entity->tgid = (context_id == AMD_KERNEL_CONTEXT_ID) ?
-               AMD_KERNEL_PROCESS_ID : current->tgid;
-       entity->context_id = context_id;
        atomic64_set(&entity->last_emitted_v_seq, seq_ring);
        atomic64_set(&entity->last_queued_v_seq, seq_ring);
 
@@ -275,9 +271,9 @@ int amd_context_entity_fini(struct amd_gpu_scheduler *sched,
 
        if (r) {
                if (entity->is_pending)
-                       DRM_INFO("Entity %u is in waiting state during fini,\
+                       DRM_INFO("Entity %p is in waiting state during fini,\
                                all pending ibs will be canceled.\n",
-                                entity->context_id);
+                                entity);
        }
 
        mutex_lock(&rq->lock);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index fd6d699d42e1a48c58bca4f26203efe172a37f9c..c46d0854ab75468d1465176292d8820940045594 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -26,9 +26,6 @@
 
 #include <linux/kfifo.h>
 
-#define AMD_KERNEL_CONTEXT_ID                  0
-#define AMD_KERNEL_PROCESS_ID                  0
-
 #define AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS                3000
 
 struct amd_gpu_scheduler;
@@ -74,8 +71,6 @@ struct amd_context_entity {
        /* the virtual_seq is unique per context per ring */
        atomic64_t                      last_queued_v_seq;
        atomic64_t                      last_emitted_v_seq;
-       pid_t                           tgid;
-       uint32_t                        context_id;
        /* the job_queue maintains the jobs submitted by clients */
        struct kfifo                    job_queue;
        spinlock_t                      queue_lock;
@@ -148,7 +143,6 @@ int amd_context_entity_init(struct amd_gpu_scheduler *sched,
                            struct amd_context_entity *entity,
                            struct amd_sched_entity *parent,
                            struct amd_run_queue *rq,
-                           uint32_t context_id,
                            uint32_t jobs);
 
 void amd_sched_emit(struct amd_context_entity *c_entity, uint64_t seq);