drm/amdgpu: add user fence context map v2
author    Christian König <christian.koenig@amd.com>
          Tue, 7 Jul 2015 15:24:49 +0000 (17:24 +0200)
committer Alex Deucher <alexander.deucher@amd.com>
          Mon, 17 Aug 2015 20:50:14 +0000 (16:50 -0400)
This is a prerequisite for the GPU scheduler to make the order
of submission independent of the order of execution.

v2: properly implement the locking

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
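
In short: each context now keeps, per ring, its own 64-bit sequence counter
plus a 16-entry ring buffer holding the fences of its most recent
submissions, and the handle returned to userspace becomes that per-context
sequence number rather than a global fence value. That stable, per-context
numbering is what lets the scheduler later reorder actual execution. A
minimal standalone model of the submission side (plain C with illustrative
names and int fence ids; not the kernel code):

#include <stdint.h>
#include <stdio.h>

#define PENDING 16	/* mirrors AMDGPU_CTX_MAX_CS_PENDING */

struct ring_model {
	uint64_t sequence;	/* starts at 1, as in amdgpu_ctx_alloc() */
	int fences[PENDING];	/* 0 = free slot, otherwise a fence id */
};

/* Models amdgpu_ctx_add_fence(): the fence takes slot seq % PENDING and
 * the counter advances; if the slot is still occupied, the kernel first
 * waits for the old fence, so a context never has more than PENDING
 * submissions in flight per ring.  Returns the userspace handle. */
static uint64_t add_fence(struct ring_model *m, int fence)
{
	uint64_t seq = m->sequence++;
	unsigned idx = (unsigned)(seq % PENDING);

	if (m->fences[idx])
		printf("slot %u busy: would wait on fence %d first\n",
		       idx, m->fences[idx]);
	m->fences[idx] = fence;
	return seq;
}

int main(void)
{
	struct ring_model m = { .sequence = 1 };
	int f;

	for (f = 1; f <= 17; ++f)	/* the 17th submission recycles slot 1 */
		printf("fence %d -> handle %llu\n", f,
		       (unsigned long long)add_fence(&m, f));
	return 0;
}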
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 70e783a849ed627111e7b318c1ff57a0dfb11fec..0220d98ba8bf3fa6c996898c2e05094f92b62cbf 100644
@@ -415,6 +415,8 @@ struct amdgpu_user_fence {
        struct amdgpu_bo        *bo;
        /* write-back address offset to bo start */
        uint32_t                offset;
+       /* resulting sequence number */
+       uint64_t                sequence;
 };
 
 int amdgpu_fence_driver_init(struct amdgpu_device *adev);
@@ -985,9 +987,18 @@ struct amdgpu_vm_manager {
  * context related structures
  */
 
+#define AMDGPU_CTX_MAX_CS_PENDING      16
+
+struct amdgpu_ctx_ring {
+       uint64_t        sequence;
+       struct fence    *fences[AMDGPU_CTX_MAX_CS_PENDING];
+};
+
 struct amdgpu_ctx {
        struct kref             refcount;
        unsigned                reset_counter;
+       spinlock_t              ring_lock;
+       struct amdgpu_ctx_ring  rings[AMDGPU_MAX_RINGS];
 };
 
 struct amdgpu_ctx_mgr {
@@ -1007,6 +1018,11 @@ void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv);
 struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
 int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
 
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+                             struct fence *fence);
+struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+                                  struct amdgpu_ring *ring, uint64_t seq);
+
 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *filp);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 53e6a10fe9f920ccdf863d17904024fdfbdab8bd..cef8360698bef0ffdfed426202c7d051046d679f 100644
@@ -698,9 +698,9 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
                        sizeof(struct drm_amdgpu_cs_chunk_dep);
 
                for (j = 0; j < num_deps; ++j) {
-                       struct amdgpu_fence *fence;
                        struct amdgpu_ring *ring;
                        struct amdgpu_ctx *ctx;
+                       struct fence *fence;
 
                        r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
                                               deps[j].ip_instance,
@@ -712,20 +712,20 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
                        if (ctx == NULL)
                                return -EINVAL;
 
-                       r = amdgpu_fence_recreate(ring, p->filp,
-                                                 deps[j].handle,
-                                                 &fence);
-                       if (r) {
+                       fence = amdgpu_ctx_get_fence(ctx, ring,
+                                                    deps[j].handle);
+                       if (IS_ERR(fence)) {
+                               r = PTR_ERR(fence);
                                amdgpu_ctx_put(ctx);
                                return r;
-                       }
-
-                       r = amdgpu_sync_fence(adev, &ib->sync, &fence->base);
-                       amdgpu_fence_unref(&fence);
-                       amdgpu_ctx_put(ctx);
 
-                       if (r)
-                               return r;
+                       } else if (fence) {
+                               r = amdgpu_sync_fence(adev, &ib->sync, fence);
+                               fence_put(fence);
+                               amdgpu_ctx_put(ctx);
+                               if (r)
+                                       return r;
+                       }
                }
        }
 
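The new lookup gives the dependency loop above three cases to distinguish:
an ERR_PTR(-EINVAL) for a sequence number this context never issued, NULL
for one that already slipped out of the 16-entry window (its fence is
guaranteed signaled, so there is nothing to sync against), or a referenced
fence the caller must release with fence_put(). A standalone model of that
contract (plain C with mock types; illustrative only):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define PENDING 16	/* mirrors AMDGPU_CTX_MAX_CS_PENDING */

struct ring_model { uint64_t sequence; int fences[PENDING]; };

/* Three-way contract of the lookup: a negative value models
 * ERR_PTR(-EINVAL), 0 models the NULL return (older than the window,
 * guaranteed signaled), and a positive value models a live fence. */
static int get_fence(const struct ring_model *m, uint64_t seq)
{
	if (seq >= m->sequence)
		return -EINVAL;
	if (seq < m->sequence - PENDING)
		return 0;
	return m->fences[seq % PENDING];
}

int main(void)
{
	struct ring_model m = { .sequence = 41 };	/* seqs 1..40 issued */
	uint64_t s;

	for (s = 25; s <= 40; ++s)	/* the window now holds 25..40 */
		m.fences[s % PENDING] = (int)s;

	printf("%d %d %d\n",
	       get_fence(&m, 99),	/* -22: never issued */
	       get_fence(&m, 3),	/*   0: out of the window */
	       get_fence(&m, 30));	/*  30: still tracked */
	return 0;
}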
@@ -773,8 +773,11 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                r = amdgpu_cs_ib_fill(adev, &parser);
        }
 
-       if (!r)
+       if (!r) {
                r = amdgpu_cs_dependencies(adev, &parser);
+               if (r)
+                       DRM_ERROR("Failed in the dependencies handling %d!\n", r);
+       }
 
        if (r) {
                amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
@@ -791,7 +794,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                goto out;
        }
 
-       cs->out.handle = parser.ibs[parser.num_ibs - 1].fence->seq;
+       cs->out.handle = parser.uf.sequence;
 out:
        amdgpu_cs_parser_fini(&parser, r, true);
        up_read(&adev->exclusive_lock);
@@ -814,30 +817,31 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
        union drm_amdgpu_wait_cs *wait = data;
        struct amdgpu_device *adev = dev->dev_private;
        unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
-       struct amdgpu_fence *fence = NULL;
        struct amdgpu_ring *ring = NULL;
        struct amdgpu_ctx *ctx;
+       struct fence *fence;
        long r;
 
+       r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
+                              wait->in.ring, &ring);
+       if (r)
+               return r;
+
        ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
        if (ctx == NULL)
                return -EINVAL;
 
-       r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
-                              wait->in.ring, &ring);
-       if (r) {
-               amdgpu_ctx_put(ctx);
-               return r;
-       }
+       fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
+       if (IS_ERR(fence))
+               r = PTR_ERR(fence);
 
-       r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence);
-       if (r) {
-               amdgpu_ctx_put(ctx);
-               return r;
-       }
+       else if (fence) {
+               r = fence_wait_timeout(fence, true, timeout);
+               fence_put(fence);
+
+       } else
+               r = 1;
 
-       r = fence_wait_timeout(&fence->base, true, timeout);
-       amdgpu_fence_unref(&fence);
        amdgpu_ctx_put(ctx);
        if (r < 0)
                return r;
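
The r handling above follows fence_wait_timeout()'s convention: negative is
an error, 0 means the timeout expired, positive is the time left when the
fence signaled. A NULL fence means the sequence already left the 16-entry
window and is by construction signaled, hence r = 1. A tiny model of that
mapping (plain C, illustrative only):

#include <stdio.h>

/* Models the r convention in amdgpu_cs_wait_ioctl() above:
 * r < 0 error, r == 0 timed out, r > 0 signaled (time left). */
static const char *cs_wait_status(long r)
{
	if (r < 0)
		return "error";
	return r == 0 ? "timed out" : "signaled";
}

int main(void)
{
	printf("%s %s %s\n", cs_wait_status(-22),
	       cs_wait_status(0), cs_wait_status(1));
	return 0;
}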
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index e63cfb7fa3900aedf93b69d3c021b54ff88f0175..c23bfd8fe4143ce4a11376977a70630c1bcdb972 100644
 static void amdgpu_ctx_do_release(struct kref *ref)
 {
        struct amdgpu_ctx *ctx;
+       unsigned i, j;
 
        ctx = container_of(ref, struct amdgpu_ctx, refcount);
+
+       for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+               for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
+                       fence_put(ctx->rings[i].fences[j]);
        kfree(ctx);
 }
 
 int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
                     uint32_t *id)
 {
-       int r;
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+       int i, r;
 
        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
@@ -55,6 +60,9 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
 
        memset(ctx, 0, sizeof(*ctx));
        kref_init(&ctx->refcount);
+       spin_lock_init(&ctx->ring_lock);
+       for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+               ctx->rings[i].sequence = 1;
        mutex_unlock(&mgr->lock);
 
        return 0;
@@ -177,3 +185,53 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
        kref_put(&ctx->refcount, amdgpu_ctx_do_release);
        return 0;
 }
+
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+                             struct fence *fence)
+{
+       struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
+       uint64_t seq = cring->sequence;
+       unsigned idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
+       struct fence *other = cring->fences[idx];
+
+       if (other) {
+               signed long r;
+               r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+               if (r < 0)
+                       DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+       }
+
+       fence_get(fence);
+
+       spin_lock(&ctx->ring_lock);
+       cring->fences[idx] = fence;
+       cring->sequence++;
+       spin_unlock(&ctx->ring_lock);
+
+       fence_put(other);
+
+       return seq;
+}
+
+struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+                                  struct amdgpu_ring *ring, uint64_t seq)
+{
+       struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
+       struct fence *fence;
+
+       spin_lock(&ctx->ring_lock);
+       if (seq >= cring->sequence) {
+               spin_unlock(&ctx->ring_lock);
+               return ERR_PTR(-EINVAL);
+       }
+
+       if (seq < cring->sequence - AMDGPU_CTX_MAX_CS_PENDING) {
+               spin_unlock(&ctx->ring_lock);
+               return NULL;
+       }
+
+       fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
+       spin_unlock(&ctx->ring_lock);
+
+       return fence;
+}
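
One plausible reading of what the "v2" locking buys: amdgpu_ctx_get_fence()
takes its fence_get() under ring_lock, while amdgpu_ctx_add_fence() swaps
the slot under the same lock and only calls fence_put(other) after
unlocking, so a concurrent lookup can never see a pointer whose last
reference is already gone. A minimal refcount model of that handoff
(plain C, illustrative only; real fences use kref):

#include <stdio.h>

/* A bare counter stands in for struct fence's refcount. */
struct fence_model { int refcount; };

static void fence_get_model(struct fence_model *f) { f->refcount++; }
static void fence_put_model(struct fence_model *f) { f->refcount--; }

int main(void)
{
	struct fence_model old = { .refcount = 1 };	/* held by the slot */

	/* reader, under ring_lock: grab a reference before the
	 * slot can be recycled */
	fence_get_model(&old);

	/* writer, after unlocking: drop the slot's reference */
	fence_put_model(&old);

	printf("refcount %d: the reader's fence is still alive\n",
	       old.refcount);
	fence_put_model(&old);	/* reader done */
	return 0;
}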
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 2722815eddbbb72dfd88c6ed453758c681f5bc4b..95d533422a5b34f4f838b079b957f26c89723430 100644
@@ -219,8 +219,10 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
        /* wrap the last IB with fence */
        if (ib->user) {
                uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo);
+               ib->user->sequence = amdgpu_ctx_add_fence(ib->ctx, ring,
+                                                         &ib->fence->base);
                addr += ib->user->offset;
-               amdgpu_ring_emit_fence(ring, addr, ib->fence->seq,
+               amdgpu_ring_emit_fence(ring, addr, ib->user->sequence,
                                       AMDGPU_FENCE_FLAG_64BIT);
        }
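
With this change the user fence packet writes the per-context sequence, the
same value returned to userspace as cs->out.handle, into the user's BO at
the given offset, so userspace can poll that location to see which of its
submissions have completed. A minimal model of the write-back (plain C; the
u64-indexed BO and the values are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bo[8] = { 0 };	/* stand-in for the user fence BO */
	unsigned slot = 2;	/* derived from amdgpu_user_fence::offset */
	uint64_t sequence = 42;	/* amdgpu_ctx_add_fence() result */

	bo[slot] = sequence;	/* models the 64-bit write the packet triggers */
	printf("user fence location now reads %llu\n",
	       (unsigned long long)bo[slot]);
	return 0;
}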