Merge branch 'drm-next-4.13' of git://people.freedesktop.org/~agd5f/linux into drm...
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 5b3e0f63a1151f0b595e2f115fabcf74f14a49eb..a37bdf4f8e9b6ef6ef132f67cba64f65d283e152 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
-int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
-                      u32 ip_instance, u32 ring,
-                      struct amdgpu_ring **out_ring)
-{
-       /* Right now all IPs have only one instance - multiple rings. */
-       if (ip_instance != 0) {
-               DRM_ERROR("invalid ip instance: %d\n", ip_instance);
-               return -EINVAL;
-       }
-
-       switch (ip_type) {
-       default:
-               DRM_ERROR("unknown ip type: %d\n", ip_type);
-               return -EINVAL;
-       case AMDGPU_HW_IP_GFX:
-               if (ring < adev->gfx.num_gfx_rings) {
-                       *out_ring = &adev->gfx.gfx_ring[ring];
-               } else {
-                       DRM_ERROR("only %d gfx rings are supported now\n",
-                                 adev->gfx.num_gfx_rings);
-                       return -EINVAL;
-               }
-               break;
-       case AMDGPU_HW_IP_COMPUTE:
-               if (ring < adev->gfx.num_compute_rings) {
-                       *out_ring = &adev->gfx.compute_ring[ring];
-               } else {
-                       DRM_ERROR("only %d compute rings are supported now\n",
-                                 adev->gfx.num_compute_rings);
-                       return -EINVAL;
-               }
-               break;
-       case AMDGPU_HW_IP_DMA:
-               if (ring < adev->sdma.num_instances) {
-                       *out_ring = &adev->sdma.instance[ring].ring;
-               } else {
-                       DRM_ERROR("only %d SDMA rings are supported\n",
-                                 adev->sdma.num_instances);
-                       return -EINVAL;
-               }
-               break;
-       case AMDGPU_HW_IP_UVD:
-               *out_ring = &adev->uvd.ring;
-               break;
-       case AMDGPU_HW_IP_VCE:
-               if (ring < adev->vce.num_rings){
-                       *out_ring = &adev->vce.ring[ring];
-               } else {
-                       DRM_ERROR("only %d VCE rings are supported\n", adev->vce.num_rings);
-                       return -EINVAL;
-               }
-               break;
-       case AMDGPU_HW_IP_UVD_ENC:
-               if (ring < adev->uvd.num_enc_rings){
-                       *out_ring = &adev->uvd.ring_enc[ring];
-               } else {
-                       DRM_ERROR("only %d UVD ENC rings are supported\n",
-                               adev->uvd.num_enc_rings);
-                       return -EINVAL;
-               }
-               break;
-       }
-
-       if (!(*out_ring && (*out_ring)->adev)) {
-               DRM_ERROR("Ring %d is not initialized on IP %d\n",
-                         ring, ip_type);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
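
The per-IP switch above is deleted outright; callers are converted to the
per-context queue manager instead. A minimal sketch of the replacement call
pattern, with the argument order taken from the call sites in the hunks
below (the GFX constants are purely illustrative):

	struct amdgpu_ring *ring = NULL;
	int r;

	/* Map a user-visible (ip_type, ip_instance, ring) triple onto a
	 * hardware ring via the queue manager embedded in the context. */
	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
				 AMDGPU_HW_IP_GFX, /* ip_type */
				 0,                /* ip_instance */
				 0,                /* user ring index */
				 &ring);
	if (r)
		return r;

Unlike amdgpu_cs_get_ring(), the mapping is per-context state, so a context
reference must be held across the call.
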
 static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
                                      struct drm_amdgpu_cs_chunk_fence *data,
                                      uint32_t *offset)
@@ -597,7 +525,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                        goto error_free_pages;
                }
 
-               /* Fill the page arrays for all useptrs. */
+               /* Fill the page arrays for all userptrs. */
                list_for_each_entry(e, &need_pages, tv.head) {
                        struct ttm_tt *ttm = e->robj->tbo.ttm;
 
@@ -917,9 +845,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
                                return -EINVAL;
                }
 
-               r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
-                                      chunk_ib->ip_instance, chunk_ib->ring,
-                                      &ring);
+               r = amdgpu_queue_mgr_map(adev, &parser->ctx->queue_mgr, chunk_ib->ip_type,
+                                        chunk_ib->ip_instance, chunk_ib->ring, &ring);
                if (r)
                        return r;
 
@@ -1021,16 +948,19 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
                        struct amdgpu_ctx *ctx;
                        struct dma_fence *fence;
 
-                       r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
-                                              deps[j].ip_instance,
-                                              deps[j].ring, &ring);
-                       if (r)
-                               return r;
-
                        ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
                        if (ctx == NULL)
                                return -EINVAL;
 
+                       r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
+                                                deps[j].ip_type,
+                                                deps[j].ip_instance,
+                                                deps[j].ring, &ring);
+                       if (r) {
+                               amdgpu_ctx_put(ctx);
+                               return r;
+                       }
+
                        fence = amdgpu_ctx_get_fence(ctx, ring,
                                                     deps[j].handle);
                        if (IS_ERR(fence)) {
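
The dependency loop is reordered so that the context reference is taken
before the ring is mapped, since the queue manager lives inside the context;
every error path after amdgpu_ctx_get() must then balance it with
amdgpu_ctx_put(). A condensed sketch of the full lifecycle this patch
establishes (ctx_id, user_ring, and handle stand in for the per-ioctl
fields):

	ctx = amdgpu_ctx_get(fpriv, ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr, ip_type,
				 ip_instance, user_ring, &ring);
	if (r) {
		amdgpu_ctx_put(ctx);	/* balance the get on failure */
		return r;
	}

	fence = amdgpu_ctx_get_fence(ctx, ring, handle);
	amdgpu_ctx_put(ctx);		/* the dma_fence is refcounted on its own */
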
@@ -1086,6 +1016,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
        struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_fpriv *fpriv = filp->driver_priv;
        union drm_amdgpu_cs *cs = data;
        struct amdgpu_cs_parser parser = {};
        bool reserved_buffers = false;
@@ -1093,6 +1024,8 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
        if (!adev->accel_working)
                return -EBUSY;
+       if (amdgpu_kms_vram_lost(adev, fpriv))
+               return -ENODEV;
 
        parser.adev = adev;
        parser.filp = filp;
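
A new amdgpu_kms_vram_lost() guard is added to each submission and wait
ioctl: if VRAM contents were lost (for example across a GPU reset), the
ioctl bails out early with -ENODEV instead of operating on stale buffers.
The shared entry-point pattern, sketched (fpriv comes from
filp->driver_priv, as in the hunk above):

	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	if (!adev->accel_working)
		return -EBUSY;		/* acceleration never came up */
	if (amdgpu_kms_vram_lost(adev, fpriv))
		return -ENODEV;		/* VRAM contents are gone; reject */
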
@@ -1154,21 +1087,28 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 {
        union drm_amdgpu_wait_cs *wait = data;
        struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_fpriv *fpriv = filp->driver_priv;
        unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
        struct amdgpu_ring *ring = NULL;
        struct amdgpu_ctx *ctx;
        struct dma_fence *fence;
        long r;
 
-       r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
-                              wait->in.ring, &ring);
-       if (r)
-               return r;
+       if (amdgpu_kms_vram_lost(adev, fpriv))
+               return -ENODEV;
 
        ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
        if (ctx == NULL)
                return -EINVAL;
 
+       r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
+                                wait->in.ip_type, wait->in.ip_instance,
+                                wait->in.ring, &ring);
+       if (r) {
+               amdgpu_ctx_put(ctx);
+               return r;
+       }
+
        fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
        if (IS_ERR(fence))
                r = PTR_ERR(fence);
@@ -1204,15 +1144,17 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
        struct dma_fence *fence;
        int r;
 
-       r = amdgpu_cs_get_ring(adev, user->ip_type, user->ip_instance,
-                              user->ring, &ring);
-       if (r)
-               return ERR_PTR(r);
-
        ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
        if (ctx == NULL)
                return ERR_PTR(-EINVAL);
 
+       r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr, user->ip_type,
+                                user->ip_instance, user->ring, &ring);
+       if (r) {
+               amdgpu_ctx_put(ctx);
+               return ERR_PTR(r);
+       }
+
        fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
        amdgpu_ctx_put(ctx);
 
@@ -1333,12 +1275,15 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
 {
        struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_fpriv *fpriv = filp->driver_priv;
        union drm_amdgpu_wait_fences *wait = data;
        uint32_t fence_count = wait->in.fence_count;
        struct drm_amdgpu_fence *fences_user;
        struct drm_amdgpu_fence *fences;
        int r;
 
+       if (amdgpu_kms_vram_lost(adev, fpriv))
+               return -ENODEV;
        /* Get the fences from userspace */
        fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
                        GFP_KERNEL);
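
The fence array is sized from a user-supplied count, so kmalloc_array() is
used for an overflow-checked allocation before the array is copied in. A
hedged sketch of how the copy-in presumably continues past the end of this
hunk (the copy_from_user() step and the cleanup label are assumptions; only
the allocation is visible here):

	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
			       GFP_KERNEL);
	if (fences == NULL)
		return -ENOMEM;

	/* Assumption: wait->in.fences carries the user pointer, as in the
	 * other amdgpu wait ioctls. */
	fences_user = u64_to_user_ptr(wait->in.fences);
	if (copy_from_user(fences, fences_user,
			   sizeof(struct drm_amdgpu_fence) * fence_count)) {
		r = -EFAULT;
		goto err_free_fences;	/* hypothetical cleanup label */
	}
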