drm/amdgpu: handle more than 10 UVD sessions (v2)
author     Arindam Nath <arindam.nath@amd.com>
           Tue, 12 Apr 2016 11:46:15 +0000 (13:46 +0200)
committer  Alex Deucher <alexander.deucher@amd.com>
           Thu, 5 May 2016 00:20:23 +0000 (20:20 -0400)
Change History
--------------

v2:
- Make the firmware version check correct: firmware
  versions >= 1.80 all support 40 UVD instances
  (see the sketch after this change history).
- Replace AMDGPU_MAX_UVD_HANDLES with the max_handles
  variable.

v1:
- The firmware can handle up to 40 UVD sessions.
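
For reference, a minimal standalone sketch of the v2 version gate (plain C,
compiled in userspace, not the kernel code itself). The two handle-count
defines and the comparison mirror the diff below; uvd_max_handles() and
main() are helpers invented only for this illustration:

#include <stdio.h>

#define AMDGPU_DEFAULT_UVD_HANDLES     10
#define AMDGPU_MAX_UVD_HANDLES         40

/* Firmware 1.80 (minor 80 == 0x50) and anything newer handles 40 sessions. */
static unsigned uvd_max_handles(unsigned version_major, unsigned version_minor)
{
        if ((version_major > 0x01) ||
            ((version_major == 0x01) && (version_minor >= 0x50)))
                return AMDGPU_MAX_UVD_HANDLES;
        return AMDGPU_DEFAULT_UVD_HANDLES;
}

int main(void)
{
        printf("1.79 -> %u handles\n", uvd_max_handles(0x01, 0x4f)); /* 10 */
        printf("1.80 -> %u handles\n", uvd_max_handles(0x01, 0x50)); /* 40 */
        printf("2.00 -> %u handles\n", uvd_max_handles(0x02, 0x00)); /* 40 */
        return 0;
}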

Signed-off-by: Arindam Nath <arindam.nath@amd.com>
Signed-off-by: Ayyappa Chandolu <ayyappa.chandolu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d1ad7634f351e80005dcd3d202f06810a0c51b40..c9fe2d56cebf0e0c495eb1b08f99e0c7e3eff1bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1593,16 +1593,19 @@ void amdgpu_get_pcie_info(struct amdgpu_device *adev);
 /*
  * UVD
  */
-#define AMDGPU_MAX_UVD_HANDLES 10
-#define AMDGPU_UVD_STACK_SIZE  (1024*1024)
-#define AMDGPU_UVD_HEAP_SIZE   (1024*1024)
-#define AMDGPU_UVD_FIRMWARE_OFFSET 256
+#define AMDGPU_DEFAULT_UVD_HANDLES     10
+#define AMDGPU_MAX_UVD_HANDLES         40
+#define AMDGPU_UVD_STACK_SIZE          (200*1024)
+#define AMDGPU_UVD_HEAP_SIZE           (256*1024)
+#define AMDGPU_UVD_SESSION_SIZE                (50*1024)
+#define AMDGPU_UVD_FIRMWARE_OFFSET     256
 
 struct amdgpu_uvd {
        struct amdgpu_bo        *vcpu_bo;
        void                    *cpu_addr;
        uint64_t                gpu_addr;
        void                    *saved_bo;
+       unsigned                max_handles;
        atomic_t                handles[AMDGPU_MAX_UVD_HANDLES];
        struct drm_file         *filp[AMDGPU_MAX_UVD_HANDLES];
        struct delayed_work     idle_work;
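
Back-of-envelope, these defines make the non-firmware part of the VCPU BO
scale with the session count instead of being fixed: the old layout always
reserved 1024 KiB of stack plus 1024 KiB of heap (2048 KiB besides the
firmware image), while the new one uses 256 KiB of heap, 200 KiB of stack
and 50 KiB per session, i.e. about 956 KiB with the default 10 handles and
2456 KiB with the full 40.
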
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 338da80006b66ced7671b5ed63fabb02750355ba..76ebc109e5e7ad39e68f82679aea87dafb2cf230 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -151,6 +151,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
                return r;
        }
 
+       /* Set the default UVD handles that the firmware can handle */
+       adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;
+
        hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
        family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
        version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
@@ -158,8 +161,19 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
        DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
                version_major, version_minor, family_id);
 
+       /*
+        * Limit the number of UVD handles depending on microcode major
+        * and minor versions. The firmware version which has 40 UVD
+        * instances support is 1.80. So all subsequent versions should
+        * also have the same support.
+        */
+       if ((version_major > 0x01) ||
+           ((version_major == 0x01) && (version_minor >= 0x50)))
+               adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
+
        bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
-                +  AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
+                 +  AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
+                 +  AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
        r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
                             AMDGPU_GEM_DOMAIN_VRAM,
                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
@@ -202,7 +216,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
                return r;
        }
 
-       for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+       for (i = 0; i < adev->uvd.max_handles; ++i) {
                atomic_set(&adev->uvd.handles[i], 0);
                adev->uvd.filp[i] = NULL;
        }
@@ -248,7 +262,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
        if (adev->uvd.vcpu_bo == NULL)
                return 0;
 
-       for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+       for (i = 0; i < adev->uvd.max_handles; ++i)
                if (atomic_read(&adev->uvd.handles[i]))
                        break;
 
@@ -303,7 +317,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
        struct amdgpu_ring *ring = &adev->uvd.ring;
        int i, r;
 
-       for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+       for (i = 0; i < adev->uvd.max_handles; ++i) {
                uint32_t handle = atomic_read(&adev->uvd.handles[i]);
                if (handle != 0 && adev->uvd.filp[i] == filp) {
                        struct fence *fence;
@@ -563,7 +577,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
                amdgpu_bo_kunmap(bo);
 
                /* try to alloc a new handle */
-               for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+               for (i = 0; i < adev->uvd.max_handles; ++i) {
                        if (atomic_read(&adev->uvd.handles[i]) == handle) {
                                DRM_ERROR("Handle 0x%x already in use!\n", handle);
                                return -EINVAL;
@@ -586,7 +600,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
                        return r;
 
                /* validate the handle */
-               for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+               for (i = 0; i < adev->uvd.max_handles; ++i) {
                        if (atomic_read(&adev->uvd.handles[i]) == handle) {
                                if (adev->uvd.filp[i] != ctx->parser->filp) {
                                        DRM_ERROR("UVD handle collision detected!\n");
@@ -601,7 +615,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 
        case 2:
                /* it's a destroy msg, free the handle */
-               for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+               for (i = 0; i < adev->uvd.max_handles; ++i)
                        atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
                amdgpu_bo_kunmap(bo);
                return 0;
@@ -1013,7 +1027,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 
        fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
 
-       for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+       for (i = 0; i < adev->uvd.max_handles; ++i)
                if (atomic_read(&adev->uvd.handles[i]))
                        ++handles;
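
One detail worth noting in the hunks above: the handles[] and filp[] arrays
stay statically sized at AMDGPU_MAX_UVD_HANDLES (now 40), while every scan
over them is bounded by adev->uvd.max_handles, so on pre-1.80 firmware the
upper 30 slots simply go unused. Below is a small userspace sketch of that
pattern, using C11 atomics in place of the kernel's atomic_t; the table,
the max_handles limit and free_handle() are invented for the example, and
only the cmpxchg-style destroy loop mirrors the diff:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define AMDGPU_MAX_UVD_HANDLES 40

static atomic_uint handles[AMDGPU_MAX_UVD_HANDLES]; /* sized for the maximum */
static unsigned max_handles = 10;  /* raised to 40 once firmware >= 1.80 is seen */

/* Destroy-message path: clear whichever slot currently owns the handle. */
static void free_handle(uint32_t handle)
{
        for (unsigned i = 0; i < max_handles; ++i) {
                unsigned expected = handle;
                atomic_compare_exchange_strong(&handles[i], &expected, 0);
        }
}

int main(void)
{
        atomic_store(&handles[3], 0x42);
        free_handle(0x42);
        printf("slot 3 after destroy: %u\n", atomic_load(&handles[3])); /* 0 */
        return 0;
}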
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index cb463753115b8d4f8266464a9d69956be21a5750..0d6b9e2150ccff5077b3d0238955bd0461e6fb65 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -559,12 +559,13 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
        WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
 
        addr += size;
-       size = AMDGPU_UVD_STACK_SIZE >> 3;
+       size = AMDGPU_UVD_HEAP_SIZE >> 3;
        WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
        WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
 
        addr += size;
-       size = AMDGPU_UVD_HEAP_SIZE >> 3;
+       size = (AMDGPU_UVD_STACK_SIZE +
+              (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
        WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
        WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index de459c8000a708188c7cc9afb3a3d3bf9d5858fc..84abf89ef4f88ae519da13e7d270b0e0cfb41d1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -272,12 +272,13 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
        WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
 
        offset += size;
-       size = AMDGPU_UVD_STACK_SIZE;
+       size = AMDGPU_UVD_HEAP_SIZE;
        WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
 
        offset += size;
-       size = AMDGPU_UVD_HEAP_SIZE;
+       size = AMDGPU_UVD_STACK_SIZE +
+              (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
        WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 372d70a0daeca56a8d334b029f9463a5beb031c8..c633b1a26a7e2c10a4308ba9862b5ef3cb0c9bcb 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -272,18 +272,21 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
        WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
 
        offset += size;
-       size = AMDGPU_UVD_STACK_SIZE;
+       size = AMDGPU_UVD_HEAP_SIZE;
        WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
 
        offset += size;
-       size = AMDGPU_UVD_HEAP_SIZE;
+       size = AMDGPU_UVD_STACK_SIZE +
+              (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
        WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
 
        WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+
+       WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
 }
 
 #if 0
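
Taken together, the three mc_resume() hunks resize the VCPU cache windows to
match the BO layout set up in amdgpu_uvd_sw_init(): window 0 still covers the
firmware image, window 1 now covers the heap, and window 2 grows to the stack
plus the per-session areas. On UVD 6.0 the chosen handle count is also written
to the new GP_SCRATCH4 register, presumably so the firmware knows how many
sessions the driver expects it to track. Roughly:

        window 0: firmware image
        window 1: heap               (256 KiB)
        window 2: stack + sessions   (200 KiB + 50 KiB * max_handles)
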
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
index b2d4aaf045bcca565ce9537aefb4122712a4be99..6f6fb34742d299a46d2b8d50764a43f732d9de83 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
 #define mmUVD_MIF_RECON1_ADDR_CONFIG                                            0x39c5
 #define ixUVD_MIF_SCLR_ADDR_CONFIG                                              0x4
 #define mmUVD_JPEG_ADDR_CONFIG                                                  0x3a1f
+#define mmUVD_GP_SCRATCH4                                                       0x3d38
 
 #endif /* UVD_6_0_D_H */