/*
 * Copyright 2009 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Michel Dänzer
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

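/* Copy methods exercised by radeon_do_test_moves() */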
#define RADEON_TEST_COPY_BLIT 1
#define RADEON_TEST_COPY_DMA  0

/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
{
        struct radeon_bo *vram_obj = NULL;
        struct radeon_bo **gtt_obj = NULL;
        uint64_t gtt_addr, vram_addr;
        unsigned n, size;
        int i, r, ring;

        switch (flag) {
        case RADEON_TEST_COPY_DMA:
                ring = radeon_copy_dma_ring_index(rdev);
                break;
        case RADEON_TEST_COPY_BLIT:
                ring = radeon_copy_blit_ring_index(rdev);
                break;
        default:
                DRM_ERROR("Unknown copy method\n");
                return;
        }

        size = 1024 * 1024;

        /* Number of tests =
         * (Total GTT - IB pool - writeback page - ring buffers) / test size
         */
        n = rdev->mc.gtt_size - rdev->gart_pin_size;
        n /= size;

        gtt_obj = kcalloc(n, sizeof(*gtt_obj), GFP_KERNEL);
        if (!gtt_obj) {
                DRM_ERROR("Failed to allocate %d pointers\n", n);
                r = 1;
                goto out_cleanup;
        }

        r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
                             0, NULL, NULL, &vram_obj);
        if (r) {
                DRM_ERROR("Failed to create VRAM object\n");
                goto out_cleanup;
        }
        r = radeon_bo_reserve(vram_obj, false);
        if (unlikely(r != 0))
                goto out_unref;
        r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
        if (r) {
                DRM_ERROR("Failed to pin VRAM object\n");
                goto out_unres;
        }
        for (i = 0; i < n; i++) {
                void *gtt_map, *vram_map;
                void **gtt_start, **gtt_end;
                void **vram_start, **vram_end;
                struct radeon_fence *fence = NULL;

                r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
                                     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
                                     gtt_obj + i);
                if (r) {
                        DRM_ERROR("Failed to create GTT object %d\n", i);
                        goto out_lclean;
                }

                r = radeon_bo_reserve(gtt_obj[i], false);
                if (unlikely(r != 0))
                        goto out_lclean_unref;
                r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
                if (r) {
                        DRM_ERROR("Failed to pin GTT object %d\n", i);
                        goto out_lclean_unres;
                }

                r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
                if (r) {
                        DRM_ERROR("Failed to map GTT object %d\n", i);
                        goto out_lclean_unpin;
                }

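                /*
                 * Fill the GTT BO with its own kernel virtual addresses: each
                 * pointer-sized slot gets a unique, position-dependent pattern
                 * that a correct copy must preserve.
                 */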
                for (gtt_start = gtt_map, gtt_end = gtt_map + size;
                     gtt_start < gtt_end;
                     gtt_start++)
                        *gtt_start = gtt_start;

                radeon_bo_kunmap(gtt_obj[i]);

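                /* Blit/DMA the whole BO from GTT into VRAM (size in GPU pages). */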
                if (ring == R600_RING_TYPE_DMA_INDEX)
                        fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
                                                size / RADEON_GPU_PAGE_SIZE,
                                                vram_obj->tbo.resv);
                else
                        fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
                                                 size / RADEON_GPU_PAGE_SIZE,
                                                 vram_obj->tbo.resv);
                if (IS_ERR(fence)) {
                        DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
                        r = PTR_ERR(fence);
                        goto out_lclean_unpin;
                }

                r = radeon_fence_wait(fence, false);
                if (r) {
                        DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
                        goto out_lclean_unpin;
                }

                radeon_fence_unref(&fence);

                r = radeon_bo_kmap(vram_obj, &vram_map);
                if (r) {
                        DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
                        goto out_lclean_unpin;
                }

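                /*
                 * Check the GTT->VRAM copy. gtt_map was unmapped above, but
                 * gtt_start is only compared against the copied values, never
                 * dereferenced. While checking, reseed VRAM with its own
                 * addresses for the return trip.
                 */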
                for (gtt_start = gtt_map, gtt_end = gtt_map + size,
                     vram_start = vram_map, vram_end = vram_map + size;
                     vram_start < vram_end;
                     gtt_start++, vram_start++) {
                        if (*vram_start != gtt_start) {
                                DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
                                          "expected 0x%p (GTT/VRAM offset "
                                          "0x%16llx/0x%16llx)\n",
                                          i, *vram_start, gtt_start,
                                          (unsigned long long)
                                          (gtt_addr - rdev->mc.gtt_start +
                                           (void *)gtt_start - gtt_map),
                                          (unsigned long long)
                                          (vram_addr - rdev->mc.vram_start +
                                           (void *)gtt_start - gtt_map));
                                radeon_bo_kunmap(vram_obj);
                                goto out_lclean_unpin;
                        }
                        *vram_start = vram_start;
                }

                radeon_bo_kunmap(vram_obj);

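                /* Copy the reseeded buffer back from VRAM into GTT. */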
                if (ring == R600_RING_TYPE_DMA_INDEX)
                        fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
                                                size / RADEON_GPU_PAGE_SIZE,
                                                vram_obj->tbo.resv);
                else
                        fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
                                                 size / RADEON_GPU_PAGE_SIZE,
                                                 vram_obj->tbo.resv);
                if (IS_ERR(fence)) {
                        DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
                        r = PTR_ERR(fence);
                        goto out_lclean_unpin;
                }

                r = radeon_fence_wait(fence, false);
                if (r) {
                        DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
                        goto out_lclean_unpin;
                }

                radeon_fence_unref(&fence);

                r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
                if (r) {
                        DRM_ERROR("Failed to map GTT object after copy %d\n", i);
                        goto out_lclean_unpin;
                }

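                /*
                 * Check the VRAM->GTT copy. As above, vram_map is unmapped by
                 * now and vram_start only serves as the expected value.
                 */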
                for (gtt_start = gtt_map, gtt_end = gtt_map + size,
                     vram_start = vram_map, vram_end = vram_map + size;
                     gtt_start < gtt_end;
                     gtt_start++, vram_start++) {
                        if (*gtt_start != vram_start) {
                                DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
                                          "expected 0x%p (VRAM/GTT offset "
                                          "0x%16llx/0x%16llx)\n",
                                          i, *gtt_start, vram_start,
                                          (unsigned long long)
                                          (vram_addr - rdev->mc.vram_start +
                                           (void *)vram_start - vram_map),
                                          (unsigned long long)
                                          (gtt_addr - rdev->mc.gtt_start +
                                           (void *)vram_start - vram_map));
                                radeon_bo_kunmap(gtt_obj[i]);
                                goto out_lclean_unpin;
                        }
                }

                radeon_bo_kunmap(gtt_obj[i]);

                DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
                         gtt_addr - rdev->mc.gtt_start);
                continue;

out_lclean_unpin:
                radeon_bo_unpin(gtt_obj[i]);
out_lclean_unres:
                radeon_bo_unreserve(gtt_obj[i]);
out_lclean_unref:
                radeon_bo_unref(&gtt_obj[i]);
out_lclean:
                for (--i; i >= 0; --i) {
                        radeon_bo_unpin(gtt_obj[i]);
                        radeon_bo_unreserve(gtt_obj[i]);
                        radeon_bo_unref(&gtt_obj[i]);
                }
                if (fence && !IS_ERR(fence))
                        radeon_fence_unref(&fence);
                break;
        }

        radeon_bo_unpin(vram_obj);
out_unres:
        radeon_bo_unreserve(vram_obj);
out_unref:
        radeon_bo_unref(&vram_obj);
out_cleanup:
        kfree(gtt_obj);
        if (r)
                pr_warn("Error while testing BO move\n");
}

void radeon_test_moves(struct radeon_device *rdev)
{
        if (rdev->asic->copy.dma)
                radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
        if (rdev->asic->copy.blit)
                radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
}

static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
                                             struct radeon_ring *ring,
                                             struct radeon_fence **fence)
{
        uint32_t handle = ring->idx ^ 0xdeafbeef;
        int r;

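        /*
         * The UVD and VCE rings need a valid command stream to carry a fence,
         * so exercise them with a dummy create/destroy message pair; every
         * other ring can emit a bare fence.
         */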
        if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
                r = radeon_uvd_get_create_msg(rdev, ring->idx, handle, NULL);
                if (r) {
                        DRM_ERROR("Failed to get dummy create msg\n");
                        return r;
                }

                r = radeon_uvd_get_destroy_msg(rdev, ring->idx, handle, fence);
                if (r) {
                        DRM_ERROR("Failed to get dummy destroy msg\n");
                        return r;
                }

        } else if (ring->idx == TN_RING_TYPE_VCE1_INDEX ||
                   ring->idx == TN_RING_TYPE_VCE2_INDEX) {
                r = radeon_vce_get_create_msg(rdev, ring->idx, handle, NULL);
                if (r) {
                        DRM_ERROR("Failed to get dummy create msg\n");
                        return r;
                }

                r = radeon_vce_get_destroy_msg(rdev, ring->idx, handle, fence);
                if (r) {
                        DRM_ERROR("Failed to get dummy destroy msg\n");
                        return r;
                }

        } else {
                r = radeon_ring_lock(rdev, ring, 64);
                if (r) {
                        DRM_ERROR("Failed to lock ring %d\n", ring->idx);
                        return r;
                }
                r = radeon_fence_emit(rdev, fence, ring->idx);
                if (r) {
                        DRM_ERROR("Failed to emit fence\n");
                        radeon_ring_unlock_undo(rdev, ring);
                        return r;
                }
                radeon_ring_unlock_commit(rdev, ring, false);
        }
        return 0;
}

void radeon_test_ring_sync(struct radeon_device *rdev,
                           struct radeon_ring *ringA,
                           struct radeon_ring *ringB)
{
        struct radeon_fence *fence1 = NULL, *fence2 = NULL;
        struct radeon_semaphore *semaphore = NULL;
        int r;

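        /*
         * Queue two semaphore waits, each followed by a fence, on ringA, then
         * verify that the fences only signal after ringB has signaled the
         * semaphore the matching number of times.
         */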
        r = radeon_semaphore_create(rdev, &semaphore);
        if (r) {
                DRM_ERROR("Failed to create semaphore\n");
                goto out_cleanup;
        }

        r = radeon_ring_lock(rdev, ringA, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringA, false);

        r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
        if (r)
                goto out_cleanup;

        r = radeon_ring_lock(rdev, ringA, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringA, false);

        r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
        if (r)
                goto out_cleanup;

        mdelay(1000);

        if (radeon_fence_signaled(fence1)) {
                DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
                goto out_cleanup;
        }

        r = radeon_ring_lock(rdev, ringB, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringB, false);

        r = radeon_fence_wait(fence1, false);
        if (r) {
                DRM_ERROR("Failed to wait for sync fence 1\n");
                goto out_cleanup;
        }

        mdelay(1000);

        if (radeon_fence_signaled(fence2)) {
                DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
                goto out_cleanup;
        }

        r = radeon_ring_lock(rdev, ringB, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringB, false);

        r = radeon_fence_wait(fence2, false);
        if (r) {
                DRM_ERROR("Failed to wait for sync fence 2\n");
                goto out_cleanup;
        }

out_cleanup:
        radeon_semaphore_free(rdev, &semaphore, NULL);

        if (fence1)
                radeon_fence_unref(&fence1);

        if (fence2)
                radeon_fence_unref(&fence2);

        if (r)
                pr_warn("Error while testing ring sync (%d)\n", r);
}

static void radeon_test_ring_sync2(struct radeon_device *rdev,
                                   struct radeon_ring *ringA,
                                   struct radeon_ring *ringB,
                                   struct radeon_ring *ringC)
{
        struct radeon_fence *fenceA = NULL, *fenceB = NULL;
        struct radeon_semaphore *semaphore = NULL;
        bool sigA, sigB;
        int i, r;

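        /*
         * Block both ringA and ringB on the same semaphore. A single signal
         * from ringC should release exactly one of them; a second signal must
         * release the other.
         */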
        r = radeon_semaphore_create(rdev, &semaphore);
        if (r) {
                DRM_ERROR("Failed to create semaphore\n");
                goto out_cleanup;
        }

        r = radeon_ring_lock(rdev, ringA, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringA, false);

        r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
        if (r)
                goto out_cleanup;

        r = radeon_ring_lock(rdev, ringB, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringB, false);
        r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
        if (r)
                goto out_cleanup;

        mdelay(1000);

        if (radeon_fence_signaled(fenceA)) {
                DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
                goto out_cleanup;
        }
        if (radeon_fence_signaled(fenceB)) {
                DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
                goto out_cleanup;
        }

        r = radeon_ring_lock(rdev, ringC, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring C %d\n", ringC->idx);
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringC, false);

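        /* Poll for up to three seconds; exactly one fence should signal. */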
        for (i = 0; i < 30; ++i) {
                mdelay(100);
                sigA = radeon_fence_signaled(fenceA);
                sigB = radeon_fence_signaled(fenceB);
                if (sigA || sigB)
                        break;
        }

        if (!sigA && !sigB) {
                DRM_ERROR("Neither fence A nor B has been signaled\n");
                goto out_cleanup;
        } else if (sigA && sigB) {
                DRM_ERROR("Both fences A and B have been signaled\n");
                goto out_cleanup;
        }

        DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');

        r = radeon_ring_lock(rdev, ringC, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring C %d\n", ringC->idx);
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringC, false);

        mdelay(1000);

        r = radeon_fence_wait(fenceA, false);
        if (r) {
                DRM_ERROR("Failed to wait for sync fence A\n");
                goto out_cleanup;
        }
        r = radeon_fence_wait(fenceB, false);
        if (r) {
                DRM_ERROR("Failed to wait for sync fence B\n");
                goto out_cleanup;
        }

out_cleanup:
        radeon_semaphore_free(rdev, &semaphore, NULL);

        if (fenceA)
                radeon_fence_unref(&fenceA);

        if (fenceB)
                radeon_fence_unref(&fenceB);

        if (r)
                pr_warn("Error while testing ring sync (%d)\n", r);
}

static bool radeon_test_sync_possible(struct radeon_ring *ringA,
                                      struct radeon_ring *ringB)
{
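        /* Don't try to sync the two VCE rings against each other. */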
        if (ringA->idx == TN_RING_TYPE_VCE2_INDEX &&
            ringB->idx == TN_RING_TYPE_VCE1_INDEX)
                return false;

        return true;
}

void radeon_test_syncing(struct radeon_device *rdev)
{
        int i, j, k;

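        /*
         * Walk every pair (i, j) and triple (i, j, k) of ready rings, testing
         * each combination in all waiter/signaler orders.
         */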
        for (i = 1; i < RADEON_NUM_RINGS; ++i) {
                struct radeon_ring *ringA = &rdev->ring[i];
                if (!ringA->ready)
                        continue;

                for (j = 0; j < i; ++j) {
                        struct radeon_ring *ringB = &rdev->ring[j];
                        if (!ringB->ready)
                                continue;

                        if (!radeon_test_sync_possible(ringA, ringB))
                                continue;

                        DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
                        radeon_test_ring_sync(rdev, ringA, ringB);

                        DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
                        radeon_test_ring_sync(rdev, ringB, ringA);

                        for (k = 0; k < j; ++k) {
                                struct radeon_ring *ringC = &rdev->ring[k];
                                if (!ringC->ready)
                                        continue;

                                if (!radeon_test_sync_possible(ringA, ringC))
                                        continue;

                                if (!radeon_test_sync_possible(ringB, ringC))
                                        continue;

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
                                radeon_test_ring_sync2(rdev, ringA, ringB, ringC);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
                                radeon_test_ring_sync2(rdev, ringA, ringC, ringB);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
                                radeon_test_ring_sync2(rdev, ringB, ringA, ringC);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
                                radeon_test_ring_sync2(rdev, ringB, ringC, ringA);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
                                radeon_test_ring_sync2(rdev, ringC, ringA, ringB);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
                                radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
                        }
                }
        }
}