drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24
25 #include <linux/list_sort.h>
26 #include <linux/prime_numbers.h>
27
28 #include "../i915_selftest.h"
29 #include "i915_random.h"
30
31 #include "mock_context.h"
32 #include "mock_drm.h"
33 #include "mock_gem_device.h"
34
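/*
 * Fake backing store for the selftests below: every scatterlist entry
 * points at the same bogus page (PFN_BIAS) and reuses its physical
 * address as the "dma" address, so we can build arbitrarily large
 * objects to fill a GTT without touching any real memory.
 */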
35 static void fake_free_pages(struct drm_i915_gem_object *obj,
36                             struct sg_table *pages)
37 {
38         sg_free_table(pages);
39         kfree(pages);
40 }
41
42 static struct sg_table *
43 fake_get_pages(struct drm_i915_gem_object *obj)
44 {
45 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
46 #define PFN_BIAS 0x1000
47         struct sg_table *pages;
48         struct scatterlist *sg;
49         typeof(obj->base.size) rem;
50
51         pages = kmalloc(sizeof(*pages), GFP);
52         if (!pages)
53                 return ERR_PTR(-ENOMEM);
54
55         rem = round_up(obj->base.size, BIT(31)) >> 31;
56         if (sg_alloc_table(pages, rem, GFP)) {
57                 kfree(pages);
58                 return ERR_PTR(-ENOMEM);
59         }
60
61         rem = obj->base.size;
62         for (sg = pages->sgl; sg; sg = sg_next(sg)) {
63                 unsigned long len = min_t(typeof(rem), rem, BIT(31));
64
65                 GEM_BUG_ON(!len);
66                 sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
67                 sg_dma_address(sg) = page_to_phys(sg_page(sg));
68                 sg_dma_len(sg) = len;
69
70                 rem -= len;
71         }
72         GEM_BUG_ON(rem);
73
74         obj->mm.madv = I915_MADV_DONTNEED;
75         return pages;
76 #undef GFP
77 }
78
79 static void fake_put_pages(struct drm_i915_gem_object *obj,
80                            struct sg_table *pages)
81 {
82         fake_free_pages(obj, pages);
83         obj->mm.dirty = false;
84         obj->mm.madv = I915_MADV_WILLNEED;
85 }
86
87 static const struct drm_i915_gem_object_ops fake_ops = {
88         .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
89         .get_pages = fake_get_pages,
90         .put_pages = fake_put_pages,
91 };
92
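/* Wrap the fake backing store above in a GEM object of the given size */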
93 static struct drm_i915_gem_object *
94 fake_dma_object(struct drm_i915_private *i915, u64 size)
95 {
96         struct drm_i915_gem_object *obj;
97
98         GEM_BUG_ON(!size);
99         GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
100
101         if (overflows_type(size, obj->base.size))
102                 return ERR_PTR(-E2BIG);
103
104         obj = i915_gem_object_alloc(i915);
105         if (!obj)
106                 return ERR_PTR(-ENOMEM);
107
108         drm_gem_private_object_init(&i915->drm, &obj->base, size);
109         i915_gem_object_init(obj, &fake_ops);
110
111         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
112         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
113         obj->cache_level = I915_CACHE_NONE;
114
115         /* Preallocate the "backing storage" */
116         if (i915_gem_object_pin_pages(obj)) {
117                 i915_gem_object_put(obj); /* don't leak the object on failure */
                    return ERR_PTR(-ENOMEM);
            }
118
119         i915_gem_object_unpin_pages(obj);
120         return obj;
121 }
122
123 static int igt_ppgtt_alloc(void *arg)
124 {
125         struct drm_i915_private *dev_priv = arg;
126         struct i915_hw_ppgtt *ppgtt;
127         u64 size, last;
128         int err;
129
130         /* Allocate a ppgtt and try to fill the entire range */
131
132         if (!USES_PPGTT(dev_priv))
133                 return 0;
134
135         ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
136         if (!ppgtt)
137                 return -ENOMEM;
138
139         mutex_lock(&dev_priv->drm.struct_mutex);
140         err = __hw_ppgtt_init(ppgtt, dev_priv);
141         if (err)
142                 goto err_ppgtt;
143
144         if (!ppgtt->base.allocate_va_range)
145                 goto err_ppgtt_cleanup;
146
147         /* Check we can allocate the entire range */
148         for (size = 4096;
149              size <= ppgtt->base.total;
150              size <<= 2) {
151                 err = ppgtt->base.allocate_va_range(&ppgtt->base, 0, size);
152                 if (err) {
153                         if (err == -ENOMEM) {
154                                 pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
155                                         size, ilog2(size));
156                                 err = 0; /* virtual space too large! */
157                         }
158                         goto err_ppgtt_cleanup;
159                 }
160
161                 ppgtt->base.clear_range(&ppgtt->base, 0, size);
162         }
163
164         /* Check we can incrementally allocate the entire range */
165         for (last = 0, size = 4096;
166              size <= ppgtt->base.total;
167              last = size, size <<= 2) {
168                 err = ppgtt->base.allocate_va_range(&ppgtt->base,
169                                                     last, size - last);
170                 if (err) {
171                         if (err == -ENOMEM) {
172                                 pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
173                                         last, size - last, ilog2(size));
174                                 err = 0; /* virtual space too large! */
175                         }
176                         goto err_ppgtt_cleanup;
177                 }
178         }
179
180 err_ppgtt_cleanup:
181         ppgtt->base.cleanup(&ppgtt->base);
182 err_ppgtt:
183         mutex_unlock(&dev_priv->drm.struct_mutex);
184         kfree(ppgtt);
185         return err;
186 }
187
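/*
 * lowlevel_hole: insert the pages of a single object at randomly ordered,
 * power-of-two spaced offsets across the hole, driving the low-level
 * allocate_va_range/insert_entries/clear_range hooks directly rather than
 * going through the VMA API.
 */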
188 static int lowlevel_hole(struct drm_i915_private *i915,
189                          struct i915_address_space *vm,
190                          u64 hole_start, u64 hole_end,
191                          unsigned long end_time)
192 {
193         I915_RND_STATE(seed_prng);
194         unsigned int size;
195
196         /* Keep creating larger objects until one cannot fit into the hole */
197         for (size = 12; (hole_end - hole_start) >> size; size++) {
198                 I915_RND_SUBSTATE(prng, seed_prng);
199                 struct drm_i915_gem_object *obj;
200                 unsigned int *order, count, n;
201                 u64 hole_size;
202
203                 hole_size = (hole_end - hole_start) >> size;
204                 if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
205                         hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
206                 count = hole_size;
207                 do {
208                         count >>= 1;
209                         order = i915_random_order(count, &prng);
210                 } while (!order && count);
211                 if (!order)
212                         break;
213
214                 GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
215                 GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);
216
217                 /* Ignore allocation failures (i.e. don't report them as
218                  * a test failure) as we are purposefully allocating very
219                  * large objects without checking that we have sufficient
220                  * memory. We expect to hit -ENOMEM.
221                  */
222
223                 obj = fake_dma_object(i915, BIT_ULL(size));
224                 if (IS_ERR(obj)) {
225                         kfree(order);
226                         break;
227                 }
228
229                 GEM_BUG_ON(obj->base.size != BIT_ULL(size));
230
231                 if (i915_gem_object_pin_pages(obj)) {
232                         i915_gem_object_put(obj);
233                         kfree(order);
234                         break;
235                 }
236
237                 for (n = 0; n < count; n++) {
238                         u64 addr = hole_start + order[n] * BIT_ULL(size);
239
240                         GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
241
242                         if (igt_timeout(end_time,
243                                         "%s timed out before %d/%d\n",
244                                         __func__, n, count)) {
245                                 hole_end = hole_start; /* quit */
246                                 break;
247                         }
248
249                         if (vm->allocate_va_range &&
250                             vm->allocate_va_range(vm, addr, BIT_ULL(size)))
251                                 break;
252
253                         vm->insert_entries(vm, obj->mm.pages, addr,
254                                            I915_CACHE_NONE, 0);
255                 }
256                 count = n;
257
258                 i915_random_reorder(order, count, &prng);
259                 for (n = 0; n < count; n++) {
260                         u64 addr = hole_start + order[n] * BIT_ULL(size);
261
262                         GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
263                         vm->clear_range(vm, addr, BIT_ULL(size));
264                 }
265
266                 i915_gem_object_unpin_pages(obj);
267                 i915_gem_object_put(obj);
268
269                 kfree(order);
270         }
271
272         return 0;
273 }
274
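/* Unbind (and, for ppgtt, close) each vma before releasing its object */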
275 static void close_object_list(struct list_head *objects,
276                               struct i915_address_space *vm)
277 {
278         struct drm_i915_gem_object *obj, *on;
279         int ignored;
280
281         list_for_each_entry_safe(obj, on, objects, st_link) {
282                 struct i915_vma *vma;
283
284                 vma = i915_vma_instance(obj, vm, NULL);
285                 if (!IS_ERR(vma))
286                         ignored = i915_vma_unbind(vma);
287                 /* Only ppgtt vma may be closed before the object is freed */
288                 if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
289                         i915_vma_close(vma);
290
291                 list_del(&obj->st_link);
292                 i915_gem_object_put(obj);
293         }
294 }
295
296 static int fill_hole(struct drm_i915_private *i915,
297                      struct i915_address_space *vm,
298                      u64 hole_start, u64 hole_end,
299                      unsigned long end_time)
300 {
301         const u64 hole_size = hole_end - hole_start;
302         struct drm_i915_gem_object *obj;
303         const unsigned long max_pages =
304                 min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
305         const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
306         unsigned long npages, prime, flags;
307         struct i915_vma *vma;
308         LIST_HEAD(objects);
309         int err;
310
311         /* Try binding many VMA working inwards from either edge */
312
313         flags = PIN_OFFSET_FIXED | PIN_USER;
314         if (i915_is_ggtt(vm))
315                 flags |= PIN_GLOBAL;
316
317         for_each_prime_number_from(prime, 2, max_step) {
318                 for (npages = 1; npages <= max_pages; npages *= prime) {
319                         const u64 full_size = npages << PAGE_SHIFT;
320                         const struct {
321                                 const char *name;
322                                 u64 offset;
323                                 int step;
324                         } phases[] = {
325                                 { "top-down", hole_end, -1, },
326                                 { "bottom-up", hole_start, 1, },
327                                 { }
328                         }, *p;
329
330                         obj = fake_dma_object(i915, full_size);
331                         if (IS_ERR(obj))
332                                 break;
333
334                         list_add(&obj->st_link, &objects);
335
336                         /* Align differing sized objects against the edges, and
337                          * check we don't walk off into the void when binding
338                          * them into the GTT.
339                          */
340                         for (p = phases; p->name; p++) {
341                                 u64 offset;
342
343                                 offset = p->offset;
344                                 list_for_each_entry(obj, &objects, st_link) {
345                                         vma = i915_vma_instance(obj, vm, NULL);
346                                         if (IS_ERR(vma))
347                                                 continue;
348
349                                         if (p->step < 0) {
350                                                 if (offset < hole_start + obj->base.size)
351                                                         break;
352                                                 offset -= obj->base.size;
353                                         }
354
355                                         err = i915_vma_pin(vma, 0, 0, offset | flags);
356                                         if (err) {
357                                                 pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
358                                                        __func__, p->name, err, npages, prime, offset);
359                                                 goto err;
360                                         }
361
362                                         if (!drm_mm_node_allocated(&vma->node) ||
363                                             i915_vma_misplaced(vma, 0, 0, offset | flags)) {
364                                                 pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
365                                                        __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
366                                                        offset);
367                                                 err = -EINVAL;
368                                                 goto err;
369                                         }
370
371                                         i915_vma_unpin(vma);
372
373                                         if (p->step > 0) {
374                                                 if (offset + obj->base.size > hole_end)
375                                                         break;
376                                                 offset += obj->base.size;
377                                         }
378                                 }
379
380                                 offset = p->offset;
381                                 list_for_each_entry(obj, &objects, st_link) {
382                                         vma = i915_vma_instance(obj, vm, NULL);
383                                         if (IS_ERR(vma))
384                                                 continue;
385
386                                         if (p->step < 0) {
387                                                 if (offset < hole_start + obj->base.size)
388                                                         break;
389                                                 offset -= obj->base.size;
390                                         }
391
392                                         if (!drm_mm_node_allocated(&vma->node) ||
393                                             i915_vma_misplaced(vma, 0, 0, offset | flags)) {
394                                                 pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
395                                                        __func__, p->name, vma->node.start, vma->node.size,
396                                                        offset);
397                                                 err = -EINVAL;
398                                                 goto err;
399                                         }
400
401                                         err = i915_vma_unbind(vma);
402                                         if (err) {
403                                                 pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
404                                                        __func__, p->name, vma->node.start, vma->node.size,
405                                                        err);
406                                                 goto err;
407                                         }
408
409                                         if (p->step > 0) {
410                                                 if (offset + obj->base.size > hole_end)
411                                                         break;
412                                                 offset += obj->base.size;
413                                         }
414                                 }
415
416                                 offset = p->offset;
417                                 list_for_each_entry_reverse(obj, &objects, st_link) {
418                                         vma = i915_vma_instance(obj, vm, NULL);
419                                         if (IS_ERR(vma))
420                                                 continue;
421
422                                         if (p->step < 0) {
423                                                 if (offset < hole_start + obj->base.size)
424                                                         break;
425                                                 offset -= obj->base.size;
426                                         }
427
428                                         err = i915_vma_pin(vma, 0, 0, offset | flags);
429                                         if (err) {
430                                                 pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
431                                                        __func__, p->name, err, npages, prime, offset);
432                                                 goto err;
433                                         }
434
435                                         if (!drm_mm_node_allocated(&vma->node) ||
436                                             i915_vma_misplaced(vma, 0, 0, offset | flags)) {
437                                                 pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
438                                                        __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
439                                                        offset);
440                                                 err = -EINVAL;
441                                                 goto err;
442                                         }
443
444                                         i915_vma_unpin(vma);
445
446                                         if (p->step > 0) {
447                                                 if (offset + obj->base.size > hole_end)
448                                                         break;
449                                                 offset += obj->base.size;
450                                         }
451                                 }
452
453                                 offset = p->offset;
454                                 list_for_each_entry_reverse(obj, &objects, st_link) {
455                                         vma = i915_vma_instance(obj, vm, NULL);
456                                         if (IS_ERR(vma))
457                                                 continue;
458
459                                         if (p->step < 0) {
460                                                 if (offset < hole_start + obj->base.size)
461                                                         break;
462                                                 offset -= obj->base.size;
463                                         }
464
465                                         if (!drm_mm_node_allocated(&vma->node) ||
466                                             i915_vma_misplaced(vma, 0, 0, offset | flags)) {
467                                                 pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
468                                                        __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
469                                                        offset);
470                                                 err = -EINVAL;
471                                                 goto err;
472                                         }
473
474                                         err = i915_vma_unbind(vma);
475                                         if (err) {
476                                                 pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
477                                                        __func__, p->name, vma->node.start, vma->node.size,
478                                                        err);
479                                                 goto err;
480                                         }
481
482                                         if (p->step > 0) {
483                                                 if (offset + obj->base.size > hole_end)
484                                                         break;
485                                                 offset += obj->base.size;
486                                         }
487                                 }
488                         }
489
490                         if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
491                                         __func__, npages, prime)) {
492                                 err = -EINTR;
493                                 goto err;
494                         }
495                 }
496
497                 close_object_list(&objects, vm);
498         }
499
500         return 0;
501
502 err:
503         close_object_list(&objects, vm);
504         return err;
505 }
506
507 static int walk_hole(struct drm_i915_private *i915,
508                      struct i915_address_space *vm,
509                      u64 hole_start, u64 hole_end,
510                      unsigned long end_time)
511 {
512         const u64 hole_size = hole_end - hole_start;
513         const unsigned long max_pages =
514                 min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
515         unsigned long flags;
516         u64 size;
517
518         /* Try binding a single VMA in different positions within the hole */
519
520         flags = PIN_OFFSET_FIXED | PIN_USER;
521         if (i915_is_ggtt(vm))
522                 flags |= PIN_GLOBAL;
523
524         for_each_prime_number_from(size, 1, max_pages) {
525                 struct drm_i915_gem_object *obj;
526                 struct i915_vma *vma;
527                 u64 addr;
528                 int err = 0;
529
530                 obj = fake_dma_object(i915, size << PAGE_SHIFT);
531                 if (IS_ERR(obj))
532                         break;
533
534                 vma = i915_vma_instance(obj, vm, NULL);
535                 if (IS_ERR(vma)) {
536                         err = PTR_ERR(vma);
537                         goto err;
538                 }
539
540                 for (addr = hole_start;
541                      addr + obj->base.size < hole_end;
542                      addr += obj->base.size) {
543                         err = i915_vma_pin(vma, 0, 0, addr | flags);
544                         if (err) {
545                                 pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
546                                        __func__, addr, vma->size,
547                                        hole_start, hole_end, err);
548                                 goto err;
549                         }
550                         i915_vma_unpin(vma);
551
552                         if (!drm_mm_node_allocated(&vma->node) ||
553                             i915_vma_misplaced(vma, 0, 0, addr | flags)) {
554                                 pr_err("%s incorrect at %llx + %llx\n",
555                                        __func__, addr, vma->size);
556                                 err = -EINVAL;
557                                 goto err;
558                         }
559
560                         err = i915_vma_unbind(vma);
561                         if (err) {
562                                 pr_err("%s unbind failed at %llx + %llx with err=%d\n",
563                                        __func__, addr, vma->size, err);
564                                 goto err;
565                         }
566
567                         GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
568
569                         if (igt_timeout(end_time,
570                                         "%s timed out at %llx\n",
571                                         __func__, addr)) {
572                                 err = -EINTR;
573                                 goto err;
574                         }
575                 }
576
577 err:
578                 if (!i915_vma_is_ggtt(vma))
579                         i915_vma_close(vma);
580                 i915_gem_object_put(obj);
581                 if (err)
582                         return err;
583         }
584
585         return 0;
586 }
587
588 static int pot_hole(struct drm_i915_private *i915,
589                     struct i915_address_space *vm,
590                     u64 hole_start, u64 hole_end,
591                     unsigned long end_time)
592 {
593         struct drm_i915_gem_object *obj;
594         struct i915_vma *vma;
595         unsigned long flags;
596         unsigned int pot;
597         int err = 0;
598
599         flags = PIN_OFFSET_FIXED | PIN_USER;
600         if (i915_is_ggtt(vm))
601                 flags |= PIN_GLOBAL;
602
603         obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
604         if (IS_ERR(obj))
605                 return PTR_ERR(obj);
606
607         vma = i915_vma_instance(obj, vm, NULL);
608         if (IS_ERR(vma)) {
609                 err = PTR_ERR(vma);
610                 goto err_obj;
611         }
612
613         /* Insert a pair of pages across every pot boundary within the hole */
614         for (pot = fls64(hole_end - 1) - 1;
615              pot > ilog2(2 * I915_GTT_PAGE_SIZE);
616              pot--) {
617                 u64 step = BIT_ULL(pot);
618                 u64 addr;
619
620                 for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
621                      addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
622                      addr += step) {
623                         err = i915_vma_pin(vma, 0, 0, addr | flags);
624                         if (err) {
625                                 pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
626                                        __func__,
627                                        addr,
628                                        hole_start, hole_end,
629                                        err);
630                                 goto err;
631                         }
632
633                         if (!drm_mm_node_allocated(&vma->node) ||
634                             i915_vma_misplaced(vma, 0, 0, addr | flags)) {
635                                 pr_err("%s incorrect at %llx + %llx\n",
636                                        __func__, addr, vma->size);
637                                 i915_vma_unpin(vma);
638                                 err = i915_vma_unbind(vma);
639                                 err = -EINVAL;
640                                 goto err;
641                         }
642
643                         i915_vma_unpin(vma);
644                         err = i915_vma_unbind(vma);
645                         GEM_BUG_ON(err);
646                 }
647
648                 if (igt_timeout(end_time,
649                                 "%s timed out after %d/%d\n",
650                                 __func__, pot, fls64(hole_end - 1) - 1)) {
651                         err = -EINTR;
652                         goto err;
653                 }
654         }
655
656 err:
657         if (!i915_vma_is_ggtt(vma))
658                 i915_vma_close(vma);
659 err_obj:
660         i915_gem_object_put(obj);
661         return err;
662 }
663
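/*
 * drunk_hole: bind a single power-of-two sized vma at randomly ordered
 * offsets throughout the hole, checking its placement after every pin
 * and unbinding it again before moving on.
 */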
664 static int drunk_hole(struct drm_i915_private *i915,
665                       struct i915_address_space *vm,
666                       u64 hole_start, u64 hole_end,
667                       unsigned long end_time)
668 {
669         I915_RND_STATE(prng);
670         unsigned int size;
671         unsigned long flags;
672
673         flags = PIN_OFFSET_FIXED | PIN_USER;
674         if (i915_is_ggtt(vm))
675                 flags |= PIN_GLOBAL;
676
677         /* Keep creating larger objects until one cannot fit into the hole */
678         for (size = 12; (hole_end - hole_start) >> size; size++) {
679                 struct drm_i915_gem_object *obj;
680                 unsigned int *order, count, n;
681                 struct i915_vma *vma;
682                 u64 hole_size;
683                 int err;
684
685                 hole_size = (hole_end - hole_start) >> size;
686                 if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
687                         hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
688                 count = hole_size;
689                 do {
690                         count >>= 1;
691                         order = i915_random_order(count, &prng);
692                 } while (!order && count);
693                 if (!order)
694                         break;
695
696                 /* Ignore allocation failures (i.e. don't report them as
697                  * a test failure) as we are purposefully allocating very
698                  * large objects without checking that we have sufficient
699                  * memory. We expect to hit -ENOMEM.
700                  */
701
702                 obj = fake_dma_object(i915, BIT_ULL(size));
703                 if (IS_ERR(obj)) {
704                         kfree(order);
705                         break;
706                 }
707
708                 vma = i915_vma_instance(obj, vm, NULL);
709                 if (IS_ERR(vma)) {
710                         err = PTR_ERR(vma);
711                         goto err_obj;
712                 }
713
714                 GEM_BUG_ON(vma->size != BIT_ULL(size));
715
716                 for (n = 0; n < count; n++) {
717                         u64 addr = hole_start + order[n] * BIT_ULL(size);
718
719                         err = i915_vma_pin(vma, 0, 0, addr | flags);
720                         if (err) {
721                                 pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
722                                        __func__,
723                                        addr, BIT_ULL(size),
724                                        hole_start, hole_end,
725                                        err);
726                                 goto err;
727                         }
728
729                         if (!drm_mm_node_allocated(&vma->node) ||
730                             i915_vma_misplaced(vma, 0, 0, addr | flags)) {
731                                 pr_err("%s incorrect at %llx + %llx\n",
732                                        __func__, addr, BIT_ULL(size));
733                                 i915_vma_unpin(vma);
734                                 err = i915_vma_unbind(vma);
735                                 err = -EINVAL;
736                                 goto err;
737                         }
738
739                         i915_vma_unpin(vma);
740                         err = i915_vma_unbind(vma);
741                         GEM_BUG_ON(err);
742
743                         if (igt_timeout(end_time,
744                                         "%s timed out after %d/%d\n",
745                                         __func__, n, count)) {
746                                 err = -EINTR;
747                                 goto err;
748                         }
749                 }
750
751 err:
752                 if (!i915_vma_is_ggtt(vma))
753                         i915_vma_close(vma);
754 err_obj:
755                 i915_gem_object_put(obj);
756                 kfree(order);
757                 if (err)
758                         return err;
759         }
760
761         return 0;
762 }
763
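/* Pack the hole with progressively larger objects, placed end to end */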
764 static int __shrink_hole(struct drm_i915_private *i915,
765                          struct i915_address_space *vm,
766                          u64 hole_start, u64 hole_end,
767                          unsigned long end_time)
768 {
769         struct drm_i915_gem_object *obj;
770         unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
771         unsigned int order = 12;
772         LIST_HEAD(objects);
773         int err = 0;
774         u64 addr;
775
776         /* Keep creating larger objects until one cannot fit into the hole */
777         for (addr = hole_start; addr < hole_end; ) {
778                 struct i915_vma *vma;
779                 u64 size = BIT_ULL(order++);
780
781                 size = min(size, hole_end - addr);
782                 obj = fake_dma_object(i915, size);
783                 if (IS_ERR(obj)) {
784                         err = PTR_ERR(obj);
785                         break;
786                 }
787
788                 list_add(&obj->st_link, &objects);
789
790                 vma = i915_vma_instance(obj, vm, NULL);
791                 if (IS_ERR(vma)) {
792                         err = PTR_ERR(vma);
793                         break;
794                 }
795
796                 GEM_BUG_ON(vma->size != size);
797
798                 err = i915_vma_pin(vma, 0, 0, addr | flags);
799                 if (err) {
800                         pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
801                                __func__, addr, size, hole_start, hole_end, err);
802                         break;
803                 }
804
805                 if (!drm_mm_node_allocated(&vma->node) ||
806                     i915_vma_misplaced(vma, 0, 0, addr | flags)) {
807                         pr_err("%s incorrect at %llx + %llx\n",
808                                __func__, addr, size);
809                         i915_vma_unpin(vma);
810                         err = i915_vma_unbind(vma);
811                         err = -EINVAL;
812                         break;
813                 }
814
815                 i915_vma_unpin(vma);
816                 addr += size;
817
818                 if (igt_timeout(end_time,
819                                 "%s timed out at offset %llx [%llx - %llx]\n",
820                                 __func__, addr, hole_start, hole_end)) {
821                         err = -EINTR;
822                         break;
823                 }
824         }
825
826         close_object_list(&objects, vm);
827         return err;
828 }
829
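/*
 * shrink_hole: rerun __shrink_hole while injecting allocation failures
 * (via vm->fault_attr) at every prime interval, exercising the error
 * unwind paths of the page-table allocators.
 */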
830 static int shrink_hole(struct drm_i915_private *i915,
831                        struct i915_address_space *vm,
832                        u64 hole_start, u64 hole_end,
833                        unsigned long end_time)
834 {
835         unsigned long prime;
836         int err;
837
838         vm->fault_attr.probability = 999;
839         atomic_set(&vm->fault_attr.times, -1);
840
841         for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
842                 vm->fault_attr.interval = prime;
843                 err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
844                 if (err)
845                         break;
846         }
847
848         memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
849
850         return err;
851 }
852
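/*
 * Build a full ppgtt on behalf of a mock file and run the given hole
 * exerciser over its entire range under struct_mutex.
 */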
853 static int exercise_ppgtt(struct drm_i915_private *dev_priv,
854                           int (*func)(struct drm_i915_private *i915,
855                                       struct i915_address_space *vm,
856                                       u64 hole_start, u64 hole_end,
857                                       unsigned long end_time))
858 {
859         struct drm_file *file;
860         struct i915_hw_ppgtt *ppgtt;
861         IGT_TIMEOUT(end_time);
862         int err;
863
864         if (!USES_FULL_PPGTT(dev_priv))
865                 return 0;
866
867         file = mock_file(dev_priv);
868         if (IS_ERR(file))
869                 return PTR_ERR(file);
870
871         mutex_lock(&dev_priv->drm.struct_mutex);
872         ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock");
873         if (IS_ERR(ppgtt)) {
874                 err = PTR_ERR(ppgtt);
875                 goto out_unlock;
876         }
877         GEM_BUG_ON(offset_in_page(ppgtt->base.total));
878         GEM_BUG_ON(ppgtt->base.closed);
879
880         err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);
881
882         i915_ppgtt_close(&ppgtt->base);
883         i915_ppgtt_put(ppgtt);
884 out_unlock:
885         mutex_unlock(&dev_priv->drm.struct_mutex);
886
887         mock_file_free(dev_priv, file);
888         return err;
889 }
890
891 static int igt_ppgtt_fill(void *arg)
892 {
893         return exercise_ppgtt(arg, fill_hole);
894 }
895
896 static int igt_ppgtt_walk(void *arg)
897 {
898         return exercise_ppgtt(arg, walk_hole);
899 }
900
901 static int igt_ppgtt_pot(void *arg)
902 {
903         return exercise_ppgtt(arg, pot_hole);
904 }
905
906 static int igt_ppgtt_drunk(void *arg)
907 {
908         return exercise_ppgtt(arg, drunk_hole);
909 }
910
911 static int igt_ppgtt_lowlevel(void *arg)
912 {
913         return exercise_ppgtt(arg, lowlevel_hole);
914 }
915
916 static int igt_ppgtt_shrink(void *arg)
917 {
918         return exercise_ppgtt(arg, shrink_hole);
919 }
920
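/* list_sort() comparator: order drm_mm holes by ascending start address */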
921 static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
922 {
923         struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
924         struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
925
926         if (a->start < b->start)
927                 return -1;
928         else
929                 return 1;
930 }
931
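/*
 * Run the hole exerciser over every hole currently in the GGTT, restarting
 * the (sorted) walk after each pass as the exerciser may have rearranged
 * the drm_mm hole list beneath us.
 */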
932 static int exercise_ggtt(struct drm_i915_private *i915,
933                          int (*func)(struct drm_i915_private *i915,
934                                      struct i915_address_space *vm,
935                                      u64 hole_start, u64 hole_end,
936                                      unsigned long end_time))
937 {
938         struct i915_ggtt *ggtt = &i915->ggtt;
939         u64 hole_start, hole_end, last = 0;
940         struct drm_mm_node *node;
941         IGT_TIMEOUT(end_time);
942         int err = 0; /* returned as-is if the GGTT has no holes to exercise */
943
944         mutex_lock(&i915->drm.struct_mutex);
945 restart:
946         list_sort(NULL, &ggtt->base.mm.hole_stack, sort_holes);
947         drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) {
948                 if (hole_start < last)
949                         continue;
950
951                 if (ggtt->base.mm.color_adjust)
952                         ggtt->base.mm.color_adjust(node, 0,
953                                                    &hole_start, &hole_end);
954                 if (hole_start >= hole_end)
955                         continue;
956
957                 err = func(i915, &ggtt->base, hole_start, hole_end, end_time);
958                 if (err)
959                         break;
960
961                 /* As we have manipulated the drm_mm, the list may be corrupt */
962                 last = hole_end;
963                 goto restart;
964         }
965         mutex_unlock(&i915->drm.struct_mutex);
966
967         return err;
968 }
969
970 static int igt_ggtt_fill(void *arg)
971 {
972         return exercise_ggtt(arg, fill_hole);
973 }
974
975 static int igt_ggtt_walk(void *arg)
976 {
977         return exercise_ggtt(arg, walk_hole);
978 }
979
980 static int igt_ggtt_pot(void *arg)
981 {
982         return exercise_ggtt(arg, pot_hole);
983 }
984
985 static int igt_ggtt_drunk(void *arg)
986 {
987         return exercise_ggtt(arg, drunk_hole);
988 }
989
990 static int igt_ggtt_lowlevel(void *arg)
991 {
992         return exercise_ggtt(arg, lowlevel_hole);
993 }
994
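/*
 * igt_ggtt_page: exercise the GGTT insert_page() hook by mapping a single
 * page at randomly ordered offsets inside a reserved block, writing a value
 * through the mappable aperture and checking that it reads back intact.
 */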
995 static int igt_ggtt_page(void *arg)
996 {
997         const unsigned int count = PAGE_SIZE/sizeof(u32);
998         I915_RND_STATE(prng);
999         struct drm_i915_private *i915 = arg;
1000         struct i915_ggtt *ggtt = &i915->ggtt;
1001         struct drm_i915_gem_object *obj;
1002         struct drm_mm_node tmp;
1003         unsigned int *order, n;
1004         int err;
1005
1006         mutex_lock(&i915->drm.struct_mutex);
1007
1008         obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1009         if (IS_ERR(obj)) {
1010                 err = PTR_ERR(obj);
1011                 goto out_unlock;
1012         }
1013
1014         err = i915_gem_object_pin_pages(obj);
1015         if (err)
1016                 goto out_free;
1017
1018         memset(&tmp, 0, sizeof(tmp));
1019         err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
1020                                           1024 * PAGE_SIZE, 0,
1021                                           I915_COLOR_UNEVICTABLE,
1022                                           0, ggtt->mappable_end,
1023                                           DRM_MM_INSERT_LOW);
1024         if (err)
1025                 goto out_unpin;
1026
1027         order = i915_random_order(count, &prng);
1028         if (!order) {
1029                 err = -ENOMEM;
1030                 goto out_remove;
1031         }
1032
1033         for (n = 0; n < count; n++) {
1034                 u64 offset = tmp.start + order[n] * PAGE_SIZE;
1035                 u32 __iomem *vaddr;
1036
1037                 ggtt->base.insert_page(&ggtt->base,
1038                                        i915_gem_object_get_dma_address(obj, 0),
1039                                        offset, I915_CACHE_NONE, 0);
1040
1041                 vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
1042                 iowrite32(n, vaddr + n);
1043                 io_mapping_unmap_atomic(vaddr);
1044
1045                 wmb();
1046                 ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);
1047         }
1048
1049         i915_random_reorder(order, count, &prng);
1050         for (n = 0; n < count; n++) {
1051                 u64 offset = tmp.start + order[n] * PAGE_SIZE;
1052                 u32 __iomem *vaddr;
1053                 u32 val;
1054
1055                 ggtt->base.insert_page(&ggtt->base,
1056                                        i915_gem_object_get_dma_address(obj, 0),
1057                                        offset, I915_CACHE_NONE, 0);
1058
1059                 vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
1060                 val = ioread32(vaddr + n);
1061                 io_mapping_unmap_atomic(vaddr);
1062
1063                 ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);
1064
1065                 if (val != n) {
1066                         pr_err("insert page failed: found %d, expected %d\n",
1067                                val, n);
1068                         err = -EINVAL;
1069                         break;
1070                 }
1071         }
1072
1073         kfree(order);
1074 out_remove:
1075         drm_mm_remove_node(&tmp);
1076 out_unpin:
1077         i915_gem_object_unpin_pages(obj);
1078 out_free:
1079         i915_gem_object_put(obj);
1080 out_unlock:
1081         mutex_unlock(&i915->drm.struct_mutex);
1082         return err;
1083 }
1084
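/*
 * Mimic the side effects of binding a vma on the mock device: take a
 * reference on the backing pages, adopt them as the vma's page list and
 * move the vma onto the inactive list so that eviction can find it later.
 */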
1085 static void track_vma_bind(struct i915_vma *vma)
1086 {
1087         struct drm_i915_gem_object *obj = vma->obj;
1088
1089         obj->bind_count++; /* track for eviction later */
1090         __i915_gem_object_pin_pages(obj);
1091
1092         vma->pages = obj->mm.pages;
1093         list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
1094 }
1095
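/* Run the hole exerciser over the full ppgtt of a mock context */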
1096 static int exercise_mock(struct drm_i915_private *i915,
1097                          int (*func)(struct drm_i915_private *i915,
1098                                      struct i915_address_space *vm,
1099                                      u64 hole_start, u64 hole_end,
1100                                      unsigned long end_time))
1101 {
1102         struct i915_gem_context *ctx;
1103         struct i915_hw_ppgtt *ppgtt;
1104         IGT_TIMEOUT(end_time);
1105         int err;
1106
1107         ctx = mock_context(i915, "mock");
1108         if (!ctx)
1109                 return -ENOMEM;
1110
1111         ppgtt = ctx->ppgtt;
1112         GEM_BUG_ON(!ppgtt);
1113
1114         err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time);
1115
1116         mock_context_close(ctx);
1117         return err;
1118 }
1119
1120 static int igt_mock_fill(void *arg)
1121 {
1122         return exercise_mock(arg, fill_hole);
1123 }
1124
1125 static int igt_mock_walk(void *arg)
1126 {
1127         return exercise_mock(arg, walk_hole);
1128 }
1129
1130 static int igt_mock_pot(void *arg)
1131 {
1132         return exercise_mock(arg, pot_hole);
1133 }
1134
1135 static int igt_mock_drunk(void *arg)
1136 {
1137         return exercise_mock(arg, drunk_hole);
1138 }
1139
1140 static int igt_gtt_reserve(void *arg)
1141 {
1142         struct drm_i915_private *i915 = arg;
1143         struct drm_i915_gem_object *obj, *on;
1144         LIST_HEAD(objects);
1145         u64 total;
1146         int err;
1147
1148         /* i915_gem_gtt_reserve() tries to reserve the precise range
1149          * for the node, and evicts if it has to. So our test checks that
1150          * it can give us the requested space and prevent overlaps.
1151          */
1152
1153         /* Start by filling the GGTT */
1154         for (total = 0;
1155              total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
1156              total += 2*I915_GTT_PAGE_SIZE) {
1157                 struct i915_vma *vma;
1158
1159                 obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
1160                 if (IS_ERR(obj)) {
1161                         err = PTR_ERR(obj);
1162                         goto out;
1163                 }
1164
1165                 err = i915_gem_object_pin_pages(obj);
1166                 if (err) {
1167                         i915_gem_object_put(obj);
1168                         goto out;
1169                 }
1170
1171                 list_add(&obj->st_link, &objects);
1172
1173                 vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
1174                 if (IS_ERR(vma)) {
1175                         err = PTR_ERR(vma);
1176                         goto out;
1177                 }
1178
1179                 err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
1180                                            obj->base.size,
1181                                            total,
1182                                            obj->cache_level,
1183                                            0);
1184                 if (err) {
1185                         pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
1186                                total, i915->ggtt.base.total, err);
1187                         goto out;
1188                 }
1189                 track_vma_bind(vma);
1190
1191                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1192                 if (vma->node.start != total ||
1193                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1194                         pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
1195                                vma->node.start, vma->node.size,
1196                                total, 2*I915_GTT_PAGE_SIZE);
1197                         err = -EINVAL;
1198                         goto out;
1199                 }
1200         }
1201
1202         /* Now we start forcing evictions */
1203         for (total = I915_GTT_PAGE_SIZE;
1204              total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
1205              total += 2*I915_GTT_PAGE_SIZE) {
1206                 struct i915_vma *vma;
1207
1208                 obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
1209                 if (IS_ERR(obj)) {
1210                         err = PTR_ERR(obj);
1211                         goto out;
1212                 }
1213
1214                 err = i915_gem_object_pin_pages(obj);
1215                 if (err) {
1216                         i915_gem_object_put(obj);
1217                         goto out;
1218                 }
1219
1220                 list_add(&obj->st_link, &objects);
1221
1222                 vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
1223                 if (IS_ERR(vma)) {
1224                         err = PTR_ERR(vma);
1225                         goto out;
1226                 }
1227
1228                 err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
1229                                            obj->base.size,
1230                                            total,
1231                                            obj->cache_level,
1232                                            0);
1233                 if (err) {
1234                         pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
1235                                total, i915->ggtt.base.total, err);
1236                         goto out;
1237                 }
1238                 track_vma_bind(vma);
1239
1240                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1241                 if (vma->node.start != total ||
1242                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1243                         pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
1244                                vma->node.start, vma->node.size,
1245                                total, 2*I915_GTT_PAGE_SIZE);
1246                         err = -EINVAL;
1247                         goto out;
1248                 }
1249         }
1250
1251         /* And then try at random */
1252         list_for_each_entry_safe(obj, on, &objects, st_link) {
1253                 struct i915_vma *vma;
1254                 u64 offset;
1255
1256                 vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
1257                 if (IS_ERR(vma)) {
1258                         err = PTR_ERR(vma);
1259                         goto out;
1260                 }
1261
1262                 err = i915_vma_unbind(vma);
1263                 if (err) {
1264                         pr_err("i915_vma_unbind failed with err=%d!\n", err);
1265                         goto out;
1266                 }
1267
1268                 offset = random_offset(0, i915->ggtt.base.total,
1269                                        2*I915_GTT_PAGE_SIZE,
1270                                        I915_GTT_MIN_ALIGNMENT);
1271
1272                 err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
1273                                            obj->base.size,
1274                                            offset,
1275                                            obj->cache_level,
1276                                            0);
1277                 if (err) {
1278                         pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
1279                                total, i915->ggtt.base.total, err);
1280                         goto out;
1281                 }
1282                 track_vma_bind(vma);
1283
1284                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1285                 if (vma->node.start != offset ||
1286                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1287                         pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
1288                                vma->node.start, vma->node.size,
1289                                offset, 2*I915_GTT_PAGE_SIZE);
1290                         err = -EINVAL;
1291                         goto out;
1292                 }
1293         }
1294
1295 out:
1296         list_for_each_entry_safe(obj, on, &objects, st_link) {
1297                 i915_gem_object_unpin_pages(obj);
1298                 i915_gem_object_put(obj);
1299         }
1300         return err;
1301 }
1302
1303 static int igt_gtt_insert(void *arg)
1304 {
1305         struct drm_i915_private *i915 = arg;
1306         struct drm_i915_gem_object *obj, *on;
1307         struct drm_mm_node tmp = {};
1308         const struct invalid_insert {
1309                 u64 size;
1310                 u64 alignment;
1311                 u64 start, end;
1312         } invalid_insert[] = {
1313                 {
1314                         i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0,
1315                         0, i915->ggtt.base.total,
1316                 },
1317                 {
1318                         2*I915_GTT_PAGE_SIZE, 0,
1319                         0, I915_GTT_PAGE_SIZE,
1320                 },
1321                 {
1322                         -(u64)I915_GTT_PAGE_SIZE, 0,
1323                         0, 4*I915_GTT_PAGE_SIZE,
1324                 },
1325                 {
1326                         -(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1327                         0, 4*I915_GTT_PAGE_SIZE,
1328                 },
1329                 {
1330                         I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1331                         I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1332                 },
1333                 {}
1334         }, *ii;
1335         LIST_HEAD(objects);
1336         u64 total;
1337         int err;
1338
1339         /* i915_gem_gtt_insert() tries to allocate some free space in the GTT
1340          * to the node, evicting if required.
1341          */
1342
1343         /* Check a couple of obviously invalid requests */
1344         for (ii = invalid_insert; ii->size; ii++) {
1345                 err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp,
1346                                           ii->size, ii->alignment,
1347                                           I915_COLOR_UNEVICTABLE,
1348                                           ii->start, ii->end,
1349                                           0);
1350                 if (err != -ENOSPC) {
1351                         pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
1352                                ii->size, ii->alignment, ii->start, ii->end,
1353                                err);
1354                         return -EINVAL;
1355                 }
1356         }
1357
1358         /* Start by filling the GGTT */
1359         for (total = 0;
1360              total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
1361              total += I915_GTT_PAGE_SIZE) {
1362                 struct i915_vma *vma;
1363
1364                 obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
1365                 if (IS_ERR(obj)) {
1366                         err = PTR_ERR(obj);
1367                         goto out;
1368                 }
1369
1370                 err = i915_gem_object_pin_pages(obj);
1371                 if (err) {
1372                         i915_gem_object_put(obj);
1373                         goto out;
1374                 }
1375
1376                 list_add(&obj->st_link, &objects);
1377
1378                 vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
1379                 if (IS_ERR(vma)) {
1380                         err = PTR_ERR(vma);
1381                         goto out;
1382                 }
1383
1384                 err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
1385                                           obj->base.size, 0, obj->cache_level,
1386                                           0, i915->ggtt.base.total,
1387                                           0);
1388                 if (err == -ENOSPC) {
1389                         /* maxed out the GGTT space */
1390                         i915_gem_object_put(obj);
1391                         break;
1392                 }
1393                 if (err) {
1394                         pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1395                                total, i915->ggtt.base.total, err);
1396                         goto out;
1397                 }
1398                 track_vma_bind(vma);
1399                 __i915_vma_pin(vma);
1400
1401                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1402         }
1403
1404         list_for_each_entry(obj, &objects, st_link) {
1405                 struct i915_vma *vma;
1406
1407                 vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
1408                 if (IS_ERR(vma)) {
1409                         err = PTR_ERR(vma);
1410                         goto out;
1411                 }
1412
1413                 if (!drm_mm_node_allocated(&vma->node)) {
1414                         pr_err("VMA was unexpectedly evicted!\n");
1415                         err = -EINVAL;
1416                         goto out;
1417                 }
1418
1419                 __i915_vma_unpin(vma);
1420         }
1421
1422         /* If we then reinsert, we should find the same hole */
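             /* With the GGTT otherwise full, unbinding a vma leaves exactly one
              * hole, so reinserting the same object must land back at the
              * offset it previously occupied.
              */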
1423         list_for_each_entry_safe(obj, on, &objects, st_link) {
1424                 struct i915_vma *vma;
1425                 u64 offset;
1426
1427                 vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
1428                 if (IS_ERR(vma)) {
1429                         err = PTR_ERR(vma);
1430                         goto out;
1431                 }
1432
1433                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1434                 offset = vma->node.start;
1435
1436                 err = i915_vma_unbind(vma);
1437                 if (err) {
1438                         pr_err("i915_vma_unbind failed with err=%d!\n", err);
1439                         goto out;
1440                 }
1441
1442                 err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
1443                                           obj->base.size, 0, obj->cache_level,
1444                                           0, i915->ggtt.base.total,
1445                                           0);
1446                 if (err) {
1447                         pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1448                                total, i915->ggtt.base.total, err);
1449                         goto out;
1450                 }
1451                 track_vma_bind(vma);
1452
1453                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1454                 if (vma->node.start != offset) {
1455                         pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1456                                offset, vma->node.start);
1457                         err = -EINVAL;
1458                         goto out;
1459                 }
1460         }
1461
1462         /* And then force evictions */
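             /* Eviction pass: the GGTT is still full of (now unpinned)
              * single-page nodes, so inserting 2-page objects can only succeed
              * if i915_gem_gtt_insert() evicts existing nodes to make room.
              */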
1463         for (total = 0;
1464              total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
1465              total += 2*I915_GTT_PAGE_SIZE) {
1466                 struct i915_vma *vma;
1467
1468                 obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);
1469                 if (IS_ERR(obj)) {
1470                         err = PTR_ERR(obj);
1471                         goto out;
1472                 }
1473
1474                 err = i915_gem_object_pin_pages(obj);
1475                 if (err) {
1476                         i915_gem_object_put(obj);
1477                         goto out;
1478                 }
1479
1480                 list_add(&obj->st_link, &objects);
1481
1482                 vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
1483                 if (IS_ERR(vma)) {
1484                         err = PTR_ERR(vma);
1485                         goto out;
1486                 }
1487
1488                 err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
1489                                           obj->base.size, 0, obj->cache_level,
1490                                           0, i915->ggtt.base.total,
1491                                           0);
1492                 if (err) {
1493                         pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1494                                total, i915->ggtt.base.total, err);
1495                         goto out;
1496                 }
1497                 track_vma_bind(vma);
1498
1499                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1500         }
1501
1502 out:
1503         list_for_each_entry_safe(obj, on, &objects, st_link) {
1504                 i915_gem_object_unpin_pages(obj);
1505                 i915_gem_object_put(obj);
1506         }
1507         return err;
1508 }
1509
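     /* The mock selftests below exercise the GTT range management against a
      * mock_gem_device(), i.e. entirely in software with no hardware required;
      * struct_mutex is taken around the whole run.
      */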
1510 int i915_gem_gtt_mock_selftests(void)
1511 {
1512         static const struct i915_subtest tests[] = {
1513                 SUBTEST(igt_mock_drunk),
1514                 SUBTEST(igt_mock_walk),
1515                 SUBTEST(igt_mock_pot),
1516                 SUBTEST(igt_mock_fill),
1517                 SUBTEST(igt_gtt_reserve),
1518                 SUBTEST(igt_gtt_insert),
1519         };
1520         struct drm_i915_private *i915;
1521         int err;
1522
1523         i915 = mock_gem_device();
1524         if (!i915)
1525                 return -ENOMEM;
1526
1527         mutex_lock(&i915->drm.struct_mutex);
1528         err = i915_subtests(tests, i915);
1529         mutex_unlock(&i915->drm.struct_mutex);
1530
1531         drm_dev_unref(&i915->drm);
1532         return err;
1533 }
1534
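     /* The live selftests run against a real device supplied by the caller;
      * the GGTT total must be page aligned (see the GEM_BUG_ON below) before
      * the subtests are dispatched.
      */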
1535 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
1536 {
1537         static const struct i915_subtest tests[] = {
1538                 SUBTEST(igt_ppgtt_alloc),
1539                 SUBTEST(igt_ppgtt_lowlevel),
1540                 SUBTEST(igt_ppgtt_drunk),
1541                 SUBTEST(igt_ppgtt_walk),
1542                 SUBTEST(igt_ppgtt_pot),
1543                 SUBTEST(igt_ppgtt_fill),
1544                 SUBTEST(igt_ppgtt_shrink),
1545                 SUBTEST(igt_ggtt_lowlevel),
1546                 SUBTEST(igt_ggtt_drunk),
1547                 SUBTEST(igt_ggtt_walk),
1548                 SUBTEST(igt_ggtt_pot),
1549                 SUBTEST(igt_ggtt_fill),
1550                 SUBTEST(igt_ggtt_page),
1551         };
1552
1553         GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));
1554
1555         return i915_subtests(tests, i915);
1556 }