/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
#include "i915_random.h"

#include "mock_context.h"
#include "mock_drm.h"
#include "mock_gem_device.h"
static void fake_free_pages(struct drm_i915_gem_object *obj,
			    struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}
static struct sg_table *
fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
	struct sg_table *pages;
	struct scatterlist *sg;
	typeof(obj->base.size) rem;

	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	rem = round_up(obj->base.size, BIT(31)) >> 31;
	if (sg_alloc_table(pages, rem, GFP)) {
		kfree(pages);
		return ERR_PTR(-ENOMEM);
	}

	rem = obj->base.size;
	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
		unsigned long len = min_t(typeof(rem), rem, BIT(31));

		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
		sg_dma_address(sg) = page_to_phys(sg_page(sg));
		sg_dma_len(sg) = len;

		rem -= len;
	}
	GEM_BUG_ON(rem);

	obj->mm.madv = I915_MADV_DONTNEED;
	return pages;
#undef GFP
}
static void fake_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	fake_free_pages(obj, pages);
	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}
static const struct drm_i915_gem_object_ops fake_ops = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_pages,
	.put_pages = fake_put_pages,
};
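
/*
 * A note on the fake backing store above (an explanatory sketch, not from
 * the original sources): fake_get_pages() builds a scatterlist that points
 * at the arbitrary pfn PFN_BIAS instead of allocating real memory, so the
 * GTT selftests can exercise address-space management for objects far
 * larger than available RAM. Each scatterlist segment covers at most
 * BIT(31) bytes, e.g. for a hypothetical 4GiB object:
 *
 *	rem = round_up(BIT_ULL(32), BIT(31)) >> 31;	// 2 segments
 *	sg_alloc_table(pages, rem, GFP);		// 2 x 2GiB entries
 *
 * Marking the object I915_MADV_DONTNEED lets the shrinker drop these pages
 * without writeback, since there is nothing real to preserve.
 */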
static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc(i915);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &fake_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	/* Preallocate the "backing storage" */
	if (i915_gem_object_pin_pages(obj)) {
		i915_gem_object_put(obj);
		return ERR_PTR(-ENOMEM);
	}

	i915_gem_object_unpin_pages(obj);
	return obj;
}
static int igt_ppgtt_alloc(void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct i915_hw_ppgtt *ppgtt;
	u64 size, last;
	int err;

	/* Allocate a ppgtt and try to fill the entire range */

	if (!USES_PPGTT(dev_priv))
		return 0;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return -ENOMEM;

	mutex_lock(&dev_priv->drm.struct_mutex);
	err = __hw_ppgtt_init(ppgtt, dev_priv);
	if (err)
		goto err_ppgtt;

	if (!ppgtt->base.allocate_va_range)
		goto err_ppgtt_cleanup;

	/* Check we can allocate the entire range */
	for (size = 4096;
	     size <= ppgtt->base.total;
	     size <<= 2) {
		err = ppgtt->base.allocate_va_range(&ppgtt->base, 0, size);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
					size, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}

		ppgtt->base.clear_range(&ppgtt->base, 0, size);
	}

	/* Check we can incrementally allocate the entire range */
	for (last = 0, size = 4096;
	     size <= ppgtt->base.total;
	     last = size, size <<= 2) {
		err = ppgtt->base.allocate_va_range(&ppgtt->base,
						    last, size - last);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
					last, size - last, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}
	}

err_ppgtt_cleanup:
	ppgtt->base.cleanup(&ppgtt->base);
err_ppgtt:
	kfree(ppgtt);
	mutex_unlock(&dev_priv->drm.struct_mutex);
	return err;
}
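
/*
 * An illustrative note (not from the original sources): both loops above
 * grow the allocation geometrically, size <<= 2, i.e. 4096, 16384, 65536,
 * ... bytes, so even a full 48b address space is covered in ~19 steps while
 * still probing allocation and teardown at every power-of-four boundary.
 * -ENOMEM is tolerated because the test may legitimately exhaust system
 * memory before it exhausts the virtual range.
 */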
static int lowlevel_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	I915_RND_STATE(seed_prng);
	unsigned int size;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		I915_RND_SUBSTATE(prng, seed_prng);
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		u64 hole_size;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size;

		do {
			count >>= 1;
			order = i915_random_order(count, &prng);
		} while (!order && count);
		if (!order)
			break;

		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		GEM_BUG_ON(obj->base.size != BIT_ULL(size));

		if (i915_gem_object_pin_pages(obj)) {
			i915_gem_object_put(obj);
			kfree(order);
			break;
		}

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);

			if (igt_timeout(end_time,
					"%s timed out before %d/%d\n",
					__func__, n, count)) {
				hole_end = hole_start; /* quit */
				break;
			}

			if (vm->allocate_va_range &&
			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
				break;

			vm->insert_entries(vm, obj->mm.pages, addr,
					   I915_CACHE_NONE, 0);
		}
		count = n;

		i915_random_reorder(order, count, &prng);
		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
			vm->clear_range(vm, addr, BIT_ULL(size));
		}

		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);

		kfree(order);
	}

	return 0;
}
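
/*
 * An aside on the pattern above (illustrative, not from the original
 * sources): the test drives the low-level vm hooks directly, bypassing the
 * VMA layer. One fake object provides the backing scatterlist, and a random
 * permutation of slot indices decides the order of insert_entries() calls;
 * a reshuffled permutation then decides the clear_range() order, so bind
 * and unbind orderings are decorrelated. For a hole of H bytes probed at
 * chunk size 1 << size there are H >> size slots, capped so that the order
 * array stays within KMALLOC_MAX_SIZE.
 */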
static void close_object_list(struct list_head *objects,
			      struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, vm, NULL);
		if (!IS_ERR(vma))
			WARN_ON(i915_vma_unbind(vma));
		/* Only ppgtt vma may be closed before the object is freed */
		if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
			i915_vma_close(vma);

		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}
}
static int fill_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	struct drm_i915_gem_object *obj;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
	unsigned long npages, prime, flags;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/* Try binding many VMA working inwards from either edge */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(prime, 2, max_step) {
		for (npages = 1; npages <= max_pages; npages *= prime) {
			const u64 full_size = npages << PAGE_SHIFT;
			const struct {
				const char *name;
				u64 offset;
				int step;
			} phases[] = {
				{ "top-down", hole_end, -1, },
				{ "bottom-up", hole_start, 1, },
				{ }
			}, *p;

			obj = fake_dma_object(i915, full_size);
			if (IS_ERR(obj))
				break;

			list_add(&obj->st_link, &objects);

			/* Align differing sized objects against the edges, and
			 * check we don't walk off into the void when binding
			 * them into the GTT.
			 */
			for (p = phases; p->name; p++) {
				u64 offset;

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						i915_vma_unpin(vma);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						i915_vma_unpin(vma);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}
			}

			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
					__func__, npages, prime)) {
				err = -EINTR;
				goto err;
			}
		}

		close_object_list(&objects, vm);
	}

	return 0;

err:
	close_object_list(&objects, vm);
	return err;
}
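
/*
 * Illustrative note (not part of the original sources): fill_hole sizes its
 * objects as npages = prime^k pages, for each prime up to sqrt(max_pages).
 * E.g. with prime = 3 the series is 1, 3, 9, 27, ... pages, which packs the
 * hole with mutually misaligned objects from both edges and is a cheap way
 * to shake out rounding errors at the hole boundaries.
 */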
static int walk_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
	unsigned long flags;
	unsigned long size;

	/* Try binding a single VMA in different positions within the hole */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(size, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;
		u64 addr;
		int err = 0;

		obj = fake_dma_object(i915, size << PAGE_SHIFT);
		if (IS_ERR(obj))
			break;

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_put;
		}

		for (addr = hole_start;
		     addr + obj->base.size < hole_end;
		     addr += obj->base.size) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
				       __func__, addr, vma->size,
				       hole_start, hole_end, err);
				goto err_close;
			}
			i915_vma_unpin(vma);

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				err = -EINVAL;
				goto err_close;
			}

			err = i915_vma_unbind(vma);
			if (err) {
				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
				       __func__, addr, vma->size, err);
				goto err_close;
			}

			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

			if (igt_timeout(end_time,
					"%s timed out at %llx\n",
					__func__, addr)) {
				err = -EINTR;
				goto err_close;
			}
		}

err_close:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
err_put:
		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}
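
/*
 * An aside (illustrative, not from the original sources): walk_hole slides
 * one object of a prime number of pages across the hole in steps of its own
 * size. Prime sizes keep successive placements from repeatedly landing on
 * the same nicely aligned offsets, so each pass probes a fresh set of
 * page-table boundaries.
 */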
static int pot_hole(struct drm_i915_private *i915,
		    struct i915_address_space *vm,
		    u64 hole_start, u64 hole_end,
		    unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned long flags;
	unsigned int pot;
	int err = 0;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	/* Insert a pair of pages across every pot boundary within the hole */
	for (pot = fls64(hole_end - 1) - 1;
	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
	     pot--) {
		u64 step = BIT_ULL(pot);
		u64 addr;

		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr += step) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr,
				       hole_start, hole_end,
				       err);
				goto err;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);
		}

		if (igt_timeout(end_time,
				"%s timed out after %d/%d\n",
				__func__, pot, fls64(hole_end - 1) - 1)) {
			err = -EINTR;
			goto err;
		}
	}

err:
	if (!i915_vma_is_ggtt(vma))
		i915_vma_close(vma);
err_obj:
	i915_gem_object_put(obj);
	return err;
}
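
/*
 * Worked example for the pot loop above (illustrative, not from the
 * original sources): with 4KiB pages and pot = 20 (step = 1MiB), the pair
 * of pages is placed so it straddles each 1MiB boundary inside the hole:
 *
 *	addr = round_up(hole_start + 4096, 1 << 20) - 4096;
 *
 * i.e. the first page ends exactly on the boundary and the second begins
 * on it, catching off-by-one errors in page-table handling at every
 * power-of-two alignment down to the minimum object size.
 */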
static int drunk_hole(struct drm_i915_private *i915,
		      struct i915_address_space *vm,
		      u64 hole_start, u64 hole_end,
		      unsigned long end_time)
{
	I915_RND_STATE(prng);
	unsigned int size;
	unsigned long flags;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		struct i915_vma *vma;
		u64 hole_size;
		int err = 0;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size;

		do {
			count >>= 1;
			order = i915_random_order(count, &prng);
		} while (!order && count);
		if (!order)
			break;

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_obj;
		}

		GEM_BUG_ON(vma->size != BIT_ULL(size));

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr, BIT_ULL(size),
				       hole_start, hole_end,
				       err);
				goto err;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, BIT_ULL(size));
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);

			if (igt_timeout(end_time,
					"%s timed out after %d/%d\n",
					__func__, n, count)) {
				err = -EINTR;
				goto err;
			}
		}

err:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
err_obj:
		i915_gem_object_put(obj);
		kfree(order);
		if (err)
			return err;
	}

	return 0;
}
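
/*
 * Contrast with lowlevel_hole (an explanatory note, not from the original
 * sources): drunk_hole performs the same randomised walk but through the
 * full VMA API, i915_vma_pin()/i915_vma_unbind() at PIN_OFFSET_FIXED
 * addresses, so it additionally covers drm_mm node bookkeeping and the
 * misplacement checks rather than just the raw page-table hooks.
 */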
static int __shrink_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
	unsigned int order = 12;
	LIST_HEAD(objects);
	int err = 0;
	u64 addr;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (addr = hole_start; addr < hole_end; ) {
		struct i915_vma *vma;
		u64 size = BIT_ULL(order++);

		size = min(size, hole_end - addr);
		obj = fake_dma_object(i915, size);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		GEM_BUG_ON(vma->size != size);

		err = i915_vma_pin(vma, 0, 0, addr | flags);
		if (err) {
			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
			       __func__, addr, size, hole_start, hole_end, err);
			break;
		}

		if (!drm_mm_node_allocated(&vma->node) ||
		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
			pr_err("%s incorrect at %llx + %llx\n",
			       __func__, addr, size);
			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			err = -EINVAL;
			break;
		}

		i915_vma_unpin(vma);
		addr += size;

		if (igt_timeout(end_time,
				"%s timed out at offset %llx [%llx - %llx]\n",
				__func__, addr, hole_start, hole_end)) {
			err = -EINTR;
			break;
		}
	}

	close_object_list(&objects, vm);
	return err;
}
static int shrink_hole(struct drm_i915_private *i915,
		       struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
{
	unsigned long prime;
	int err;

	vm->fault_attr.probability = 999;
	atomic_set(&vm->fault_attr.times, -1);

	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
		vm->fault_attr.interval = prime;
		err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
		if (err)
			break;
	}

	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

	return err;
}
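
/*
 * A note on the fault injection above (an explanatory sketch, assuming the
 * semantics of the kernel's generic struct fault_attr): probability = 999
 * with times = -1 effectively makes every eligible allocation attempt
 * fail, while interval = prime selects which attempts are eligible.
 * Sweeping the interval through the primes therefore moves the injected
 * failure to a different depth of __shrink_hole's page-table allocation on
 * each pass, exercising a different unwind path each time.
 */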
static int exercise_ppgtt(struct drm_i915_private *dev_priv,
			  int (*func)(struct drm_i915_private *i915,
				      struct i915_address_space *vm,
				      u64 hole_start, u64 hole_end,
				      unsigned long end_time))
{
	struct drm_file *file;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	if (!USES_FULL_PPGTT(dev_priv))
		return 0;

	file = mock_file(dev_priv);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&dev_priv->drm.struct_mutex);
	ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock");
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}
	GEM_BUG_ON(offset_in_page(ppgtt->base.total));
	GEM_BUG_ON(ppgtt->base.closed);

	err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);

	i915_ppgtt_close(&ppgtt->base);
	i915_ppgtt_put(ppgtt);
out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	mock_file_free(dev_priv, file);
	return err;
}
static int igt_ppgtt_fill(void *arg)
{
	return exercise_ppgtt(arg, fill_hole);
}

static int igt_ppgtt_walk(void *arg)
{
	return exercise_ppgtt(arg, walk_hole);
}

static int igt_ppgtt_pot(void *arg)
{
	return exercise_ppgtt(arg, pot_hole);
}

static int igt_ppgtt_drunk(void *arg)
{
	return exercise_ppgtt(arg, drunk_hole);
}

static int igt_ppgtt_lowlevel(void *arg)
{
	return exercise_ppgtt(arg, lowlevel_hole);
}

static int igt_ppgtt_shrink(void *arg)
{
	return exercise_ppgtt(arg, shrink_hole);
}
static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
{
	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);

	if (a->start < b->start)
		return -1;
	else
		return 1;
}
static int exercise_ggtt(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	u64 hole_start, hole_end, last = 0;
	struct drm_mm_node *node;
	IGT_TIMEOUT(end_time);
	int err = 0;

	mutex_lock(&i915->drm.struct_mutex);
restart:
	list_sort(NULL, &ggtt->base.mm.hole_stack, sort_holes);
	drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) {
		if (hole_start < last)
			continue;

		if (ggtt->base.mm.color_adjust)
			ggtt->base.mm.color_adjust(node, 0,
						   &hole_start, &hole_end);
		if (hole_start >= hole_end)
			continue;

		err = func(i915, &ggtt->base, hole_start, hole_end, end_time);
		if (err)
			break;

		/* As we have manipulated the drm_mm, the list may be corrupt */
		last = hole_end;
		goto restart;
	}
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}
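
/*
 * Design note (illustrative, not from the original sources): each func()
 * call binds and unbinds nodes, which rewrites drm_mm's hole list, so the
 * iterator above cannot simply continue. Instead it records the end of the
 * hole it just processed in 'last', restarts the walk from a freshly sorted
 * hole_stack, and skips every hole below 'last', visiting each region of
 * the GGTT exactly once.
 */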
static int igt_ggtt_fill(void *arg)
{
	return exercise_ggtt(arg, fill_hole);
}

static int igt_ggtt_walk(void *arg)
{
	return exercise_ggtt(arg, walk_hole);
}

static int igt_ggtt_pot(void *arg)
{
	return exercise_ggtt(arg, pot_hole);
}

static int igt_ggtt_drunk(void *arg)
{
	return exercise_ggtt(arg, drunk_hole);
}

static int igt_ggtt_lowlevel(void *arg)
{
	return exercise_ggtt(arg, lowlevel_hole);
}
static int igt_ggtt_page(void *arg)
{
	const unsigned int count = PAGE_SIZE/sizeof(u32);
	I915_RND_STATE(prng);
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node tmp;
	unsigned int *order, n;
	int err;

	mutex_lock(&i915->drm.struct_mutex);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_unlock;
	}

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_free;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
					  1024 * PAGE_SIZE, 0,
					  I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);
	if (err)
		goto out_unpin;

	order = i915_random_order(count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_remove;
	}

	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;

		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, 0),
				       offset, I915_CACHE_NONE, 0);

		vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
		iowrite32(n, vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);
	}

	i915_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;
		u32 val;

		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, 0),
				       offset, I915_CACHE_NONE, 0);

		vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
		val = ioread32(vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);

		if (val != n) {
			pr_err("insert page failed: found %d, expected %d\n",
			       val, n);
			err = -EINVAL;
			break;
		}
	}

	kfree(order);
out_remove:
	drm_mm_remove_node(&tmp);
out_unpin:
	i915_gem_object_unpin_pages(obj);
out_free:
	i915_gem_object_put(obj);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
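
/*
 * Illustrative note (not from the original sources): the single backing
 * page is mapped at up to 1024 random GGTT offsets within the reserved
 * node. Pass one writes the index n into dword n through one random
 * mapping order; pass two remaps the page in a different random order and
 * reads each dword back. Because every offset wrote a distinct dword of
 * the same physical page, any stale or misdirected PTE shows up as
 * val != n.
 */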
static void track_vma_bind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	obj->bind_count++; /* track for eviction later */
	__i915_gem_object_pin_pages(obj);

	vma->pages = obj->mm.pages;
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
}
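
/*
 * Note (illustrative, not from the original sources): under the mock
 * device nothing is really bound, so track_vma_bind() fakes just enough
 * bookkeeping for the eviction paths exercised by igt_gtt_reserve() and
 * igt_gtt_insert(): a raised bind_count, a pinned page reference, a
 * populated vma->pages, and a slot on the VM's inactive list.
 */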
static int exercise_mock(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	struct i915_gem_context *ctx;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	ctx = mock_context(i915, "mock");
	if (!ctx)
		return -ENOMEM;

	ppgtt = ctx->ppgtt;
	GEM_BUG_ON(!ppgtt);

	err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time);

	mock_context_close(ctx);
	return err;
}
static int igt_mock_fill(void *arg)
{
	return exercise_mock(arg, fill_hole);
}

static int igt_mock_walk(void *arg)
{
	return exercise_mock(arg, walk_hole);
}

static int igt_mock_pot(void *arg)
{
	return exercise_mock(arg, pot_hole);
}

static int igt_mock_drunk(void *arg)
{
	return exercise_mock(arg, drunk_hole);
}
static int igt_gtt_reserve(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;
	LIST_HEAD(objects);
	u64 total;
	int err;

	/* i915_gem_gtt_reserve() tries to reserve the precise range
	 * for the node, and evicts if it has to. So our test checks that
	 * it can give us the requested space and prevent overlaps.
	 */

	/* Start by filling the GGTT */
	for (total = 0;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* Now we start forcing evictions */
	for (total = I915_GTT_PAGE_SIZE;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then try at random */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		offset = random_offset(0, i915->ggtt.base.total,
				       2*I915_GTT_PAGE_SIZE,
				       I915_GTT_MIN_ALIGNMENT);

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
					   obj->base.size,
					   offset,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       offset, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}
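
/*
 * Summary of the three passes above (illustrative, not from the original
 * sources): pass 1 reserves back-to-back nodes at explicit offsets 0, 2,
 * 4, ... pages until the GGTT is full; pass 2 asks for the same ranges
 * shifted by one page, which can only succeed by evicting the pass-1
 * neighbours; pass 3 unbinds each node and re-reserves it at a random,
 * suitably aligned offset. A node landing anywhere other than the exact
 * requested offset is reported as a failure.
 */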
static int igt_gtt_insert(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;
	struct drm_mm_node tmp = {};
	const struct invalid_insert {
		u64 size;
		u64 alignment;
		u64 start, end;
	} invalid_insert[] = {
		{
			i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0,
			0, i915->ggtt.base.total,
		},
		{
			2*I915_GTT_PAGE_SIZE, 0,
			0, I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)I915_GTT_PAGE_SIZE, 0,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
		},
		{}
	}, *ii;
	LIST_HEAD(objects);
	u64 total;
	int err;

	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
	 * to the node, evicting if required.
	 */

	/* Check a couple of obviously invalid requests */
	for (ii = invalid_insert; ii->size; ii++) {
		err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp,
					  ii->size, ii->alignment,
					  I915_COLOR_UNEVICTABLE,
					  ii->start, ii->end,
					  0);
		if (err != -ENOSPC) {
			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
			       ii->size, ii->alignment, ii->start, ii->end,
			       err);
			return -EINVAL;
		}
	}

	/* Start by filling the GGTT */
	for (total = 0;
	     total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
					  0);
		if (err == -ENOSPC) {
			/* maxed out the GGTT space */
			i915_gem_object_put(obj);
			break;
		}
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);
		__i915_vma_pin(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

	list_for_each_entry(obj, &objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		if (!drm_mm_node_allocated(&vma->node)) {
			pr_err("VMA was unexpectedly evicted!\n");
			err = -EINVAL;
			goto out;
		}

		__i915_vma_unpin(vma);
	}

	/* If we then reinsert, we should find the same hole */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		offset = vma->node.start;

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
					  0);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset) {
			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
			       offset, vma->node.start);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then force evictions */
	for (total = 0;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
					  0);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}
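
/*
 * Summary of igt_gtt_insert (illustrative, not from the original sources):
 * the invalid_insert[] table first checks that impossible requests, e.g. a
 * node larger than the whole GGTT or a range smaller than the node, fail
 * cleanly with -ENOSPC before any eviction is attempted. The three passes
 * then fill the GGTT with single-page nodes, verify that unbinding one
 * node and reinserting it lands back in the only free hole, and finally
 * insert double-page nodes that can only fit by evicting earlier ones.
 */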
int i915_gem_gtt_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_drunk),
		SUBTEST(igt_mock_walk),
		SUBTEST(igt_mock_pot),
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_gtt_reserve),
		SUBTEST(igt_gtt_insert),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_subtests(tests, i915);
	mutex_unlock(&i915->drm.struct_mutex);

	drm_dev_unref(&i915->drm);
	return err;
}
int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_ppgtt_alloc),
		SUBTEST(igt_ppgtt_lowlevel),
		SUBTEST(igt_ppgtt_drunk),
		SUBTEST(igt_ppgtt_walk),
		SUBTEST(igt_ppgtt_pot),
		SUBTEST(igt_ppgtt_fill),
		SUBTEST(igt_ppgtt_shrink),
		SUBTEST(igt_ggtt_lowlevel),
		SUBTEST(igt_ggtt_drunk),
		SUBTEST(igt_ggtt_walk),
		SUBTEST(igt_ggtt_pot),
		SUBTEST(igt_ggtt_fill),
		SUBTEST(igt_ggtt_page),
	};

	GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));

	return i915_subtests(tests, i915);
}