 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
						   bool force);
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly);
static void
i915_gem_object_retire(struct drm_i915_gem_object *obj);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
					     struct shrink_control *sc);
static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
					    struct shrink_control *sc);
static int i915_gem_shrinker_oom(struct notifier_block *nb,
				 unsigned long event,
				 void *ptr);
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);

static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}
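/*
 * Note on the two helpers here: CPU access is coherent with the GPU either
 * when the platform has a shared last-level cache (LLC) or when the object
 * is mapped snooped (any cache_level other than I915_CACHE_NONE); only
 * uncached objects on non-LLC parts need explicit clflushing. Scanout
 * (pin_display) is the exception below, as the display engine reads behind
 * the CPU caches.
 */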
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
		   i915_terminally_wedged(error))
	if (EXIT_COND)
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       EXIT_COND,
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}
#undef EXIT_COND

	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound_any(obj) && !obj->active;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
				  args->gtt_end);
	dev_priv->gtt.mappable_end = args->gtt_end;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (i915_gem_obj_is_pinned(obj))
			pinned += i915_gem_obj_ggtt_size(obj);
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->gtt.base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}
static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
{
	drm_dma_handle_t *phys = obj->phys_handle;

	if (!phys)
		return;

	if (obj->madv == I915_MADV_WILLNEED) {
		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
		char *vaddr = phys->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page = shmem_read_mapping_page(mapping, i);
			if (!IS_ERR(page)) {
				char *dst = kmap_atomic(page);
				memcpy(dst, vaddr, PAGE_SIZE);
				drm_clflush_virt_range(dst, PAGE_SIZE);
				kunmap_atomic(dst);

				set_page_dirty(page);
				mark_page_accessed(page);
				page_cache_release(page);
			}
			vaddr += PAGE_SIZE;
		}
		i915_gem_chipset_flush(obj->base.dev);
	}

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
#endif
	drm_pci_free(obj->base.dev, phys);
	obj->phys_handle = NULL;
}

int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
			    int align)
{
	drm_dma_handle_t *phys;
	struct address_space *mapping;
	char *vaddr;
	int i;

	if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
			return -EBUSY;

		return 0;
	}

	if (obj->madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (obj->base.filp == NULL)
		return -EINVAL;

	/* create a new object */
	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
	if (!phys)
		return -ENOMEM;

	vaddr = phys->vaddr;
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
#endif
	mapping = file_inode(obj->base.filp)->i_mapping;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page)) {
#ifdef CONFIG_X86
			set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
#endif
			drm_pci_free(obj->base.dev, phys);
			return PTR_ERR(page);
		}

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		kunmap_atomic(src);

		mark_page_accessed(page);
		page_cache_release(page);

		vaddr += PAGE_SIZE;
	}

	obj->phys_handle = phys;
	return 0;
}
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = to_user_ptr(args->data_ptr);

	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten)
			return -EFAULT;
	}

	i915_gem_chipset_flush(dev);
	return 0;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->slab, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
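/*
 * For example, a 1920x1080 dumb buffer at 32 bpp yields
 * pitch = ALIGN(1920 * 4, 64) = 7680 bytes and
 * size = 7680 * 1080 = 8294400 bytes (~7.9 MiB).
 */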
/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}
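/*
 * A short illustration of the swizzle math above: gpu_offset ^ 64 swaps
 * the two 64-byte cachelines within each 128-byte pair, so a copy aimed
 * at bytes 0..63 actually touches bytes 64..127 and vice versa. Chunking
 * at cacheline boundaries (cacheline_end) keeps each copy within a
 * single swizzled cacheline.
 */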
/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;

		i915_gem_object_retire(obj);
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	return ret;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}
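/*
 * For example, flushing bytes 100..139 of a swizzled range widens to
 * bytes 0..255: both 64-byte halves of each 128-byte pair get flushed,
 * so the flush covers the data wherever the swizzle placed it.
 */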
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}
static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}
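/*
 * The atomic write-combining mapping above cannot tolerate a fault while
 * copying from userspace (page faults are disabled inside the atomic
 * kmap section), hence the _inatomic copy variant. A non-zero return
 * simply tells the caller to fall back to a slower path that may fault.
 */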
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->gtt.mappable, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_ggtt_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
					user_data, page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}
static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;

		i915_gem_object_retire(obj);
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire page. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				i915_gem_chipset_flush(dev);
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(dev);

	return ret;
}
/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_handle) {
		ret = i915_gem_phys_pwrite(obj, args, file);
		goto out;
	}

	if (obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC)
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
int
i915_gem_check_wedge(struct i915_gpu_error *error,
		     bool interruptible)
{
	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but the reset failed ... */
		if (i915_terminally_wedged(error))
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
int
i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
{
	int ret;

	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
	if (seqno == ring->outstanding_lazy_seqno)
		ret = i915_add_request(ring, NULL);

	return ret;
}
static void fake_irq(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}

static bool missed_irq(struct drm_i915_private *dev_priv,
		       struct intel_engine_cs *ring)
{
	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}

static bool can_wait_boost(struct drm_i915_file_private *file_priv)
{
	if (file_priv == NULL)
		return true;

	return !atomic_xchg(&file_priv->rps_wait_boost, true);
}
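/*
 * Note: atomic_xchg() both tests and sets rps_wait_boost, so each file
 * takes at most one RPS boost at a time; the per-file idle work (re-armed
 * in __wait_seqno below) is what eventually clears the flag again.
 */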
/**
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: the seqno to wait for
 * @reset_counter: reset sequence associated with the given seqno
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
			unsigned reset_counter,
			bool interruptible,
			struct timespec *timeout,
			struct drm_i915_file_private *file_priv)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const bool irq_test_in_progress =
		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
	struct timespec before, now;
	DEFINE_WAIT(wait);
	unsigned long timeout_expire;
	int ret;

	WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");

	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

	timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;

	if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
		gen6_rps_boost(dev_priv);
		if (file_priv)
			mod_delayed_work(dev_priv->wq,
					 &file_priv->mm.idle_work,
					 msecs_to_jiffies(100));
	}

	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged */
	trace_i915_gem_request_wait_begin(ring, seqno);
	getrawmonotonic(&before);
	for (;;) {
		struct timer_list timer;

		prepare_to_wait(&ring->irq_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
			 * is truly gone. */
			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
			if (ret == 0)
				ret = -EAGAIN;
			break;
		}

		if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
			ret = 0;
			break;
		}

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (timeout && time_after_eq(jiffies, timeout_expire)) {
			ret = -ETIME;
			break;
		}

		timer.function = NULL;
		if (timeout || missed_irq(dev_priv, ring)) {
			unsigned long expire;

			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
			mod_timer(&timer, expire);
		}

		io_schedule();

		if (timer.function) {
			del_singleshot_timer_sync(&timer);
			destroy_timer_on_stack(&timer);
		}
	}
	getrawmonotonic(&now);
	trace_i915_gem_request_wait_end(ring, seqno);

	if (!irq_test_in_progress)
		ring->irq_put(ring);

	finish_wait(&ring->irq_queue, &wait);

	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
		*timeout = timespec_sub(*timeout, sleep_time);
		if (!timespec_valid(timeout)) /* i.e. negative time remains */
			set_normalized_timespec(timeout, 0, 0);
	}
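	/*
	 * Any negative remaining time is clamped to zero above, so callers
	 * (and ultimately userspace) always see a well-formed "no time
	 * left" timespec on timeout rather than a negative value.
	 */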
	return ret;
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible = dev_priv->mm.interruptible;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(seqno == 0);

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	return __wait_seqno(ring, seqno,
			    atomic_read(&dev_priv->gpu_error.reset_counter),
			    interruptible, NULL, NULL);
}

static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
				     struct intel_engine_cs *ring)
{
	if (!obj->active)
		return 0;

	/* Manually manage the write flush as we may have not yet
	 * retired the buffer.
	 *
	 * Note that the last_write_seqno is always the earlier of
	 * the two (read/write) seqno, so if we have successfully waited,
	 * we know we have passed the last write.
	 */
	obj->last_write_seqno = 0;

	return 0;
}
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct intel_engine_cs *ring = obj->ring;
	u32 seqno;
	int ret;

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
					    struct drm_i915_file_private *file_priv,
					    bool readonly)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = obj->ring;
	unsigned reset_counter;
	u32 seqno;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!dev_priv->mm.interruptible);

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
	mutex_lock(&dev->struct_mutex);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj,
							  file->driver_priv,
							  !write_domain);
	if (ret)
		goto unref;

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

unref:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_display)
		i915_gem_object_flush_cpu_write_domain(obj, true);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->filp) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	intel_runtime_pm_get(dev_priv);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Try to flush the object off the GPU first without holding the lock.
	 * Upon reacquiring the lock, we will perform our sanity checks and then
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
	if (ret)
		goto unlock;

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
		ret = -EFAULT;
		goto unlock;
	}

	/* Now bind it into the GTT if needed */
	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
	if (ret)
		goto unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto unpin;

	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto unpin;

	/* Finally, remap it using the new GTT offset */
	pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
	pfn >>= PAGE_SHIFT;

	if (!obj->fault_mappable) {
		unsigned long size = min_t(unsigned long,
					   vma->vm_end - vma->vm_start,
					   obj->base.size);
		int i;

		for (i = 0; i < size >> PAGE_SHIFT; i++) {
			ret = vm_insert_pfn(vma,
					    (unsigned long)vma->vm_start + i * PAGE_SIZE,
					    pfn + i);
			if (ret)
				break;
		}

		obj->fault_mappable = true;
	} else
		ret = vm_insert_pfn(vma,
				    (unsigned long)vmf->virtual_address,
				    pfn + page_offset);
unpin:
	i915_gem_object_ggtt_unpin(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EIO:
		/* If this -EIO is due to a gpu hang, give the reset code a
		 * chance to clean up the mess. Otherwise return the proper
		 * SIGBUS. */
		if (i915_terminally_wedged(&dev_priv->gpu_error)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}
	case -EAGAIN:
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
		 */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		ret = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		ret = VM_FAULT_OOM;
		break;
	case -ENOSPC:
	case -EFAULT:
		ret = VM_FAULT_SIGBUS;
		break;
	default:
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
		ret = VM_FAULT_SIGBUS;
		break;
	}

	intel_runtime_pm_put(dev_priv);
	return ret;
}
/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	if (!obj->fault_mappable)
		return;

	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);
	obj->fault_mappable = false;
}

void
i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		i915_gem_release_mmap(obj);
}

uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
	uint32_t gtt_size;

	if (INTEL_INFO(dev)->gen >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (INTEL_INFO(dev)->gen == 3)
		gtt_size = 1024*1024;
	else
		gtt_size = 512*1024;

	while (gtt_size < size)
		gtt_size <<= 1;

	return gtt_size;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
			   int tiling_mode, bool fenced)
{
	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
}
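/*
 * Worked example for the two helpers above: a 1.5 MiB tiled object on
 * gen3 needs a power-of-two fence region, so the size doubles from 1 MiB
 * to 2 MiB, and the object must likewise be bound at a 2 MiB-aligned GTT
 * offset for the fence to cover it.
 */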
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int ret;

	if (drm_vma_node_has_offset(&obj->base.vma_node))
		return 0;

	dev_priv->mm.shrinker_no_lock_stealing = true;

	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	/* Badly fragmented mmap space? The only way we can recover
	 * space is by destroying unwanted objects. We can't randomly release
	 * mmap_offsets as userspace expects them to be persistent for the
	 * lifetime of the objects. The closest we can do is to release the
	 * offsets on purgeable objects by truncating it and marking it purged,
	 * which prevents userspace from ever using that object again.
	 */
	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	i915_gem_shrink_all(dev_priv);
	ret = drm_gem_create_mmap_offset(&obj->base);
out:
	dev_priv->mm.shrinker_no_lock_stealing = false;

	return ret;
}
static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
}

int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->base.size > dev_priv->gtt.mappable_end) {
		ret = -E2BIG;
		goto out;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}

static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{
	return obj->madv == I915_MADV_DONTNEED;
}
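/*
 * Lifecycle note: obj->madv starts as I915_MADV_WILLNEED, becomes
 * I915_MADV_DONTNEED once userspace marks the buffer purgeable via the
 * madvise ioctl, and is set to the internal __I915_MADV_PURGED state by
 * i915_gem_object_truncate() below once the backing storage has actually
 * been discarded.
 */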
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.filp == NULL)
		return;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->madv = __I915_MADV_PURGED;
}

/* Try to discard unwanted pages */
static void
i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;

	switch (obj->madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
		return;
	}

	if (obj->base.filp == NULL)
		return;

	mapping = file_inode(obj->base.filp)->i_mapping;
	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}

static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct sg_page_iter sg_iter;
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		WARN_ON(ret != -EIO);
		i915_gem_clflush_object(obj, true);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (obj->dirty)
			set_page_dirty(page);

		if (obj->madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		page_cache_release(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static int
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	const struct drm_i915_gem_object_ops *ops = obj->ops;

	if (obj->pages == NULL)
		return 0;

	if (obj->pages_pin_count)
		return -EBUSY;

	BUG_ON(i915_gem_obj_bound_any(obj));

	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	list_del(&obj->global_list);

	ops->put_pages(obj);
	obj->pages = NULL;

	i915_gem_object_invalidate(obj);

	return 0;
}
static unsigned long
__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
		  bool purgeable_only)
{
	struct list_head still_in_list;
	struct drm_i915_gem_object *obj;
	unsigned long count = 0;

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at the time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count equals 0.
	 */
	INIT_LIST_HEAD(&still_in_list);
	while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
		obj = list_first_entry(&dev_priv->mm.unbound_list,
				       typeof(*obj), global_list);
		list_move_tail(&obj->global_list, &still_in_list);

		if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
			continue;

		drm_gem_object_reference(&obj->base);

		if (i915_gem_object_put_pages(obj) == 0)
			count += obj->base.size >> PAGE_SHIFT;

		drm_gem_object_unreference(&obj->base);
	}
	list_splice(&still_in_list, &dev_priv->mm.unbound_list);

	INIT_LIST_HEAD(&still_in_list);
	while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
		struct i915_vma *vma, *v;

		obj = list_first_entry(&dev_priv->mm.bound_list,
				       typeof(*obj), global_list);
		list_move_tail(&obj->global_list, &still_in_list);

		if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
			continue;

		drm_gem_object_reference(&obj->base);

		list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
			if (i915_vma_unbind(vma))
				break;

		if (i915_gem_object_put_pages(obj) == 0)
			count += obj->base.size >> PAGE_SHIFT;

		drm_gem_object_unreference(&obj->base);
	}
	list_splice(&still_in_list, &dev_priv->mm.bound_list);

	return count;
}
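/*
 * __i915_gem_shrink() returns the number of pages released, not bytes;
 * it scans the unbound list first (cheapest to free, nothing to unbind)
 * and only then starts unbinding VMAs from the bound list to reach the
 * requested target.
 */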
static unsigned long
i915_gem_purge(struct drm_i915_private *dev_priv, long target)
{
	return __i915_gem_shrink(dev_priv, target, true);
}

static unsigned long
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	i915_gem_evict_everything(dev_priv->dev);
	return __i915_gem_shrink(dev_priv, LONG_MAX, false);
}

static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int page_count, i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sg_page_iter sg_iter;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	gfp_t gfp;

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	page_count = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping = file_inode(obj->base.filp)->i_mapping;
	gfp = mapping_gfp_mask(mapping);
	gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
	gfp &= ~(__GFP_IO | __GFP_WAIT);
	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		if (IS_ERR(page)) {
			i915_gem_purge(dev_priv, page_count);
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		}
		if (IS_ERR(page)) {
			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 */
			i915_gem_shrink_all(dev_priv);
			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				goto err_pages;
		}
#ifdef CONFIG_SWIOTLB
		if (swiotlb_nr_tbl()) {
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
			sg = sg_next(sg);
			continue;
		}
#endif
		if (!i || page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
	}
#ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
#endif
		sg_mark_end(sg);
	obj->pages = st;

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj);

	return 0;

err_pages:
	sg_mark_end(sg);
	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
		page_cache_release(sg_page_iter_page(&sg_iter));
	sg_free_table(st);
	kfree(st);

	/* shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (PTR_ERR(page) == -ENOSPC)
		return -ENOMEM;
	else
		return PTR_ERR(page);
}
/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_get_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_put_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int
i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	const struct drm_i915_gem_object_ops *ops = obj->ops;
	int ret;

	if (obj->pages)
		return 0;

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	BUG_ON(obj->pages_pin_count);

	ret = ops->get_pages(obj);
	if (ret)
		return ret;

	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
	return 0;
}

static void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
			       struct intel_engine_cs *ring)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 seqno = intel_ring_get_seqno(ring);

	BUG_ON(ring == NULL);
	if (obj->ring != ring && obj->last_write_seqno) {
		/* Keep the seqno relative to the current ring */
		obj->last_write_seqno = seqno;
	}
	obj->ring = ring;

	/* Add a reference if we're newly entering the active list. */
	if (!obj->active) {
		drm_gem_object_reference(&obj->base);
		obj->active = 1;
	}

	list_move_tail(&obj->ring_list, &ring->active_list);

	obj->last_read_seqno = seqno;

	if (obj->fenced_gpu_access) {
		obj->last_fenced_seqno = seqno;

		/* Bump MRU to take account of the delayed flush */
		if (obj->fence_reg != I915_FENCE_REG_NONE) {
			struct drm_i915_fence_reg *reg;

			reg = &dev_priv->fence_regs[obj->fence_reg];
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
		}
	}
}
void i915_vma_move_to_active(struct i915_vma *vma,
			     struct intel_engine_cs *ring)
{
	list_move_tail(&vma->mm_list, &vma->vm->active_list);
	return i915_gem_object_move_to_active(vma->obj, ring);
}

static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct i915_address_space *vm;
	struct i915_vma *vma;

	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
	BUG_ON(!obj->active);

	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
		vma = i915_gem_obj_to_vma(obj, vm);
		if (vma && !list_empty(&vma->mm_list))
			list_move_tail(&vma->mm_list, &vm->inactive_list);
	}

	intel_fb_obj_flush(obj, true);

	list_del_init(&obj->ring_list);
	obj->ring = NULL;

	obj->last_read_seqno = 0;
	obj->last_write_seqno = 0;
	obj->base.write_domain = 0;

	obj->last_fenced_seqno = 0;
	obj->fenced_gpu_access = false;

	obj->active = 0;
	drm_gem_object_unreference(&obj->base);

	WARN_ON(i915_verify_lists(dev));
}

static void
i915_gem_object_retire(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *ring = obj->ring;

	if (ring == NULL)
		return;

	if (i915_seqno_passed(ring->get_seqno(ring, true),
			      obj->last_read_seqno))
		i915_gem_object_move_to_inactive(obj);
}

static int
i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i, j;

	/* Carefully retire all requests without writing to the rings */
	for_each_ring(ring, dev_priv, i) {
		ret = intel_ring_idle(ring);
		if (ret)
			return ret;
	}
	i915_gem_retire_requests(dev);

	/* Finally reset hw state */
	for_each_ring(ring, dev_priv, i) {
		intel_ring_init_seqno(ring, seqno);

		for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
			ring->semaphore.sync_seqno[j] = 0;
	}

	return 0;
}

int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (seqno == 0)
		return -EINVAL;

	/* HWS page needs to be set less than what we
	 * will inject to ring
	 */
	ret = i915_gem_init_seqno(dev, seqno - 1);
	if (ret)
		return ret;

	/* Carefully set the last_seqno value so that wrap
	 * detection still works
	 */
	dev_priv->next_seqno = seqno;
	dev_priv->last_seqno = seqno - 1;
	if (dev_priv->last_seqno == 0)
		dev_priv->last_seqno--;

	return 0;
}
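/*
 * Example of the wrap handling above: i915_gem_set_seqno(dev, 1) would
 * make last_seqno = 0, but 0 is reserved as "no seqno", so it is
 * decremented again and wraps to 0xffffffff; seqno comparisons are
 * modular (i915_seqno_passed), so the wrapped value still orders
 * correctly against next_seqno.
 */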
int
i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* reserve 0 for non-seqno */
	if (dev_priv->next_seqno == 0) {
		int ret = i915_gem_init_seqno(dev, 0);
		if (ret)
			return ret;

		dev_priv->next_seqno = 1;
	}

	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
	return 0;
}

int __i915_add_request(struct intel_engine_cs *ring,
		       struct drm_file *file,
		       struct drm_i915_gem_object *obj,
		       u32 *out_seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_request *request;
	u32 request_ring_position, request_start;
	int ret;

	request_start = intel_ring_get_tail(ring->buffer);
	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	ret = intel_ring_flush_all_caches(ring);
	if (ret)
		return ret;

	request = ring->preallocated_lazy_request;
	if (WARN_ON(request == NULL))
		return -ENOMEM;

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	request_ring_position = intel_ring_get_tail(ring->buffer);

	ret = ring->add_request(ring);
	if (ret)
		return ret;

	request->seqno = intel_ring_get_seqno(ring);
	request->ring = ring;
	request->head = request_start;
	request->tail = request_ring_position;

	/* Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	request->batch_obj = obj;
2373 /* Hold a reference to the current context so that we can inspect
2374 * it later in case a hangcheck error event fires.
2376 request->ctx = ring->last_context;
2378 i915_gem_context_reference(request->ctx);
2380 request->emitted_jiffies = jiffies;
2381 list_add_tail(&request->list, &ring->request_list);
2382 request->file_priv = NULL;
2385 struct drm_i915_file_private *file_priv = file->driver_priv;
2387 spin_lock(&file_priv->mm.lock);
2388 request->file_priv = file_priv;
2389 list_add_tail(&request->client_list,
2390 &file_priv->mm.request_list);
2391 spin_unlock(&file_priv->mm.lock);
2394 trace_i915_gem_request_add(ring, request->seqno);
2395 ring->outstanding_lazy_seqno = 0;
2396 ring->preallocated_lazy_request = NULL;
2398 if (!dev_priv->ums.mm_suspended) {
2399 i915_queue_hangcheck(ring->dev);
2401 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
2402 queue_delayed_work(dev_priv->wq,
2403 &dev_priv->mm.retire_work,
2404 round_jiffies_up_relative(HZ));
2405 intel_mark_busy(dev_priv->dev);
2409 *out_seqno = request->seqno;
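/*
 * Illustrative sketch: a caller flushing out the current lazy request with
 * no batch object or file to attribute it to simply passes NULLs,
 *
 *        ret = __i915_add_request(ring, NULL, NULL, NULL);
 *
 * relying on the preallocated lazy request that was reserved when the
 * commands were emitted.
 */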
static void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
        struct drm_i915_file_private *file_priv = request->file_priv;

        if (!file_priv)
                return;

        spin_lock(&file_priv->mm.lock);
        list_del(&request->client_list);
        request->file_priv = NULL;
        spin_unlock(&file_priv->mm.lock);
}
static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
                                   const struct intel_context *ctx)
{
        unsigned long elapsed;

        elapsed = get_seconds() - ctx->hang_stats.guilty_ts;

        if (ctx->hang_stats.banned)
                return true;

        if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
                if (!i915_gem_context_is_default(ctx)) {
                        DRM_DEBUG("context hanging too fast, banning!\n");
                        return true;
                } else if (i915_stop_ring_allow_ban(dev_priv)) {
                        if (i915_stop_ring_allow_warn(dev_priv))
                                DRM_ERROR("gpu hanging too fast, banning!\n");
                        return true;
                }
        }

        return false;
}
static void i915_set_reset_status(struct drm_i915_private *dev_priv,
                                  struct intel_context *ctx,
                                  const bool guilty)
{
        struct i915_ctx_hang_stats *hs;

        if (WARN_ON(!ctx))
                return;

        hs = &ctx->hang_stats;

        if (guilty) {
                hs->banned = i915_context_is_banned(dev_priv, ctx);
                hs->batch_active++;
                hs->guilty_ts = get_seconds();
        } else {
                hs->batch_pending++;
        }
}
static void i915_gem_free_request(struct drm_i915_gem_request *request)
{
        list_del(&request->list);
        i915_gem_request_remove_from_client(request);

        if (request->ctx)
                i915_gem_context_unreference(request->ctx);

        kfree(request);
}
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *ring)
{
        struct drm_i915_gem_request *request;
        u32 completed_seqno;

        completed_seqno = ring->get_seqno(ring, false);

        list_for_each_entry(request, &ring->request_list, list) {
                if (i915_seqno_passed(completed_seqno, request->seqno))
                        continue;

                return request;
        }

        return NULL;
}
static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
                                       struct intel_engine_cs *ring)
{
        struct drm_i915_gem_request *request;
        bool ring_hung;

        request = i915_gem_find_active_request(ring);

        if (request == NULL)
                return;

        ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;

        i915_set_reset_status(dev_priv, request->ctx, ring_hung);

        list_for_each_entry_continue(request, &ring->request_list, list)
                i915_set_reset_status(dev_priv, request->ctx, false);
}
static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
                                        struct intel_engine_cs *ring)
{
        while (!list_empty(&ring->active_list)) {
                struct drm_i915_gem_object *obj;

                obj = list_first_entry(&ring->active_list,
                                       struct drm_i915_gem_object,
                                       ring_list);

                i915_gem_object_move_to_inactive(obj);
        }

        /*
         * We must free the requests after all the corresponding objects have
         * been moved off active lists. Which is the same order as the normal
         * retire_requests function does. This is important if objects hold
         * implicit references on things like e.g. ppgtt address spaces through
         * the request.
         */
        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;

                request = list_first_entry(&ring->request_list,
                                           struct drm_i915_gem_request,
                                           list);

                i915_gem_free_request(request);
        }

        /* These may not have been flushed before the reset, do so now */
        kfree(ring->preallocated_lazy_request);
        ring->preallocated_lazy_request = NULL;
        ring->outstanding_lazy_seqno = 0;
}
void i915_gem_restore_fences(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;

        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];

                /*
                 * Commit delayed tiling changes if we have an object still
                 * attached to the fence, otherwise just clear the fence.
                 */
                if (reg->obj) {
                        i915_gem_object_update_fence(reg->obj, reg,
                                                     reg->obj->tiling_mode);
                } else {
                        i915_gem_write_fence(dev, i, NULL);
                }
        }
}
void i915_gem_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring;
        int i;

        /*
         * Before we free the objects from the requests, we need to inspect
         * them for finding the guilty party. As the requests only borrow
         * their reference to the objects, the inspection must be done first.
         */
        for_each_ring(ring, dev_priv, i)
                i915_gem_reset_ring_status(dev_priv, ring);

        for_each_ring(ring, dev_priv, i)
                i915_gem_reset_ring_cleanup(dev_priv, ring);

        i915_gem_context_reset(dev);

        i915_gem_restore_fences(dev);
}
/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
{
        uint32_t seqno;

        if (list_empty(&ring->request_list))
                return;

        WARN_ON(i915_verify_lists(ring->dev));

        seqno = ring->get_seqno(ring, true);

        /* Move any buffers on the active list that are no longer referenced
         * by the ringbuffer to the flushing/inactive lists as appropriate,
         * before we free the context associated with the requests.
         */
        while (!list_empty(&ring->active_list)) {
                struct drm_i915_gem_object *obj;

                obj = list_first_entry(&ring->active_list,
                                       struct drm_i915_gem_object,
                                       ring_list);

                if (!i915_seqno_passed(seqno, obj->last_read_seqno))
                        break;

                i915_gem_object_move_to_inactive(obj);
        }

        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;

                request = list_first_entry(&ring->request_list,
                                           struct drm_i915_gem_request,
                                           list);

                if (!i915_seqno_passed(seqno, request->seqno))
                        break;

                trace_i915_gem_request_retire(ring, request->seqno);
                /* We know the GPU must have read the request to have
                 * sent us the seqno + interrupt, so use the position
                 * of tail of the request to update the last known position
                 * of the GPU head.
                 */
                ring->buffer->last_retired_head = request->tail;

                i915_gem_free_request(request);
        }

        if (unlikely(ring->trace_irq_seqno &&
                     i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
                ring->irq_put(ring);
                ring->trace_irq_seqno = 0;
        }

        WARN_ON(i915_verify_lists(ring->dev));
}
bool
i915_gem_retire_requests(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring;
        bool idle = true;
        int i;

        for_each_ring(ring, dev_priv, i) {
                i915_gem_retire_requests_ring(ring);
                idle &= list_empty(&ring->request_list);
        }

        if (idle)
                mod_delayed_work(dev_priv->wq,
                                 &dev_priv->mm.idle_work,
                                 msecs_to_jiffies(100));

        return idle;
}
static void
i915_gem_retire_work_handler(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), mm.retire_work.work);
        struct drm_device *dev = dev_priv->dev;
        bool idle;

        /* Come back later if the device is busy... */
        idle = false;
        if (mutex_trylock(&dev->struct_mutex)) {
                idle = i915_gem_retire_requests(dev);
                mutex_unlock(&dev->struct_mutex);
        }
        if (!idle)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
                                   round_jiffies_up_relative(HZ));
}

static void
i915_gem_idle_work_handler(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), mm.idle_work.work);

        intel_mark_idle(dev_priv->dev);
}
/**
 * Ensures that an object will eventually get non-busy by flushing any required
 * write domains, emitting any outstanding lazy request and retiring any
 * completed requests.
 */
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{
        int ret;

        if (obj->active) {
                ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
                if (ret)
                        return ret;

                i915_gem_retire_requests_ring(obj->ring);
        }

        return 0;
}
/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
 * @DRM_IOCTL_ARGS: standard ioctl arguments
 *
 * Returns 0 if successful, else an error is returned with the remaining time in
 * the timeout parameter.
 *  -ETIME: object is still busy after timeout
 *  -ERESTARTSYS: signal interrupted the wait
 *  -ENOENT: object doesn't exist
 * Also possible, but rare:
 *  -EAGAIN: GPU wedged
 *  -ENOMEM: damn
 *  -ENODEV: Internal IRQ fail
 *  -E?: The add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 * without holding struct_mutex the object may become re-busied before this
 * function completes. A similar but shorter race condition exists in the busy
 * ioctl.
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_wait *args = data;
        struct drm_i915_gem_object *obj;
        struct intel_engine_cs *ring = NULL;
        struct timespec timeout_stack, *timeout = NULL;
        unsigned reset_counter;
        u32 seqno = 0;
        int ret = 0;

        if (args->timeout_ns >= 0) {
                timeout_stack = ns_to_timespec(args->timeout_ns);
                timeout = &timeout_stack;
        }

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
        if (&obj->base == NULL) {
                mutex_unlock(&dev->struct_mutex);
                return -ENOENT;
        }

        /* Need to make sure the object gets inactive eventually. */
        ret = i915_gem_object_flush_active(obj);
        if (ret)
                goto out;

        if (obj->active) {
                seqno = obj->last_read_seqno;
                ring = obj->ring;
        }

        if (seqno == 0)
                goto out;

        /* Do this after OLR check to make sure we make forward progress polling
         * on this IOCTL with a 0 timeout (like busy ioctl)
         */
        if (!args->timeout_ns) {
                ret = -ETIME;
                goto out;
        }

        drm_gem_object_unreference(&obj->base);
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        mutex_unlock(&dev->struct_mutex);

        ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
        if (timeout)
                args->timeout_ns = timespec_to_ns(timeout);
        return ret;

out:
        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
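/*
 * Illustrative userspace sketch (hypothetical fd/handle variables, using
 * libdrm's drmIoctl wrapper): wait up to one second for a buffer to go idle:
 *
 *        struct drm_i915_gem_wait wait = {
 *                .bo_handle = handle,
 *                .timeout_ns = 1000 * 1000 * 1000,
 *        };
 *        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == 0)
 *                handle_idle();        (wait.timeout_ns holds the unused budget)
 *        else if (errno == ETIME)
 *                handle_busy();
 *
 * handle_idle()/handle_busy() are stand-ins for the caller's own logic.
 */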
/**
 * i915_gem_object_sync - sync an object to a ring.
 *
 * @obj: object which may be in use on another ring.
 * @to: ring we wish to use the object on. May be NULL.
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Calling with NULL implies synchronizing the object with the CPU
 * rather than a particular GPU ring.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
                     struct intel_engine_cs *to)
{
        struct intel_engine_cs *from = obj->ring;
        u32 seqno;
        int ret, idx;

        if (from == NULL || to == from)
                return 0;

        if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
                return i915_gem_object_wait_rendering(obj, false);

        idx = intel_ring_sync_index(from, to);

        seqno = obj->last_read_seqno;
        /* Optimization: Avoid semaphore sync when we are sure we already
         * waited for an object with higher seqno */
        if (seqno <= from->semaphore.sync_seqno[idx])
                return 0;

        ret = i915_gem_check_olr(obj->ring, seqno);
        if (ret)
                return ret;

        trace_i915_gem_ring_sync_to(from, to, seqno);
        ret = to->semaphore.sync_to(to, from, seqno);
        if (!ret)
                /* We use last_read_seqno because sync_to()
                 * might have just caused seqno wrap under
                 * the radar.
                 */
                from->semaphore.sync_seqno[idx] = obj->last_read_seqno;

        return ret;
}
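/*
 * Illustrative sketch: code about to use @obj on the blitter while it may
 * still be busy on another ring would call
 *
 *        ret = i915_gem_object_sync(obj, &dev_priv->ring[BCS]);
 *        if (ret)
 *                return ret;
 *
 * emitting a semaphore wait when semaphores are enabled and otherwise
 * blocking on the outstanding rendering; pin_to_display_plane() below uses
 * the same call with its pipelined ring.
 */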
static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
{
        u32 old_write_domain, old_read_domains;

        /* Force a pagefault for domain tracking on next user access */
        i915_gem_release_mmap(obj);

        if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
                return;

        /* Wait for any direct GTT access to complete */
        mb();

        old_read_domains = obj->base.read_domains;
        old_write_domain = obj->base.write_domain;

        obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
        obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;

        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
                                            old_write_domain);
}
int i915_vma_unbind(struct i915_vma *vma)
{
        struct drm_i915_gem_object *obj = vma->obj;
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        int ret;

        if (list_empty(&vma->vma_link))
                return 0;

        if (!drm_mm_node_allocated(&vma->node)) {
                i915_gem_vma_destroy(vma);
                return 0;
        }

        if (vma->pin_count)
                return -EBUSY;

        BUG_ON(obj->pages == NULL);

        ret = i915_gem_object_finish_gpu(obj);
        if (ret)
                return ret;
        /* Continue on if we fail due to EIO, the GPU is hung so we
         * should be safe and we need to cleanup or else we might
         * cause memory corruption through use-after-free.
         */

        if (i915_is_ggtt(vma->vm)) {
                i915_gem_object_finish_gtt(obj);

                /* release the fence reg _after_ flushing */
                ret = i915_gem_object_put_fence(obj);
                if (ret)
                        return ret;
        }

        trace_i915_vma_unbind(vma);

        vma->unbind_vma(vma);

        list_del_init(&vma->mm_list);
        /* Avoid an unnecessary call to unbind on rebind. */
        if (i915_is_ggtt(vma->vm))
                obj->map_and_fenceable = true;

        drm_mm_remove_node(&vma->node);
        i915_gem_vma_destroy(vma);

        /* Since the unbound list is global, only move to that list if
         * no more VMAs exist. */
        if (list_empty(&obj->vma_list)) {
                i915_gem_gtt_finish_object(obj);
                list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
        }

        /* And finally now the object is completely decoupled from this vma,
         * we can drop its hold on the backing storage and allow it to be
         * reaped by the shrinker.
         */
        i915_gem_object_unpin_pages(obj);

        return 0;
}
int i915_gpu_idle(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring;
        int ret, i;

        /* Flush everything onto the inactive list. */
        for_each_ring(ring, dev_priv, i) {
                ret = i915_switch_context(ring, ring->default_context);
                if (ret)
                        return ret;

                ret = intel_ring_idle(ring);
                if (ret)
                        return ret;
        }

        return 0;
}
static void i965_write_fence_reg(struct drm_device *dev, int reg,
                                 struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int fence_reg;
        int fence_pitch_shift;

        if (INTEL_INFO(dev)->gen >= 6) {
                fence_reg = FENCE_REG_SANDYBRIDGE_0;
                fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
        } else {
                fence_reg = FENCE_REG_965_0;
                fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
        }

        fence_reg += reg * 8;

        /* To w/a incoherency with non-atomic 64-bit register updates,
         * we split the 64-bit update into two 32-bit writes. In order
         * for a partial fence not to be evaluated between writes, we
         * precede the update with write to turn off the fence register,
         * and only enable the fence as the last step.
         *
         * For extra levels of paranoia, we make sure each step lands
         * before applying the next step.
         */
        I915_WRITE(fence_reg, 0);
        POSTING_READ(fence_reg);

        if (obj) {
                u32 size = i915_gem_obj_ggtt_size(obj);
                uint64_t val;

                val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
                                 0xfffff000) << 32;
                val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
                val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
                if (obj->tiling_mode == I915_TILING_Y)
                        val |= 1 << I965_FENCE_TILING_Y_SHIFT;
                val |= I965_FENCE_REG_VALID;

                I915_WRITE(fence_reg + 4, val >> 32);
                POSTING_READ(fence_reg + 4);

                I915_WRITE(fence_reg + 0, val);
                POSTING_READ(fence_reg);
        } else {
                I915_WRITE(fence_reg + 4, 0);
                POSTING_READ(fence_reg + 4);
        }
}
static void i915_write_fence_reg(struct drm_device *dev, int reg,
                                 struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val;

        if (obj) {
                u32 size = i915_gem_obj_ggtt_size(obj);
                int pitch_val;
                int tile_width;

                WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
                     (size & -size) != size ||
                     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
                     "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
                     i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);

                if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
                        tile_width = 128;
                else
                        tile_width = 512;

                /* Note: pitch better be a power of two tile widths */
                pitch_val = obj->stride / tile_width;
                pitch_val = ffs(pitch_val) - 1;

                val = i915_gem_obj_ggtt_offset(obj);
                if (obj->tiling_mode == I915_TILING_Y)
                        val |= 1 << I830_FENCE_TILING_Y_SHIFT;
                val |= I915_FENCE_SIZE_BITS(size);
                val |= pitch_val << I830_FENCE_PITCH_SHIFT;
                val |= I830_FENCE_REG_VALID;
        } else
                val = 0;

        if (reg < 8)
                reg = FENCE_REG_830_0 + reg * 4;
        else
                reg = FENCE_REG_945_8 + (reg - 8) * 4;

        I915_WRITE(reg, val);
        POSTING_READ(reg);
}
static void i830_write_fence_reg(struct drm_device *dev, int reg,
                                 struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t val;

        if (obj) {
                u32 size = i915_gem_obj_ggtt_size(obj);
                uint32_t pitch_val;

                WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
                     (size & -size) != size ||
                     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
                     "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
                     i915_gem_obj_ggtt_offset(obj), size);

                pitch_val = obj->stride / 128;
                pitch_val = ffs(pitch_val) - 1;

                val = i915_gem_obj_ggtt_offset(obj);
                if (obj->tiling_mode == I915_TILING_Y)
                        val |= 1 << I830_FENCE_TILING_Y_SHIFT;
                val |= I830_FENCE_SIZE_BITS(size);
                val |= pitch_val << I830_FENCE_PITCH_SHIFT;
                val |= I830_FENCE_REG_VALID;
        } else
                val = 0;

        I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
        POSTING_READ(FENCE_REG_830_0 + reg * 4);
}
static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
{
        return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
}

static void i915_gem_write_fence(struct drm_device *dev, int reg,
                                 struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* Ensure that all CPU reads are completed before installing a fence
         * and all writes before removing the fence.
         */
        if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
                mb();

        WARN(obj && (!obj->stride || !obj->tiling_mode),
             "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
             obj->stride, obj->tiling_mode);

        switch (INTEL_INFO(dev)->gen) {
        case 8:
        case 7:
        case 6:
        case 5:
        case 4: i965_write_fence_reg(dev, reg, obj); break;
        case 3: i915_write_fence_reg(dev, reg, obj); break;
        case 2: i830_write_fence_reg(dev, reg, obj); break;
        default: BUG();
        }

        /* And similarly be paranoid that no direct access to this region
         * is reordered to before the fence is installed.
         */
        if (i915_gem_object_needs_mb(obj))
                mb();
}
static inline int fence_number(struct drm_i915_private *dev_priv,
                               struct drm_i915_fence_reg *fence)
{
        return fence - dev_priv->fence_regs;
}

static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
                                         struct drm_i915_fence_reg *fence,
                                         bool enable)
{
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        int reg = fence_number(dev_priv, fence);

        i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);

        if (enable) {
                obj->fence_reg = reg;
                fence->obj = obj;
                list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
        } else {
                obj->fence_reg = I915_FENCE_REG_NONE;
                fence->obj = NULL;
                list_del_init(&fence->lru_list);
        }
        obj->fence_dirty = false;
}
static int
i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
{
        if (obj->last_fenced_seqno) {
                int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
                if (ret)
                        return ret;

                obj->last_fenced_seqno = 0;
        }

        obj->fenced_gpu_access = false;
        return 0;
}

int
i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        struct drm_i915_fence_reg *fence;
        int ret;

        ret = i915_gem_object_wait_fence(obj);
        if (ret)
                return ret;

        if (obj->fence_reg == I915_FENCE_REG_NONE)
                return 0;

        fence = &dev_priv->fence_regs[obj->fence_reg];

        if (WARN_ON(fence->pin_count))
                return -EBUSY;

        i915_gem_object_fence_lost(obj);
        i915_gem_object_update_fence(obj, fence, false);

        return 0;
}

static struct drm_i915_fence_reg *
i915_find_fence_reg(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_fence_reg *reg, *avail;
        int i;

        /* First try to find a free reg */
        avail = NULL;
        for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
                reg = &dev_priv->fence_regs[i];
                if (!reg->obj)
                        return reg;

                if (!reg->pin_count)
                        avail = reg;
        }

        if (avail == NULL)
                goto deadlock;

        /* None available, try to steal one or wait for a user to finish */
        list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
                if (reg->pin_count)
                        continue;

                return reg;
        }

deadlock:
        /* Wait for completion of pending flips which consume fences */
        if (intel_has_pending_fb_unpin(dev))
                return ERR_PTR(-EAGAIN);

        return ERR_PTR(-EDEADLK);
}
/**
 * i915_gem_object_get_fence - set up fencing for an object
 * @obj: object to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 *
 * For an untiled surface, this removes any existing fence.
 */
int
i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool enable = obj->tiling_mode != I915_TILING_NONE;
        struct drm_i915_fence_reg *reg;
        int ret;

        /* Have we updated the tiling parameters upon the object and so
         * will need to serialise the write to the associated fence register?
         */
        if (obj->fence_dirty) {
                ret = i915_gem_object_wait_fence(obj);
                if (ret)
                        return ret;
        }

        /* Just update our place in the LRU if our fence is getting reused. */
        if (obj->fence_reg != I915_FENCE_REG_NONE) {
                reg = &dev_priv->fence_regs[obj->fence_reg];
                if (!obj->fence_dirty) {
                        list_move_tail(&reg->lru_list,
                                       &dev_priv->mm.fence_list);
                        return 0;
                }
        } else if (enable) {
                reg = i915_find_fence_reg(dev);
                if (IS_ERR(reg))
                        return PTR_ERR(reg);

                if (reg->obj) {
                        struct drm_i915_gem_object *old = reg->obj;

                        ret = i915_gem_object_wait_fence(old);
                        if (ret)
                                return ret;

                        i915_gem_object_fence_lost(old);
                }
        } else
                return 0;

        i915_gem_object_update_fence(obj, reg, enable);

        return 0;
}
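/*
 * Sketch of the usual caller pattern for fenced GTT access (cf. the
 * framebuffer pinning code elsewhere in the driver): pin into the mappable
 * aperture first, then reserve the fence:
 *
 *        ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
 *        if (ret == 0) {
 *                ret = i915_gem_object_get_fence(obj);
 *                if (ret == 0)
 *                        i915_gem_object_pin_fence(obj);
 *        }
 */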
static bool i915_gem_valid_gtt_space(struct drm_device *dev,
                                     struct drm_mm_node *gtt_space,
                                     unsigned long cache_level)
{
        struct drm_mm_node *other;

        /* On non-LLC machines we have to be careful when putting differing
         * types of snoopable memory together to avoid the prefetcher
         * crossing memory domains and dying.
         */
        if (HAS_LLC(dev))
                return true;

        if (!drm_mm_node_allocated(gtt_space))
                return true;

        if (list_empty(&gtt_space->node_list))
                return true;

        other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
        if (other->allocated && !other->hole_follows && other->color != cache_level)
                return false;

        other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
        if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
                return false;

        return true;
}
static void i915_gem_verify_gtt(struct drm_device *dev)
{
#if WATCH_GTT
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        int err = 0;

        list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
                if (obj->gtt_space == NULL) {
                        printk(KERN_ERR "object found on GTT list with no space reserved\n");
                        err++;
                        continue;
                }

                if (obj->cache_level != obj->gtt_space->color) {
                        printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
                               i915_gem_obj_ggtt_offset(obj),
                               i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
                               obj->cache_level,
                               obj->gtt_space->color);
                        err++;
                        continue;
                }

                if (!i915_gem_valid_gtt_space(dev,
                                              obj->gtt_space,
                                              obj->cache_level)) {
                        printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
                               i915_gem_obj_ggtt_offset(obj),
                               i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
                               obj->cache_level);
                        err++;
                }
        }

        WARN_ON(err);
#endif
}
/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
                           struct i915_address_space *vm,
                           unsigned alignment,
                           uint64_t flags)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 size, fence_size, fence_alignment, unfenced_alignment;
        unsigned long start =
                flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
        unsigned long end =
                flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
        struct i915_vma *vma;
        int ret;

        fence_size = i915_gem_get_gtt_size(dev,
                                           obj->base.size,
                                           obj->tiling_mode);
        fence_alignment = i915_gem_get_gtt_alignment(dev,
                                                     obj->base.size,
                                                     obj->tiling_mode, true);
        unfenced_alignment =
                i915_gem_get_gtt_alignment(dev,
                                           obj->base.size,
                                           obj->tiling_mode, false);

        if (alignment == 0)
                alignment = flags & PIN_MAPPABLE ? fence_alignment :
                                                unfenced_alignment;
        if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
                DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
                return ERR_PTR(-EINVAL);
        }

        size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;

        /* If the object is bigger than the entire aperture, reject it early
         * before evicting everything in a vain attempt to find space.
         */
        if (obj->base.size > end) {
                DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
                          obj->base.size,
                          flags & PIN_MAPPABLE ? "mappable" : "total",
                          end);
                return ERR_PTR(-E2BIG);
        }

        ret = i915_gem_object_get_pages(obj);
        if (ret)
                return ERR_PTR(ret);

        i915_gem_object_pin_pages(obj);

        vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
        if (IS_ERR(vma))
                goto err_unpin;

search_free:
        ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
                                                  size, alignment,
                                                  obj->cache_level,
                                                  start, end,
                                                  DRM_MM_SEARCH_DEFAULT,
                                                  DRM_MM_CREATE_DEFAULT);
        if (ret) {
                ret = i915_gem_evict_something(dev, vm, size, alignment,
                                               obj->cache_level,
                                               start, end,
                                               flags);
                if (ret == 0)
                        goto search_free;

                goto err_free_vma;
        }
        if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
                                              obj->cache_level))) {
                ret = -EINVAL;
                goto err_remove_node;
        }

        ret = i915_gem_gtt_prepare_object(obj);
        if (ret)
                goto err_remove_node;

        list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
        list_add_tail(&vma->mm_list, &vm->inactive_list);

        if (i915_is_ggtt(vm)) {
                bool mappable, fenceable;

                fenceable = (vma->node.size == fence_size &&
                             (vma->node.start & (fence_alignment - 1)) == 0);

                mappable = (vma->node.start + obj->base.size <=
                            dev_priv->gtt.mappable_end);

                obj->map_and_fenceable = mappable && fenceable;
        }

        WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);

        trace_i915_vma_bind(vma, flags);
        vma->bind_vma(vma, obj->cache_level,
                      flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);

        i915_gem_verify_gtt(dev);
        return vma;

err_remove_node:
        drm_mm_remove_node(&vma->node);
err_free_vma:
        i915_gem_vma_destroy(vma);
        vma = ERR_PTR(ret);
err_unpin:
        i915_gem_object_unpin_pages(obj);
        return vma;
}
bool
i915_gem_clflush_object(struct drm_i915_gem_object *obj,
                        bool force)
{
        /* If we don't have a page list set up, then we're not pinned
         * to GPU, and we can ignore the cache flush because it'll happen
         * again at bind time.
         */
        if (obj->pages == NULL)
                return false;

        /*
         * Stolen memory is always coherent with the GPU as it is explicitly
         * marked as wc by the system, or the system is cache-coherent.
         */
        if (obj->stolen)
                return false;

        /* If the GPU is snooping the contents of the CPU cache,
         * we do not need to manually clear the CPU cache lines. However,
         * the caches are only snooped when the render cache is
         * flushed/invalidated. As we always have to emit invalidations
         * and flushes when moving into and out of the RENDER domain, correct
         * snooping behaviour occurs naturally as the result of our domain
         * tracking.
         */
        if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
                return false;

        trace_i915_gem_object_clflush(obj);
        drm_clflush_sg(obj->pages);

        return true;
}
/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
        uint32_t old_write_domain;

        if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
                return;

        /* No actual flushing is required for the GTT write domain. Writes
         * to it immediately go to main memory as far as we know, so there's
         * no chipset flush. It also doesn't land in render cache.
         *
         * However, we do have to enforce the order so that all writes through
         * the GTT land before any writes to the device, such as updates to
         * the GATT itself.
         */
        wmb();

        old_write_domain = obj->base.write_domain;
        obj->base.write_domain = 0;

        intel_fb_obj_flush(obj, false);

        trace_i915_gem_object_change_domain(obj,
                                            obj->base.read_domains,
                                            old_write_domain);
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
                                       bool force)
{
        uint32_t old_write_domain;

        if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
                return;

        if (i915_gem_clflush_object(obj, force))
                i915_gem_chipset_flush(obj->base.dev);

        old_write_domain = obj->base.write_domain;
        obj->base.write_domain = 0;

        intel_fb_obj_flush(obj, false);

        trace_i915_gem_object_change_domain(obj,
                                            obj->base.read_domains,
                                            old_write_domain);
}
/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        uint32_t old_write_domain, old_read_domains;
        int ret;

        /* Not valid to be called on unbound objects. */
        if (!i915_gem_obj_bound_any(obj))
                return -EINVAL;

        if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
                return 0;

        ret = i915_gem_object_wait_rendering(obj, !write);
        if (ret)
                return ret;

        i915_gem_object_retire(obj);
        i915_gem_object_flush_cpu_write_domain(obj, false);

        /* Serialise direct access to this object with the barriers for
         * coherent writes from the GPU, by effectively invalidating the
         * GTT domain upon first access.
         */
        if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
                mb();

        old_write_domain = obj->base.write_domain;
        old_read_domains = obj->base.read_domains;

        /* It should now be out of any other write domains, and we can update
         * the domain values for our changes.
         */
        BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
        obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
        if (write) {
                obj->base.read_domains = I915_GEM_DOMAIN_GTT;
                obj->base.write_domain = I915_GEM_DOMAIN_GTT;
                obj->dirty = 1;
        }

        if (write)
                intel_fb_obj_invalidate(obj, NULL);

        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
                                            old_write_domain);

        /* And bump the LRU for this access */
        if (i915_gem_object_is_inactive(obj)) {
                struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
                if (vma)
                        list_move_tail(&vma->mm_list,
                                       &dev_priv->gtt.base.inactive_list);
        }

        return 0;
}
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                    enum i915_cache_level cache_level)
{
        struct drm_device *dev = obj->base.dev;
        struct i915_vma *vma, *next;
        int ret;

        if (obj->cache_level == cache_level)
                return 0;

        if (i915_gem_obj_is_pinned(obj)) {
                DRM_DEBUG("can not change the cache level of pinned objects\n");
                return -EBUSY;
        }

        list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
                if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
                        ret = i915_vma_unbind(vma);
                        if (ret)
                                return ret;
                }
        }

        if (i915_gem_obj_bound_any(obj)) {
                ret = i915_gem_object_finish_gpu(obj);
                if (ret)
                        return ret;

                i915_gem_object_finish_gtt(obj);

                /* Before SandyBridge, you could not use tiling or fence
                 * registers with snooped memory, so relinquish any fences
                 * currently pointing to our region in the aperture.
                 */
                if (INTEL_INFO(dev)->gen < 6) {
                        ret = i915_gem_object_put_fence(obj);
                        if (ret)
                                return ret;
                }

                list_for_each_entry(vma, &obj->vma_list, vma_link)
                        if (drm_mm_node_allocated(&vma->node))
                                vma->bind_vma(vma, cache_level,
                                              obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
        }

        list_for_each_entry(vma, &obj->vma_list, vma_link)
                vma->node.color = cache_level;
        obj->cache_level = cache_level;

        if (cpu_write_needs_clflush(obj)) {
                u32 old_read_domains, old_write_domain;

                /* If we're coming from LLC cached, then we haven't
                 * actually been tracking whether the data is in the
                 * CPU cache or not, since we only allow one bit set
                 * in obj->write_domain and have been skipping the clflushes.
                 * Just set it to the CPU cache for now.
                 */
                i915_gem_object_retire(obj);
                WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);

                old_read_domains = obj->base.read_domains;
                old_write_domain = obj->base.write_domain;

                obj->base.read_domains = I915_GEM_DOMAIN_CPU;
                obj->base.write_domain = I915_GEM_DOMAIN_CPU;

                trace_i915_gem_object_change_domain(obj,
                                                    old_read_domains,
                                                    old_write_domain);
        }

        i915_gem_verify_gtt(dev);
        return 0;
}
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file)
{
        struct drm_i915_gem_caching *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        switch (obj->cache_level) {
        case I915_CACHE_LLC:
        case I915_CACHE_L3_LLC:
                args->caching = I915_CACHING_CACHED;
                break;

        case I915_CACHE_WT:
                args->caching = I915_CACHING_DISPLAY;
                break;

        default:
                args->caching = I915_CACHING_NONE;
                break;
        }

        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file)
{
        struct drm_i915_gem_caching *args = data;
        struct drm_i915_gem_object *obj;
        enum i915_cache_level level;
        int ret;

        switch (args->caching) {
        case I915_CACHING_NONE:
                level = I915_CACHE_NONE;
                break;
        case I915_CACHING_CACHED:
                level = I915_CACHE_LLC;
                break;
        case I915_CACHING_DISPLAY:
                level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
                break;
        default:
                return -EINVAL;
        }

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        ret = i915_gem_object_set_cache_level(obj, level);

        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
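/*
 * Illustrative userspace sketch (hypothetical fd/handle, libdrm's drmIoctl):
 * requesting LLC caching for a buffer:
 *
 *        struct drm_i915_gem_caching caching = {
 *                .handle = handle,
 *                .caching = I915_CACHING_CACHED,
 *        };
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &caching);
 */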
static bool is_pin_display(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma;

        if (list_empty(&obj->vma_list))
                return false;

        vma = i915_gem_obj_to_ggtt(obj);
        if (!vma)
                return false;

        /* There are 3 sources that pin objects:
         *   1. The display engine (scanouts, sprites, cursors);
         *   2. Reservations for execbuffer;
         *   3. The user.
         *
         * We can ignore reservations as we hold the struct_mutex and
         * are only called outside of the reservation path. The user
         * can only increment pin_count once, and so if after
         * subtracting the potential reference by the user, any pin_count
         * remains, it must be due to another use by the display engine.
         */
        return vma->pin_count - !!obj->user_pin_count;
}
/*
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
 */
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
                                     struct intel_engine_cs *pipelined)
{
        u32 old_read_domains, old_write_domain;
        bool was_pin_display;
        int ret;

        if (pipelined != obj->ring) {
                ret = i915_gem_object_sync(obj, pipelined);
                if (ret)
                        return ret;
        }

        /* Mark the pin_display early so that we account for the
         * display coherency whilst setting up the cache domains.
         */
        was_pin_display = obj->pin_display;
        obj->pin_display = true;

        /* The display engine is not coherent with the LLC cache on gen6. As
         * a result, we make sure that the pinning that is about to occur is
         * done with uncached PTEs. This is lowest common denominator for all
         * chipsets.
         *
         * However for gen6+, we could do better by using the GFDT bit instead
         * of uncaching, which would allow us to flush all the LLC-cached data
         * with that bit in the PTE to main memory with just one PIPE_CONTROL.
         */
        ret = i915_gem_object_set_cache_level(obj,
                                              HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
        if (ret)
                goto err_unpin_display;

        /* As the user may map the buffer once pinned in the display plane
         * (e.g. libkms for the bootup splash), we have to ensure that we
         * always use map_and_fenceable for all scanout buffers.
         */
        ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
        if (ret)
                goto err_unpin_display;

        i915_gem_object_flush_cpu_write_domain(obj, true);

        old_write_domain = obj->base.write_domain;
        old_read_domains = obj->base.read_domains;

        /* It should now be out of any other write domains, and we can update
         * the domain values for our changes.
         */
        obj->base.write_domain = 0;
        obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
                                            old_write_domain);

        return 0;

err_unpin_display:
        WARN_ON(was_pin_display != is_pin_display(obj));
        obj->pin_display = was_pin_display;
        return ret;
}
void
i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
{
        i915_gem_object_ggtt_unpin(obj);
        obj->pin_display = is_pin_display(obj);
}
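/*
 * Sketch of the expected pairing in modeset code: pin for scanout, flip,
 * then release once the buffer leaves the display plane:
 *
 *        ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
 *        if (ret == 0) {
 *                ... scan out ...
 *                i915_gem_object_unpin_from_display_plane(obj);
 *        }
 */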
int
i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
{
        int ret;

        if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
                return 0;

        ret = i915_gem_object_wait_rendering(obj, false);
        if (ret)
                return ret;

        /* Ensure that we invalidate the GPU's caches and TLBs. */
        obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
        return 0;
}
/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
        uint32_t old_write_domain, old_read_domains;
        int ret;

        if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
                return 0;

        ret = i915_gem_object_wait_rendering(obj, !write);
        if (ret)
                return ret;

        i915_gem_object_retire(obj);
        i915_gem_object_flush_gtt_write_domain(obj);

        old_write_domain = obj->base.write_domain;
        old_read_domains = obj->base.read_domains;

        /* Flush the CPU cache if it's still invalid. */
        if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
                i915_gem_clflush_object(obj, false);

                obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
        }

        /* It should now be out of any other write domains, and we can update
         * the domain values for our changes.
         */
        BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

        /* If we're writing through the CPU, then the GPU read domains will
         * need to be invalidated at next use.
         */
        if (write) {
                obj->base.read_domains = I915_GEM_DOMAIN_CPU;
                obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        }

        if (write)
                intel_fb_obj_invalidate(obj, NULL);

        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
                                            old_write_domain);

        return 0;
}
/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_file_private *file_priv = file->driver_priv;
        unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
        struct drm_i915_gem_request *request;
        struct intel_engine_cs *ring = NULL;
        unsigned reset_counter;
        u32 seqno = 0;
        int ret;

        ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
        if (ret)
                return ret;

        ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
        if (ret)
                return ret;

        spin_lock(&file_priv->mm.lock);
        list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
                if (time_after_eq(request->emitted_jiffies, recent_enough))
                        break;

                ring = request->ring;
                seqno = request->seqno;
        }
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        spin_unlock(&file_priv->mm.lock);

        if (seqno == 0)
                return 0;

        ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
        if (ret == 0)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);

        return ret;
}
static bool
i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
{
        struct drm_i915_gem_object *obj = vma->obj;

        if (alignment &&
            vma->node.start & (alignment - 1))
                return true;

        if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
                return true;

        if (flags & PIN_OFFSET_BIAS &&
            vma->node.start < (flags & PIN_OFFSET_MASK))
                return true;

        return false;
}
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
                    struct i915_address_space *vm,
                    uint32_t alignment,
                    uint64_t flags)
{
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        struct i915_vma *vma;
        int ret;

        if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
                return -ENODEV;

        if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
                return -EINVAL;

        vma = i915_gem_obj_to_vma(obj, vm);
        if (vma) {
                if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
                        return -EBUSY;

                if (i915_vma_misplaced(vma, alignment, flags)) {
                        WARN(vma->pin_count,
                             "bo is already pinned with incorrect alignment:"
                             " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
                             " obj->map_and_fenceable=%d\n",
                             i915_gem_obj_offset(obj, vm), alignment,
                             !!(flags & PIN_MAPPABLE),
                             obj->map_and_fenceable);
                        ret = i915_vma_unbind(vma);
                        if (ret)
                                return ret;

                        vma = NULL;
                }
        }

        if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
                vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
                if (IS_ERR(vma))
                        return PTR_ERR(vma);
        }

        if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
                vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);

        vma->pin_count++;
        if (flags & PIN_MAPPABLE)
                obj->pin_mappable |= true;

        return 0;
}
void
i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);

        BUG_ON(!vma);
        BUG_ON(vma->pin_count == 0);
        BUG_ON(!i915_gem_obj_ggtt_bound(obj));

        if (--vma->pin_count == 0)
                obj->pin_mappable = false;
}
bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
        if (obj->fence_reg != I915_FENCE_REG_NONE) {
                struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
                struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);

                WARN_ON(!ggtt_vma ||
                        dev_priv->fence_regs[obj->fence_reg].pin_count >
                        ggtt_vma->pin_count);
                dev_priv->fence_regs[obj->fence_reg].pin_count++;
                return true;
        } else
                return false;
}

void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
        if (obj->fence_reg != I915_FENCE_REG_NONE) {
                struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
                WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
                dev_priv->fence_regs[obj->fence_reg].pin_count--;
        }
}
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file)
{
        struct drm_i915_gem_pin *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        if (INTEL_INFO(dev)->gen >= 6)
                return -ENODEV;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        if (obj->madv != I915_MADV_WILLNEED) {
                DRM_DEBUG("Attempting to pin a purgeable buffer\n");
                ret = -EFAULT;
                goto out;
        }

        if (obj->pin_filp != NULL && obj->pin_filp != file) {
                DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
                ret = -EINVAL;
                goto out;
        }

        if (obj->user_pin_count == ULONG_MAX) {
                ret = -EBUSY;
                goto out;
        }

        if (obj->user_pin_count == 0) {
                ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
                if (ret)
                        goto out;
        }

        obj->user_pin_count++;
        obj->pin_filp = file;

        args->offset = i915_gem_obj_ggtt_offset(obj);
out:
        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
        struct drm_i915_gem_pin *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        if (obj->pin_filp != file) {
                DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
                ret = -EINVAL;
                goto out;
        }
        obj->user_pin_count--;
        if (obj->user_pin_count == 0) {
                obj->pin_filp = NULL;
                i915_gem_object_ggtt_unpin(obj);
        }

out:
        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file)
{
        struct drm_i915_gem_busy *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        /* Count all active objects as busy, even if they are currently not used
         * by the gpu. Users of this interface expect objects to eventually
         * become non-busy without any further actions, therefore emit any
         * necessary flushes here.
         */
        ret = i915_gem_object_flush_active(obj);

        args->busy = obj->active;
        if (obj->ring) {
                BUILD_BUG_ON(I915_NUM_RINGS > 16);
                args->busy |= intel_ring_flag(obj->ring) << 16;
        }

        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        return i915_gem_ring_throttle(dev, file_priv);
}
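/*
 * Illustrative userspace sketch: GL clients typically throttle once per
 * frame; the ioctl takes no argument structure:
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);
 */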
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_i915_gem_madvise *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        switch (args->madv) {
        case I915_MADV_DONTNEED:
        case I915_MADV_WILLNEED:
                break;
        default:
                return -EINVAL;
        }

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        if (i915_gem_obj_is_pinned(obj)) {
                ret = -EINVAL;
                goto out;
        }

        if (obj->madv != __I915_MADV_PURGED)
                obj->madv = args->madv;

        /* if the object is no longer attached, discard its backing storage */
        if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
                i915_gem_object_truncate(obj);

        args->retained = obj->madv != __I915_MADV_PURGED;

out:
        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
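/*
 * Illustrative userspace sketch (hypothetical fd/handle): a buffer cache
 * marking an idle buffer purgeable, then checking on reuse whether the
 * kernel reclaimed it in the meantime:
 *
 *        struct drm_i915_gem_madvise madv = {
 *                .handle = handle,
 *                .madv = I915_MADV_DONTNEED,
 *        };
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *        ...
 *        madv.madv = I915_MADV_WILLNEED;
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *        if (!madv.retained)
 *                reupload_contents();        (backing store was purged)
 *
 * reupload_contents() stands in for the caller's own recovery path.
 */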
void i915_gem_object_init(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_object_ops *ops)
{
        INIT_LIST_HEAD(&obj->global_list);
        INIT_LIST_HEAD(&obj->ring_list);
        INIT_LIST_HEAD(&obj->obj_exec_link);
        INIT_LIST_HEAD(&obj->vma_list);

        obj->ops = ops;

        obj->fence_reg = I915_FENCE_REG_NONE;
        obj->madv = I915_MADV_WILLNEED;
        /* Avoid an unnecessary call to unbind on the first bind. */
        obj->map_and_fenceable = true;

        i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
}
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
        .get_pages = i915_gem_object_get_pages_gtt,
        .put_pages = i915_gem_object_put_pages_gtt,
};
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                                                  size_t size)
{
        struct drm_i915_gem_object *obj;
        struct address_space *mapping;
        gfp_t mask;

        obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
                return NULL;

        if (drm_gem_object_init(dev, &obj->base, size) != 0) {
                i915_gem_object_free(obj);
                return NULL;
        }

        mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
        if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
                /* 965gm cannot relocate objects above 4GiB. */
                mask &= ~__GFP_HIGHMEM;
                mask |= __GFP_DMA32;
        }

        mapping = file_inode(obj->base.filp)->i_mapping;
        mapping_set_gfp_mask(mapping, mask);

        i915_gem_object_init(obj, &i915_gem_object_ops);

        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;

        if (HAS_LLC(dev)) {
                /* On some devices, we can have the GPU use the LLC (the CPU
                 * cache) for about a 10% performance improvement
                 * compared to uncached. Graphics requests other than
                 * display scanout are coherent with the CPU in
                 * accessing this cache. This means in this mode we
                 * don't need to clflush on the CPU side, and on the
                 * GPU side we only need to flush internal caches to
                 * get data visible to the CPU.
                 *
                 * However, we maintain the display planes as UC, and so
                 * need to rebind when first used as such.
                 */
                obj->cache_level = I915_CACHE_LLC;
        } else
                obj->cache_level = I915_CACHE_NONE;

        trace_i915_gem_object_create(obj);

        return obj;
}
static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
        /* If we are the last user of the backing storage (be it shmemfs
         * pages or stolen etc), we know that the pages are going to be
         * immediately released. In this case, we can then skip copying
         * back the contents from the GPU.
         */

        if (obj->madv != I915_MADV_WILLNEED)
                return false;

        if (obj->base.filp == NULL)
                return true;

        /* At first glance, this looks racy, but then again so would be
         * userspace racing mmap against close. However, the first external
         * reference to the filp can only be obtained through the
         * i915_gem_mmap_ioctl() which safeguards us against the user
         * acquiring such a reference whilst we are in the middle of
         * freeing the object.
         */
        return atomic_long_read(&obj->base.filp->f_count) == 1;
}
void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_vma *vma, *next;

        intel_runtime_pm_get(dev_priv);

        trace_i915_gem_object_destroy(obj);

        list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
                int ret;

                vma->pin_count = 0;
                ret = i915_vma_unbind(vma);
                if (WARN_ON(ret == -ERESTARTSYS)) {
                        bool was_interruptible;

                        was_interruptible = dev_priv->mm.interruptible;
                        dev_priv->mm.interruptible = false;

                        WARN_ON(i915_vma_unbind(vma));

                        dev_priv->mm.interruptible = was_interruptible;
                }
        }

        i915_gem_object_detach_phys(obj);

        /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
         * before progressing. */
        if (obj->stolen)
                i915_gem_object_unpin_pages(obj);

        WARN_ON(obj->frontbuffer_bits);

        if (WARN_ON(obj->pages_pin_count))
                obj->pages_pin_count = 0;
        if (discard_backing_storage(obj))
                obj->madv = I915_MADV_DONTNEED;
        i915_gem_object_put_pages(obj);
        i915_gem_object_free_mmap_offset(obj);

        BUG_ON(obj->pages);

        if (obj->base.import_attach)
                drm_prime_gem_destroy(&obj->base, NULL);

        if (obj->ops->release)
                obj->ops->release(obj);

        drm_gem_object_release(&obj->base);
        i915_gem_info_remove_obj(dev_priv, obj->base.size);

        kfree(obj->bit_17);
        i915_gem_object_free(obj);

        intel_runtime_pm_put(dev_priv);
}
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
                                     struct i915_address_space *vm)
{
        struct i915_vma *vma;
        list_for_each_entry(vma, &obj->vma_list, vma_link)
                if (vma->vm == vm)
                        return vma;

        return NULL;
}

void i915_gem_vma_destroy(struct i915_vma *vma)
{
        WARN_ON(vma->node.allocated);

        /* Keep the vma as a placeholder in the execbuffer reservation lists */
        if (!list_empty(&vma->exec_list))
                return;

        list_del(&vma->vma_link);

        kfree(vma);
}
static void
i915_gem_stop_ringbuffers(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring;
        int i;

        for_each_ring(ring, dev_priv, i)
                intel_stop_ring_buffer(ring);
}
int
i915_gem_suspend(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret = 0;

        mutex_lock(&dev->struct_mutex);
        if (dev_priv->ums.mm_suspended)
                goto err;

        ret = i915_gpu_idle(dev);
        if (ret)
                goto err;

        i915_gem_retire_requests(dev);

        /* Under UMS, be paranoid and evict. */
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_gem_evict_everything(dev);

        i915_kernel_lost_context(dev);
        i915_gem_stop_ringbuffers(dev);

        /* Hack! Don't let anybody do execbuf while we don't control the chip.
         * We need to replace this with a semaphore, or something.
         * And not confound ums.mm_suspended!
         */
        dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
                                                             DRIVER_MODESET);
        mutex_unlock(&dev->struct_mutex);

        del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
        cancel_delayed_work_sync(&dev_priv->mm.retire_work);
        cancel_delayed_work_sync(&dev_priv->mm.idle_work);

        return 0;

err:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
        u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
        int i, ret;

        if (!HAS_L3_DPF(dev) || !remap_info)
                return 0;

        ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
        if (ret)
                return ret;

        /*
         * Note: We do not worry about the concurrent register cacheline hang
         * here because no other code should access these registers other than
         * at initialization time.
         */
        for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
                intel_ring_emit(ring, reg_base + i);
                intel_ring_emit(ring, remap_info[i/4]);
        }

        intel_ring_advance(ring);

        return ret;
}
void i915_gem_init_swizzling(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (INTEL_INFO(dev)->gen < 5 ||
            dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
                return;

        I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
                                 DISP_TILE_SURFACE_SWIZZLING);

        if (IS_GEN5(dev))
                return;

        I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
        if (IS_GEN6(dev))
                I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
        else if (IS_GEN7(dev))
                I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
        else if (IS_GEN8(dev))
                I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
        else
                BUG();
}
static bool
intel_enable_blt(struct drm_device *dev)
{
        if (!HAS_BLT(dev))
                return false;

        /* The blitter was dysfunctional on early prototypes */
        if (IS_GEN6(dev) && dev->pdev->revision < 8) {
                DRM_INFO("BLT not supported on this pre-production hardware;"
                         " graphics performance will be degraded.\n");
                return false;
        }

        return true;
}
static int i915_gem_init_rings(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = intel_init_render_ring_buffer(dev);
        if (ret)
                return ret;

        if (HAS_BSD(dev)) {
                ret = intel_init_bsd_ring_buffer(dev);
                if (ret)
                        goto cleanup_render_ring;
        }

        if (intel_enable_blt(dev)) {
                ret = intel_init_blt_ring_buffer(dev);
                if (ret)
                        goto cleanup_bsd_ring;
        }

        if (HAS_VEBOX(dev)) {
                ret = intel_init_vebox_ring_buffer(dev);
                if (ret)
                        goto cleanup_blt_ring;
        }

        if (HAS_BSD2(dev)) {
                ret = intel_init_bsd2_ring_buffer(dev);
                if (ret)
                        goto cleanup_vebox_ring;
        }

        ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
        if (ret)
                goto cleanup_bsd2_ring;

        return 0;

cleanup_bsd2_ring:
        intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
        intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
cleanup_blt_ring:
        intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
        intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
        intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);

        return ret;
}
int
i915_gem_init_hw(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret, i;

        if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
                return -EIO;

        if (dev_priv->ellc_size)
                I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

        if (IS_HASWELL(dev))
                I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
                           LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

        if (HAS_PCH_NOP(dev)) {
                if (IS_IVYBRIDGE(dev)) {
                        u32 temp = I915_READ(GEN7_MSG_CTL);
                        temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
                        I915_WRITE(GEN7_MSG_CTL, temp);
                } else if (INTEL_INFO(dev)->gen >= 7) {
                        u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
                        temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
                        I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
                }
        }

        i915_gem_init_swizzling(dev);

        ret = i915_gem_init_rings(dev);
        if (ret)
                return ret;

        for (i = 0; i < NUM_L3_SLICES(dev); i++)
                i915_gem_l3_remap(&dev_priv->ring[RCS], i);

        /*
         * XXX: Contexts should only be initialized once. Doing a switch to the
         * default context switch however is something we'd like to do after
         * reset or thaw (the latter may not actually be necessary for HW, but
         * goes with our code better). Context switching requires rings (for
         * the do_switch), but before enabling PPGTT. So don't move this.
         */
        ret = i915_gem_context_enable(dev_priv);
        if (ret && ret != -EIO) {
                DRM_ERROR("Context enable failed %d\n", ret);
                i915_gem_cleanup_ringbuffer(dev);
        }

        return ret;
}
int i915_gem_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        mutex_lock(&dev->struct_mutex);

        if (IS_VALLEYVIEW(dev)) {
                /* VLVA0 (potential hack), BIOS isn't actually waking us */
                I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
                if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
                              VLV_GTLC_ALLOWWAKEACK), 10))
                        DRM_DEBUG_DRIVER("allow wake ack timed out\n");
        }

        i915_gem_init_userptr(dev);
        i915_gem_init_global_gtt(dev);

        ret = i915_gem_context_init(dev);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        ret = i915_gem_init_hw(dev);
        if (ret == -EIO) {
                /* Allow ring initialisation to fail by marking the GPU as
                 * wedged. But we only want to do this where the GPU is angry,
                 * for all other failure, such as an allocation failure, bail.
                 */
                DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
                atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
                ret = 0;
        }
        mutex_unlock(&dev->struct_mutex);

        /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                dev_priv->dri1.allow_batchbuffer = 1;
        return ret;
}
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring;
        int i;

        for_each_ring(ring, dev_priv, i)
                intel_cleanup_ring_buffer(ring);
}
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		atomic_set(&dev_priv->gpu_error.reset_counter, 0);
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->ums.mm_suspended = 0;

	ret = i915_gem_init_hw(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));

	ret = drm_irq_install(dev, dev->pdev->irq);
	if (ret)
		goto cleanup_ringbuffer;
	mutex_unlock(&dev->struct_mutex);

	return 0;

cleanup_ringbuffer:
	i915_gem_cleanup_ringbuffer(dev);
	dev_priv->ums.mm_suspended = 1;
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
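/* Legacy (UMS) counterpart: release the IRQ and quiesce the GPU when the
 * X server gives up the VT.
 */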
int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	mutex_lock(&dev->struct_mutex);
	drm_irq_uninstall(dev);
	mutex_unlock(&dev->struct_mutex);

	return i915_gem_suspend(dev);
}
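/* Idle the hardware when the last DRM client closes (UMS only; under KMS
 * the device stays alive for the kernel console).
 */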
void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_suspend(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}
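/* Initialise the per-engine bookkeeping lists used for request tracking. */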
static void
init_ring_lists(struct intel_engine_cs *ring)
{
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
}
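/* Set up a new address space and link it into the device-wide VM list.
 * The global GTT initialises its drm_mm range manager elsewhere, hence
 * the i915_is_ggtt() check.
 */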
void i915_init_vm(struct drm_i915_private *dev_priv,
		  struct i915_address_space *vm)
{
	if (!i915_is_ggtt(vm))
		drm_mm_init(&vm->mm, vm->start, vm->total);
	vm->dev = dev_priv->dev;
	INIT_LIST_HEAD(&vm->active_list);
	INIT_LIST_HEAD(&vm->inactive_list);
	INIT_LIST_HEAD(&vm->global_link);
	list_add_tail(&vm->global_link, &dev_priv->vm_list);
}
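/* Once-per-device GEM setup: object slab cache, bookkeeping lists, retire
 * and idle workers, fence register accounting, and the memory shrinker /
 * OOM notifier hooks.
 */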
void
i915_gem_load(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	dev_priv->slab =
		kmem_cache_create("i915_gem_object",
				  sizeof(struct drm_i915_gem_object), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	INIT_LIST_HEAD(&dev_priv->vm_list);
	i915_init_vm(dev_priv, &dev_priv->gtt.base);

	INIT_LIST_HEAD(&dev_priv->context_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	for (i = 0; i < I915_NUM_RINGS; i++)
		init_ring_lists(&dev_priv->ring[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
			  i915_gem_idle_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
		I915_WRITE(MI_ARB_STATE,
			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
	}

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	i915_gem_restore_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&dev_priv->mm.shrinker);

	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	register_oom_notifier(&dev_priv->mm.oom_notifier);

	mutex_init(&dev_priv->fb_tracking.lock);
}
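/* drm_file release hook: sever any requests still pointing at this client
 * so that later request retirement never touches freed file state.
 */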
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	cancel_delayed_work_sync(&file_priv->mm.idle_work);

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}
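/* Deferred work run once a client has been idle for a while: clear the
 * per-client RPS boost flag so the client may request another render
 * frequency boost later.
 */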
static void
i915_gem_file_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_file_private *file_priv =
		container_of(work, typeof(*file_priv), mm.idle_work.work);

	atomic_set(&file_priv->rps_wait_boost, false);
}
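/* drm_file open hook: allocate the per-client GEM state (request list,
 * idle worker) and open the default context for this client.
 */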
int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = dev->dev_private;
	file_priv->file = file;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);
	INIT_DELAYED_WORK(&file_priv->mm.idle_work,
			  i915_gem_file_idle_work_handler);

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}
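/* Move frontbuffer tracking bits from the object previously backing a
 * frontbuffer (e.g. a scanned-out framebuffer) to its replacement; either
 * object may be NULL for the enable/disable cases. Illustrative call from
 * a page-flip path (hypothetical object names, real frontbuffer-bit macro):
 *
 *	i915_gem_track_fb(old_fb_obj, new_fb_obj,
 *			  INTEL_FRONTBUFFER_PRIMARY(pipe));
 */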
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	if (old) {
		WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
		WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
		old->frontbuffer_bits &= ~frontbuffer_bits;
	}

	if (new) {
		WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
		WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
		new->frontbuffer_bits |= frontbuffer_bits;
	}
}
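/* Best-effort check that @task is the current owner of @mutex. Only
 * reliable when mutex owner tracking is compiled in (SMP or mutex
 * debugging); on UP without debugging we must conservatively say no.
 */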
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}
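/* Try to take struct_mutex for the shrinker. If the current thread already
 * holds the lock (i.e. the shrinker was entered from our own allocation),
 * "steal" it (*unlock = false) instead of deadlocking, unless stealing has
 * been explicitly disabled.
 */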
static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return false;

		if (to_i915(dev)->mm.shrinker_no_lock_stealing)
			return false;

		*unlock = false;
	} else
		*unlock = true;

	return true;
}
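/* Count how many of the object's VMAs actually have GTT space allocated. */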
static int num_vma_bound(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int count = 0;

	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (drm_mm_node_allocated(&vma->node))
			count++;

	return count;
}
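/* Shrinker "count" callback: report how many pages we could plausibly
 * release, i.e. unpinned unbound objects plus bound objects whose pages
 * are pinned only by their bindings.
 */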
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj;
	unsigned long count;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return 0;

	count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
		if (obj->pages_pin_count == 0)
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!i915_gem_obj_is_pinned(obj) &&
		    obj->pages_pin_count == num_vma_bound(obj))
			count += obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}
/* All the new VM stuff */
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
				  struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (!dev_priv->mm.aliasing_ppgtt ||
	    vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	list_for_each_entry(vma, &o->vma_list, vma_link) {
		if (vma->vm == vm)
			return vma->node.start;
	}

	WARN(1, "%s vma for this object not found.\n",
	     i915_is_ggtt(vm) ? "global" : "ppgtt");
	return -1;
}
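/* Report whether the object has an allocated binding in @vm. */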
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (!dev_priv->mm.aliasing_ppgtt ||
	    vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	BUG_ON(list_empty(&o->vma_list));

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm)
			return vma->node.size;

	return 0;
}
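/* Shrinker "scan" callback: first drop purgeable objects, then unbind and
 * release further objects until the requested number of pages is freed.
 */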
static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = dev_priv->dev;
	unsigned long freed;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return SHRINK_STOP;

	freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
	if (freed < sc->nr_to_scan)
		freed += __i915_gem_shrink(dev_priv,
					   sc->nr_to_scan - freed,
					   false);
	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return freed;
}
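/* OOM notifier: as a last resort before the OOM killer picks a victim,
 * try hard to release every GEM page we can, then report what remains
 * pinned.
 */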
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj;
	unsigned long timeout = msecs_to_jiffies(5000) + 1;
	unsigned long pinned, bound, unbound, freed;
	bool was_interruptible;
	bool unlock;

	while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
		schedule_timeout_killable(1);
		if (fatal_signal_pending(current))
			return NOTIFY_DONE;
	}
	if (timeout == 0) {
		pr_err("Unable to purge GPU memory due to lock contention.\n");
		return NOTIFY_DONE;
	}

	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;

	freed = i915_gem_shrink_all(dev_priv);

	dev_priv->mm.interruptible = was_interruptible;

	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	unbound = bound = pinned = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (!obj->base.filp) /* not backed by a freeable object */
			continue;

		if (obj->pages_pin_count)
			pinned += obj->base.size;
		else
			unbound += obj->base.size;
	}
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!obj->base.filp)
			continue;

		if (obj->pages_pin_count)
			pinned += obj->base.size;
		else
			bound += obj->base.size;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
		freed, pinned);
	if (unbound || bound)
		pr_err("%lu and %lu bytes still available in the "
		       "bound and unbound GPU page lists.\n",
		       bound, unbound);

	*(unsigned long *)ptr += freed;
	return NOTIFY_DONE;
}
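/* Return the object's global GTT vma, which by construction is always the
 * first entry on the vma_list, or NULL if it has no GGTT binding.
 */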
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	/* This WARN has probably outlived its usefulness (callers already
	 * WARN if they don't find the GGTT vma they expect). When removing,
	 * remember to remove the pre-check in is_pin_display() as well */
	if (WARN_ON(list_empty(&obj->vma_list)))
		return NULL;

	vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
	if (vma->vm != obj_to_ggtt(obj))
		return NULL;

	return vma;
}