2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/oom.h>
35 #include <linux/shmem_fs.h>
36 #include <linux/slab.h>
37 #include <linux/swap.h>
38 #include <linux/pci.h>
39 #include <linux/dma-buf.h>
41 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
42 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
44 static __must_check int
45 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
48 i915_gem_object_retire(struct drm_i915_gem_object *obj);
50 static void i915_gem_write_fence(struct drm_device *dev, int reg,
51 struct drm_i915_gem_object *obj);
52 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
53 struct drm_i915_fence_reg *fence,
56 static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
57 struct shrink_control *sc);
58 static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
59 struct shrink_control *sc);
60 static int i915_gem_shrinker_oom(struct notifier_block *nb,
63 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
65 static bool cpu_cache_is_coherent(struct drm_device *dev,
66 enum i915_cache_level level)
68 return HAS_LLC(dev) || level != I915_CACHE_NONE;
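/*
 * In short: on parts with a shared last-level cache (HAS_LLC) CPU and GPU
 * accesses are coherent regardless of the object's cache level, so CPU reads
 * and writes never need an explicit clflush; without an LLC only snooped
 * objects (anything other than I915_CACHE_NONE) are coherent, and uncached
 * objects must be clflushed around CPU access. The pread/pwrite and domain
 * management code below keys off these two helpers for exactly that.
 */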
71 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
73 if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
76 return obj->pin_display;
79 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
82 i915_gem_release_mmap(obj);
84 /* As we do not have an associated fence register, we will force
85 * a tiling change if we ever need to acquire one.
87 obj->fence_dirty = false;
88 obj->fence_reg = I915_FENCE_REG_NONE;
91 /* some bookkeeping */
92 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
95 spin_lock(&dev_priv->mm.object_stat_lock);
96 dev_priv->mm.object_count++;
97 dev_priv->mm.object_memory += size;
98 spin_unlock(&dev_priv->mm.object_stat_lock);
101 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
104 spin_lock(&dev_priv->mm.object_stat_lock);
105 dev_priv->mm.object_count--;
106 dev_priv->mm.object_memory -= size;
107 spin_unlock(&dev_priv->mm.object_stat_lock);
111 i915_gem_wait_for_error(struct i915_gpu_error *error)
115 #define EXIT_COND (!i915_reset_in_progress(error) || \
116 i915_terminally_wedged(error))
121 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
122 * userspace. If it takes that long something really bad is going on and
123 * we should simply try to bail out and fail as gracefully as possible.
125 ret = wait_event_interruptible_timeout(error->reset_queue,
129 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
131 } else if (ret < 0) {
139 int i915_mutex_lock_interruptible(struct drm_device *dev)
141 struct drm_i915_private *dev_priv = dev->dev_private;
144 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
148 ret = mutex_lock_interruptible(&dev->struct_mutex);
152 WARN_ON(i915_verify_lists(dev));
157 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
159 return i915_gem_obj_bound_any(obj) && !obj->active;
163 i915_gem_init_ioctl(struct drm_device *dev, void *data,
164 struct drm_file *file)
166 struct drm_i915_private *dev_priv = dev->dev_private;
167 struct drm_i915_gem_init *args = data;
169 if (drm_core_check_feature(dev, DRIVER_MODESET))
172 if (args->gtt_start >= args->gtt_end ||
173 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
176 /* GEM with user mode setting was never supported on ilk and later. */
177 if (INTEL_INFO(dev)->gen >= 5)
180 mutex_lock(&dev->struct_mutex);
181 i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
183 dev_priv->gtt.mappable_end = args->gtt_end;
184 mutex_unlock(&dev->struct_mutex);
190 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
191 struct drm_file *file)
193 struct drm_i915_private *dev_priv = dev->dev_private;
194 struct drm_i915_gem_get_aperture *args = data;
195 struct drm_i915_gem_object *obj;
199 mutex_lock(&dev->struct_mutex);
200 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
201 if (i915_gem_obj_is_pinned(obj))
202 pinned += i915_gem_obj_ggtt_size(obj);
203 mutex_unlock(&dev->struct_mutex);
205 args->aper_size = dev_priv->gtt.base.total;
206 args->aper_available_size = args->aper_size - pinned;
211 static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
213 drm_dma_handle_t *phys = obj->phys_handle;
218 if (obj->madv == I915_MADV_WILLNEED) {
219 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
220 char *vaddr = phys->vaddr;
223 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
224 struct page *page = shmem_read_mapping_page(mapping, i);
226 char *dst = kmap_atomic(page);
227 memcpy(dst, vaddr, PAGE_SIZE);
228 drm_clflush_virt_range(dst, PAGE_SIZE);
231 set_page_dirty(page);
232 mark_page_accessed(page);
233 page_cache_release(page);
237 i915_gem_chipset_flush(obj->base.dev);
241 set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
243 drm_pci_free(obj->base.dev, phys);
244 obj->phys_handle = NULL;
248 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
251 drm_dma_handle_t *phys;
252 struct address_space *mapping;
256 if (obj->phys_handle) {
257 if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
263 if (obj->madv != I915_MADV_WILLNEED)
266 if (obj->base.filp == NULL)
269 /* create a new object */
270 phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
276 set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
278 mapping = file_inode(obj->base.filp)->i_mapping;
279 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
283 page = shmem_read_mapping_page(mapping, i);
286 set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
288 drm_pci_free(obj->base.dev, phys);
289 return PTR_ERR(page);
292 src = kmap_atomic(page);
293 memcpy(vaddr, src, PAGE_SIZE);
296 mark_page_accessed(page);
297 page_cache_release(page);
302 obj->phys_handle = phys;
307 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
308 struct drm_i915_gem_pwrite *args,
309 struct drm_file *file_priv)
311 struct drm_device *dev = obj->base.dev;
312 void *vaddr = obj->phys_handle->vaddr + args->offset;
313 char __user *user_data = to_user_ptr(args->data_ptr);
315 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
316 unsigned long unwritten;
318 /* The physical object once assigned is fixed for the lifetime
319 * of the obj, so we can safely drop the lock and continue
322 mutex_unlock(&dev->struct_mutex);
323 unwritten = copy_from_user(vaddr, user_data, args->size);
324 mutex_lock(&dev->struct_mutex);
329 i915_gem_chipset_flush(dev);
333 void *i915_gem_object_alloc(struct drm_device *dev)
335 struct drm_i915_private *dev_priv = dev->dev_private;
336 return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
339 void i915_gem_object_free(struct drm_i915_gem_object *obj)
341 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
342 kmem_cache_free(dev_priv->slab, obj);
346 i915_gem_create(struct drm_file *file,
347 struct drm_device *dev,
352 struct drm_i915_gem_object *obj;
356 size = roundup(size, PAGE_SIZE);
360 /* Allocate the new object */
361 obj = i915_gem_alloc_object(dev, size);
365 obj->base.dumb = dumb;
366 ret = drm_gem_handle_create(file, &obj->base, &handle);
367 /* drop reference from allocate - handle holds it now */
368 drm_gem_object_unreference_unlocked(&obj->base);
377 i915_gem_dumb_create(struct drm_file *file,
378 struct drm_device *dev,
379 struct drm_mode_create_dumb *args)
381 /* have to work out size/pitch and return them */
382 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
383 args->size = args->pitch * args->height;
384 return i915_gem_create(file, dev,
385 args->size, true, &args->handle);
389 * Creates a new mm object and returns a handle to it.
392 i915_gem_create_ioctl(struct drm_device *dev, void *data,
393 struct drm_file *file)
395 struct drm_i915_gem_create *args = data;
397 return i915_gem_create(file, dev,
398 args->size, false, &args->handle);
402 __copy_to_user_swizzled(char __user *cpu_vaddr,
403 const char *gpu_vaddr, int gpu_offset,
406 int ret, cpu_offset = 0;
409 int cacheline_end = ALIGN(gpu_offset + 1, 64);
410 int this_length = min(cacheline_end - gpu_offset, length);
411 int swizzled_gpu_offset = gpu_offset ^ 64;
413 ret = __copy_to_user(cpu_vaddr + cpu_offset,
414 gpu_vaddr + swizzled_gpu_offset,
419 cpu_offset += this_length;
420 gpu_offset += this_length;
421 length -= this_length;
428 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
429 const char __user *cpu_vaddr,
432 int ret, cpu_offset = 0;
435 int cacheline_end = ALIGN(gpu_offset + 1, 64);
436 int this_length = min(cacheline_end - gpu_offset, length);
437 int swizzled_gpu_offset = gpu_offset ^ 64;
439 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
440 cpu_vaddr + cpu_offset,
445 cpu_offset += this_length;
446 gpu_offset += this_length;
447 length -= this_length;
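/*
 * A note on the two swizzled copy helpers above: bit-17 swizzling swaps the
 * two 64-byte halves of every 128-byte span of the object, which is why the
 * copy is chunked at cacheline (64 byte) granularity and the object-side
 * offset is XORed with 64 to hit the swapped half. Whether a page needs the
 * treatment at all is decided per page from bit 17 of its physical address,
 * see the page_to_phys(page) & (1 << 17) checks in the pread/pwrite loops.
 */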
454 * Pins the specified object's pages and synchronizes the object with
455 * GPU accesses. Sets needs_clflush to non-zero if the caller should
456 * flush the object from the CPU cache.
458 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
468 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
469 /* If we're not in the cpu read domain, set ourselves into the gtt
470 * read domain and manually flush cachelines (if required). This
471 * optimizes for the case when the gpu will dirty the data
472 * anyway again before the next pread happens. */
473 *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
475 ret = i915_gem_object_wait_rendering(obj, true);
479 i915_gem_object_retire(obj);
482 ret = i915_gem_object_get_pages(obj);
486 i915_gem_object_pin_pages(obj);
491 /* Per-page copy function for the shmem pread fastpath.
492 * Flushes invalid cachelines before reading the target if
493 * needs_clflush is set. */
495 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
496 char __user *user_data,
497 bool page_do_bit17_swizzling, bool needs_clflush)
502 if (unlikely(page_do_bit17_swizzling))
505 vaddr = kmap_atomic(page);
507 drm_clflush_virt_range(vaddr + shmem_page_offset,
509 ret = __copy_to_user_inatomic(user_data,
510 vaddr + shmem_page_offset,
512 kunmap_atomic(vaddr);
514 return ret ? -EFAULT : 0;
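/*
 * The fast path above runs with dev->struct_mutex held and the page mapped
 * via kmap_atomic(), so it must neither sleep nor fault: hence the inatomic
 * copy and the early bail-out for bit-17 swizzled pages. When it fails,
 * i915_gem_shmem_pread() drops the mutex, prefaults the user buffer and
 * retries with shmem_pread_slow() below.
 */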
518 shmem_clflush_swizzled_range(char *addr, unsigned long length,
521 if (unlikely(swizzled)) {
522 unsigned long start = (unsigned long) addr;
523 unsigned long end = (unsigned long) addr + length;
525 /* For swizzling simply ensure that we always flush both
526 * channels. Lame, but simple and it works. Swizzled
527 * pwrite/pread is far from a hotpath - current userspace
528 * doesn't use it at all. */
529 start = round_down(start, 128);
530 end = round_up(end, 128);
532 drm_clflush_virt_range((void *)start, end - start);
534 drm_clflush_virt_range(addr, length);
539 /* Only difference to the fast-path function is that this can handle bit17
540 * and uses non-atomic copy and kmap functions. */
542 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
543 char __user *user_data,
544 bool page_do_bit17_swizzling, bool needs_clflush)
551 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
553 page_do_bit17_swizzling);
555 if (page_do_bit17_swizzling)
556 ret = __copy_to_user_swizzled(user_data,
557 vaddr, shmem_page_offset,
560 ret = __copy_to_user(user_data,
561 vaddr + shmem_page_offset,
565 return ret ? -EFAULT : 0;
569 i915_gem_shmem_pread(struct drm_device *dev,
570 struct drm_i915_gem_object *obj,
571 struct drm_i915_gem_pread *args,
572 struct drm_file *file)
574 char __user *user_data;
577 int shmem_page_offset, page_length, ret = 0;
578 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
580 int needs_clflush = 0;
581 struct sg_page_iter sg_iter;
583 user_data = to_user_ptr(args->data_ptr);
586 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
588 ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
592 offset = args->offset;
594 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
595 offset >> PAGE_SHIFT) {
596 struct page *page = sg_page_iter_page(&sg_iter);
601 /* Operation in this page
603 * shmem_page_offset = offset within page in shmem file
604 * page_length = bytes to copy for this page
606 shmem_page_offset = offset_in_page(offset);
607 page_length = remain;
608 if ((shmem_page_offset + page_length) > PAGE_SIZE)
609 page_length = PAGE_SIZE - shmem_page_offset;
611 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
612 (page_to_phys(page) & (1 << 17)) != 0;
614 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
615 user_data, page_do_bit17_swizzling,
620 mutex_unlock(&dev->struct_mutex);
622 if (likely(!i915.prefault_disable) && !prefaulted) {
623 ret = fault_in_multipages_writeable(user_data, remain);
624 /* Userspace is tricking us, but we've already clobbered
625 * its pages with the prefault and promised to write the
626 * data up to the first fault. Hence ignore any errors
627 * and just continue. */
632 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
633 user_data, page_do_bit17_swizzling,
636 mutex_lock(&dev->struct_mutex);
642 remain -= page_length;
643 user_data += page_length;
644 offset += page_length;
648 i915_gem_object_unpin_pages(obj);
654 * Reads data from the object referenced by handle.
656 * On error, the contents of *data are undefined.
659 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
660 struct drm_file *file)
662 struct drm_i915_gem_pread *args = data;
663 struct drm_i915_gem_object *obj;
669 if (!access_ok(VERIFY_WRITE,
670 to_user_ptr(args->data_ptr),
674 ret = i915_mutex_lock_interruptible(dev);
678 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
679 if (&obj->base == NULL) {
684 /* Bounds check source. */
685 if (args->offset > obj->base.size ||
686 args->size > obj->base.size - args->offset) {
691 /* prime objects have no backing filp to GEM pread/pwrite
694 if (!obj->base.filp) {
699 trace_i915_gem_object_pread(obj, args->offset, args->size);
701 ret = i915_gem_shmem_pread(dev, obj, args, file);
704 drm_gem_object_unreference(&obj->base);
706 mutex_unlock(&dev->struct_mutex);
710 /* This is the fast write path which cannot handle
711 * page faults in the source data
715 fast_user_write(struct io_mapping *mapping,
716 loff_t page_base, int page_offset,
717 char __user *user_data,
720 void __iomem *vaddr_atomic;
722 unsigned long unwritten;
724 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
725 /* We can use the cpu mem copy function because this is X86. */
726 vaddr = (void __force*)vaddr_atomic + page_offset;
727 unwritten = __copy_from_user_inatomic_nocache(vaddr,
729 io_mapping_unmap_atomic(vaddr_atomic);
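/*
 * fast_user_write() maps a single aperture page write-combined via
 * io_mapping_map_atomic_wc() and copies with the inatomic _nocache variant,
 * so a fault on the user source aborts the copy instead of sleeping and the
 * data is not pulled through the CPU cache. A non-zero return propagates up
 * from i915_gem_gtt_pwrite_fast() and makes the pwrite ioctl fall back to
 * the shmem path.
 */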
734 * This is the fast pwrite path, where we copy the data directly from the
735 * user into the GTT, uncached.
738 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
739 struct drm_i915_gem_object *obj,
740 struct drm_i915_gem_pwrite *args,
741 struct drm_file *file)
743 struct drm_i915_private *dev_priv = dev->dev_private;
745 loff_t offset, page_base;
746 char __user *user_data;
747 int page_offset, page_length, ret;
749 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
753 ret = i915_gem_object_set_to_gtt_domain(obj, true);
757 ret = i915_gem_object_put_fence(obj);
761 user_data = to_user_ptr(args->data_ptr);
764 offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
767 /* Operation in this page
769 * page_base = page offset within aperture
770 * page_offset = offset within page
771 * page_length = bytes to copy for this page
773 page_base = offset & PAGE_MASK;
774 page_offset = offset_in_page(offset);
775 page_length = remain;
776 if ((page_offset + remain) > PAGE_SIZE)
777 page_length = PAGE_SIZE - page_offset;
779 /* If we get a fault while copying data, then (presumably) our
780 * source page isn't available. Return the error and we'll
781 * retry in the slow path.
783 if (fast_user_write(dev_priv->gtt.mappable, page_base,
784 page_offset, user_data, page_length)) {
789 remain -= page_length;
790 user_data += page_length;
791 offset += page_length;
795 i915_gem_object_ggtt_unpin(obj);
800 /* Per-page copy function for the shmem pwrite fastpath.
801 * Flushes invalid cachelines before writing to the target if
802 * needs_clflush_before is set and flushes out any written cachelines after
803 * writing if needs_clflush is set. */
805 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
806 char __user *user_data,
807 bool page_do_bit17_swizzling,
808 bool needs_clflush_before,
809 bool needs_clflush_after)
814 if (unlikely(page_do_bit17_swizzling))
817 vaddr = kmap_atomic(page);
818 if (needs_clflush_before)
819 drm_clflush_virt_range(vaddr + shmem_page_offset,
821 ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
822 user_data, page_length);
823 if (needs_clflush_after)
824 drm_clflush_virt_range(vaddr + shmem_page_offset,
826 kunmap_atomic(vaddr);
828 return ret ? -EFAULT : 0;
831 /* Only difference to the fast-path function is that this can handle bit17
832 * and uses non-atomic copy and kmap functions. */
834 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
835 char __user *user_data,
836 bool page_do_bit17_swizzling,
837 bool needs_clflush_before,
838 bool needs_clflush_after)
844 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
845 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
847 page_do_bit17_swizzling);
848 if (page_do_bit17_swizzling)
849 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
853 ret = __copy_from_user(vaddr + shmem_page_offset,
856 if (needs_clflush_after)
857 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
859 page_do_bit17_swizzling);
862 return ret ? -EFAULT : 0;
866 i915_gem_shmem_pwrite(struct drm_device *dev,
867 struct drm_i915_gem_object *obj,
868 struct drm_i915_gem_pwrite *args,
869 struct drm_file *file)
873 char __user *user_data;
874 int shmem_page_offset, page_length, ret = 0;
875 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
876 int hit_slowpath = 0;
877 int needs_clflush_after = 0;
878 int needs_clflush_before = 0;
879 struct sg_page_iter sg_iter;
881 user_data = to_user_ptr(args->data_ptr);
884 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
886 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
887 /* If we're not in the cpu write domain, set ourselves into the gtt
888 * write domain and manually flush cachelines (if required). This
889 * optimizes for the case when the gpu will use the data
890 * right away and we therefore have to clflush anyway. */
891 needs_clflush_after = cpu_write_needs_clflush(obj);
892 ret = i915_gem_object_wait_rendering(obj, false);
896 i915_gem_object_retire(obj);
898 /* Same trick applies to invalidate partially written cachelines read
900 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
901 needs_clflush_before =
902 !cpu_cache_is_coherent(dev, obj->cache_level);
904 ret = i915_gem_object_get_pages(obj);
908 i915_gem_object_pin_pages(obj);
910 offset = args->offset;
913 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
914 offset >> PAGE_SHIFT) {
915 struct page *page = sg_page_iter_page(&sg_iter);
916 int partial_cacheline_write;
921 /* Operation in this page
923 * shmem_page_offset = offset within page in shmem file
924 * page_length = bytes to copy for this page
926 shmem_page_offset = offset_in_page(offset);
928 page_length = remain;
929 if ((shmem_page_offset + page_length) > PAGE_SIZE)
930 page_length = PAGE_SIZE - shmem_page_offset;
932 /* If we don't overwrite a cacheline completely we need to be
933 * careful to have up-to-date data by first clflushing. Don't
934 * overcomplicate things and flush the entire patch. */
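/* Worked example: with the usual 64-byte clflush size, a write of 100 bytes
 * at page offset 8 only partially covers its first and last cachelines
 * ((8 | 100) & 63 != 0), so the destination range is clflushed first to make
 * sure the untouched bytes in those lines come from memory rather than a
 * stale cached copy.
 */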
935 partial_cacheline_write = needs_clflush_before &&
936 ((shmem_page_offset | page_length)
937 & (boot_cpu_data.x86_clflush_size - 1));
939 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
940 (page_to_phys(page) & (1 << 17)) != 0;
942 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
943 user_data, page_do_bit17_swizzling,
944 partial_cacheline_write,
945 needs_clflush_after);
950 mutex_unlock(&dev->struct_mutex);
951 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
952 user_data, page_do_bit17_swizzling,
953 partial_cacheline_write,
954 needs_clflush_after);
956 mutex_lock(&dev->struct_mutex);
962 remain -= page_length;
963 user_data += page_length;
964 offset += page_length;
968 i915_gem_object_unpin_pages(obj);
972 * Fixup: Flush cpu caches in case we didn't flush the dirty
973 * cachelines in-line while writing and the object moved
974 * out of the cpu write domain while we've dropped the lock.
976 if (!needs_clflush_after &&
977 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
978 if (i915_gem_clflush_object(obj, obj->pin_display))
979 i915_gem_chipset_flush(dev);
983 if (needs_clflush_after)
984 i915_gem_chipset_flush(dev);
990 * Writes data to the object referenced by handle.
992 * On error, the contents of the buffer that were to be modified are undefined.
995 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
996 struct drm_file *file)
998 struct drm_i915_gem_pwrite *args = data;
999 struct drm_i915_gem_object *obj;
1002 if (args->size == 0)
1005 if (!access_ok(VERIFY_READ,
1006 to_user_ptr(args->data_ptr),
1010 if (likely(!i915.prefault_disable)) {
1011 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
1017 ret = i915_mutex_lock_interruptible(dev);
1021 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1022 if (&obj->base == NULL) {
1027 /* Bounds check destination. */
1028 if (args->offset > obj->base.size ||
1029 args->size > obj->base.size - args->offset) {
1034 /* prime objects have no backing filp to GEM pread/pwrite
1037 if (!obj->base.filp) {
1042 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1045 /* We can only do the GTT pwrite on untiled buffers, as otherwise
1046 * it would end up going through the fenced access, and we'll get
1047 * different detiling behavior between reading and writing.
1048 * pread/pwrite currently are reading and writing from the CPU
1049 * perspective, requiring manual detiling by the client.
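 *
 * So the overall strategy below is: objects with a phys backing store go
 * through i915_gem_phys_pwrite(); untiled objects not already in the CPU
 * write domain whose CPU writes would need a clflush are tried through the
 * GTT fast path; everything else, and any fast path that faults or runs out
 * of aperture space (-EFAULT/-ENOSPC), ends up in the shmem pwrite path.
 */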
1051 if (obj->phys_handle) {
1052 ret = i915_gem_phys_pwrite(obj, args, file);
1056 if (obj->tiling_mode == I915_TILING_NONE &&
1057 obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
1058 cpu_write_needs_clflush(obj)) {
1059 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
1060 /* Note that the gtt paths might fail with non-page-backed user
1061 * pointers (e.g. gtt mappings when moving data between
1062 * textures). Fall back to the shmem path in that case. */
1065 if (ret == -EFAULT || ret == -ENOSPC)
1066 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
1069 drm_gem_object_unreference(&obj->base);
1071 mutex_unlock(&dev->struct_mutex);
1076 i915_gem_check_wedge(struct i915_gpu_error *error,
1079 if (i915_reset_in_progress(error)) {
1080 /* Non-interruptible callers can't handle -EAGAIN, hence return
1081 * -EIO unconditionally for these. */
1085 /* Recovery complete, but the reset failed ... */
1086 if (i915_terminally_wedged(error))
1090 * Check if GPU Reset is in progress - we need intel_ring_begin
1091 * to work properly to reinit the hw state while the gpu is
1092 * still marked as reset-in-progress. Handle this with a flag.
1094 if (!error->reload_in_reset)
1102 * Compare seqno against outstanding lazy request. Emit a request if they are
1106 i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
1110 BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
1113 if (seqno == ring->outstanding_lazy_seqno)
1114 ret = i915_add_request(ring, NULL);
1119 static void fake_irq(unsigned long data)
1121 wake_up_process((struct task_struct *)data);
1124 static bool missed_irq(struct drm_i915_private *dev_priv,
1125 struct intel_engine_cs *ring)
1127 return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
1130 static bool can_wait_boost(struct drm_i915_file_private *file_priv)
1132 if (file_priv == NULL)
1135 return !atomic_xchg(&file_priv->rps_wait_boost, true);
1139 * __i915_wait_seqno - wait until execution of seqno has finished
1140 * @ring: the ring expected to report seqno
1142 * @reset_counter: reset sequence associated with the given seqno
1143 * @interruptible: do an interruptible wait (normally yes)
1144 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1146 * Note: It is of utmost importance that the passed in seqno and reset_counter
1147 * values have been read by the caller in an smp safe manner. Where read-side
1148 * locks are involved, it is sufficient to read the reset_counter before
1149 * unlocking the lock that protects the seqno. For lockless tricks, the
1150 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
1153 * Returns 0 if the seqno was found within the allotted time. Else returns the
1154 * errno with remaining time filled in timeout argument.
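 *
 * A typical lockless caller (cf. i915_gem_object_wait_rendering__nonblocking()
 * below) therefore samples the counter while still holding the lock that
 * published the seqno:
 *
 *	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 *	mutex_unlock(&dev->struct_mutex);
 *	ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
 *	mutex_lock(&dev->struct_mutex);
 */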
1156 int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
1157 unsigned reset_counter,
1160 struct drm_i915_file_private *file_priv)
1162 struct drm_device *dev = ring->dev;
1163 struct drm_i915_private *dev_priv = dev->dev_private;
1164 const bool irq_test_in_progress =
1165 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
1167 unsigned long timeout_expire;
1171 WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
1173 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1176 timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
1178 if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
1179 gen6_rps_boost(dev_priv);
1181 mod_delayed_work(dev_priv->wq,
1182 &file_priv->mm.idle_work,
1183 msecs_to_jiffies(100));
1186 if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
1189 /* Record current time in case interrupted by signal, or wedged */
1190 trace_i915_gem_request_wait_begin(ring, seqno);
1191 before = ktime_get_raw_ns();
1193 struct timer_list timer;
1195 prepare_to_wait(&ring->irq_queue, &wait,
1196 interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
1198 /* We need to check whether any gpu reset happened in between
1199 * the caller grabbing the seqno and now ... */
1200 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
1201 /* ... but upgrade the -EAGAIN to an -EIO if the gpu
1202 * is truly gone. */
1203 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1209 if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
1214 if (interruptible && signal_pending(current)) {
1219 if (timeout && time_after_eq(jiffies, timeout_expire)) {
1224 timer.function = NULL;
1225 if (timeout || missed_irq(dev_priv, ring)) {
1226 unsigned long expire;
1228 setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
1229 expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
1230 mod_timer(&timer, expire);
1235 if (timer.function) {
1236 del_singleshot_timer_sync(&timer);
1237 destroy_timer_on_stack(&timer);
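/*
 * The on-stack timer is the fallback for rings whose user interrupt has been
 * flagged as missing by the hangcheck (missed_irq()): fake_irq() simply wakes
 * this task after one jiffy so the loop re-samples the seqno by polling
 * instead of relying on an interrupt that may never arrive. With a
 * caller-supplied timeout the same timer also bounds the sleep at
 * timeout_expire.
 */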
1240 now = ktime_get_raw_ns();
1241 trace_i915_gem_request_wait_end(ring, seqno);
1243 if (!irq_test_in_progress)
1244 ring->irq_put(ring);
1246 finish_wait(&ring->irq_queue, &wait);
1249 s64 tres = *timeout - (now - before);
1251 *timeout = tres < 0 ? 0 : tres;
1258 * Waits for a sequence number to be signaled, and cleans up the
1259 * request and object lists appropriately for that event.
1262 i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
1264 struct drm_device *dev = ring->dev;
1265 struct drm_i915_private *dev_priv = dev->dev_private;
1266 bool interruptible = dev_priv->mm.interruptible;
1267 unsigned reset_counter;
1270 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1273 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1277 ret = i915_gem_check_olr(ring, seqno);
1281 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1282 return __i915_wait_seqno(ring, seqno, reset_counter, interruptible,
1287 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
1292 /* Manually manage the write flush as we may have not yet
1293 * retired the buffer.
1295 * Note that the last_write_seqno is always the earlier of
1296 * the two (read/write) seqno, so if we have successfully waited,
1297 * we know we have passed the last write.
1299 obj->last_write_seqno = 0;
1305 * Ensures that all rendering to the object has completed and the object is
1306 * safe to unbind from the GTT or access from the CPU.
1308 static __must_check int
1309 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1312 struct intel_engine_cs *ring = obj->ring;
1316 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1320 ret = i915_wait_seqno(ring, seqno);
1324 return i915_gem_object_wait_rendering__tail(obj);
1327 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1328 * as the object state may change during this call.
1330 static __must_check int
1331 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1332 struct drm_i915_file_private *file_priv,
1335 struct drm_device *dev = obj->base.dev;
1336 struct drm_i915_private *dev_priv = dev->dev_private;
1337 struct intel_engine_cs *ring = obj->ring;
1338 unsigned reset_counter;
1342 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1343 BUG_ON(!dev_priv->mm.interruptible);
1345 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1349 ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
1353 ret = i915_gem_check_olr(ring, seqno);
1357 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1358 mutex_unlock(&dev->struct_mutex);
1359 ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
1361 mutex_lock(&dev->struct_mutex);
1365 return i915_gem_object_wait_rendering__tail(obj);
1369 * Called when user space prepares to use an object with the CPU, either
1370 * through the mmap ioctl's mapping or a GTT mapping.
1373 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1374 struct drm_file *file)
1376 struct drm_i915_gem_set_domain *args = data;
1377 struct drm_i915_gem_object *obj;
1378 uint32_t read_domains = args->read_domains;
1379 uint32_t write_domain = args->write_domain;
1382 /* Only handle setting domains to types used by the CPU. */
1383 if (write_domain & I915_GEM_GPU_DOMAINS)
1386 if (read_domains & I915_GEM_GPU_DOMAINS)
1389 /* Having something in the write domain implies it's in the read
1390 * domain, and only that read domain. Enforce that in the request.
1392 if (write_domain != 0 && read_domains != write_domain)
1395 ret = i915_mutex_lock_interruptible(dev);
1399 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1400 if (&obj->base == NULL) {
1405 /* Try to flush the object off the GPU without holding the lock.
1406 * We will repeat the flush holding the lock in the normal manner
1407 * to catch cases where we are gazumped.
1409 ret = i915_gem_object_wait_rendering__nonblocking(obj,
1415 if (read_domains & I915_GEM_DOMAIN_GTT) {
1416 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1418 /* Silently promote "you're not bound, there was nothing to do"
1419 * to success, since the client was just asking us to
1420 * make sure everything was done.
1425 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1429 drm_gem_object_unreference(&obj->base);
1431 mutex_unlock(&dev->struct_mutex);
1436 * Called when user space has done writes to this buffer
1439 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1440 struct drm_file *file)
1442 struct drm_i915_gem_sw_finish *args = data;
1443 struct drm_i915_gem_object *obj;
1446 ret = i915_mutex_lock_interruptible(dev);
1450 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1451 if (&obj->base == NULL) {
1456 /* Pinned buffers may be scanout, so flush the cache */
1457 if (obj->pin_display)
1458 i915_gem_object_flush_cpu_write_domain(obj, true);
1460 drm_gem_object_unreference(&obj->base);
1462 mutex_unlock(&dev->struct_mutex);
1467 * Maps the contents of an object, returning the address it is mapped
1470 * While the mapping holds a reference on the contents of the object, it doesn't
1471 * imply a ref on the object itself.
1475 * DRM driver writers who look at this function as an example for how to do GEM
1476 * mmap support, please don't implement mmap support like here. The modern way
1477 * to implement DRM mmap support is with an mmap offset ioctl (like
1478 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1479 * That way debug tooling like valgrind will understand what's going on, hiding
1480 * the mmap call in a driver private ioctl will break that. The i915 driver only
1481 * does cpu mmaps this way because we didn't know better.
1484 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1485 struct drm_file *file)
1487 struct drm_i915_gem_mmap *args = data;
1488 struct drm_gem_object *obj;
1491 obj = drm_gem_object_lookup(dev, file, args->handle);
1495 /* prime objects have no backing filp to GEM mmap
1499 drm_gem_object_unreference_unlocked(obj);
1503 addr = vm_mmap(obj->filp, 0, args->size,
1504 PROT_READ | PROT_WRITE, MAP_SHARED,
1506 drm_gem_object_unreference_unlocked(obj);
1507 if (IS_ERR((void *)addr))
1510 args->addr_ptr = (uint64_t) addr;
1516 * i915_gem_fault - fault a page into the GTT
1517 * @vma: VMA in question
1520 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1521 * from userspace. The fault handler takes care of binding the object to
1522 * the GTT (if needed), allocating and programming a fence register (again,
1523 * only if needed based on whether the old reg is still valid or the object
1524 * is tiled) and inserting a new PTE into the faulting process.
1526 * Note that the faulting process may involve evicting existing objects
1527 * from the GTT and/or fence registers to make room. So performance may
1528 * suffer if the GTT working set is large or there are few fence registers
1531 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1533 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1534 struct drm_device *dev = obj->base.dev;
1535 struct drm_i915_private *dev_priv = dev->dev_private;
1536 pgoff_t page_offset;
1539 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1541 intel_runtime_pm_get(dev_priv);
1543 /* We don't use vmf->pgoff since that has the fake offset */
1544 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1547 ret = i915_mutex_lock_interruptible(dev);
1551 trace_i915_gem_object_fault(obj, page_offset, true, write);
1553 /* Try to flush the object off the GPU first without holding the lock.
1554 * Upon reacquiring the lock, we will perform our sanity checks and then
1555 * repeat the flush holding the lock in the normal manner to catch cases
1556 * where we are gazumped.
1558 ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
1562 /* Access to snoopable pages through the GTT is incoherent. */
1563 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1568 /* Now bind it into the GTT if needed */
1569 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
1573 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1577 ret = i915_gem_object_get_fence(obj);
1581 /* Finally, remap it using the new GTT offset */
1582 pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1585 if (!obj->fault_mappable) {
1586 unsigned long size = min_t(unsigned long,
1587 vma->vm_end - vma->vm_start,
1591 for (i = 0; i < size >> PAGE_SHIFT; i++) {
1592 ret = vm_insert_pfn(vma,
1593 (unsigned long)vma->vm_start + i * PAGE_SIZE,
1599 obj->fault_mappable = true;
1601 ret = vm_insert_pfn(vma,
1602 (unsigned long)vmf->virtual_address,
1605 i915_gem_object_ggtt_unpin(obj);
1607 mutex_unlock(&dev->struct_mutex);
1612 * We eat errors when the gpu is terminally wedged to avoid
1613 * userspace unduly crashing (gl has no provisions for mmaps to
1614 * fail). But any other -EIO isn't ours (e.g. swap in failure)
1615 * and so needs to be reported.
1617 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
1618 ret = VM_FAULT_SIGBUS;
1623 * EAGAIN means the gpu is hung and we'll wait for the error
1624 * handler to reset everything when re-faulting in
1625 * i915_mutex_lock_interruptible.
1632 * EBUSY is ok: this just means that another thread
1633 * already did the job.
1635 ret = VM_FAULT_NOPAGE;
1642 ret = VM_FAULT_SIGBUS;
1645 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1646 ret = VM_FAULT_SIGBUS;
1650 intel_runtime_pm_put(dev_priv);
1655 * i915_gem_release_mmap - remove physical page mappings
1656 * @obj: obj in question
1658 * Preserve the reservation of the mmapping with the DRM core code, but
1659 * relinquish ownership of the pages back to the system.
1661 * It is vital that we remove the page mapping if we have mapped a tiled
1662 * object through the GTT and then lose the fence register due to
1663 * resource pressure. Similarly if the object has been moved out of the
1664 * aperture, then pages mapped into userspace must be revoked. Removing the
1665 * mapping will then trigger a page fault on the next user access, allowing
1666 * fixup by i915_gem_fault().
1669 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1671 if (!obj->fault_mappable)
1674 drm_vma_node_unmap(&obj->base.vma_node,
1675 obj->base.dev->anon_inode->i_mapping);
1676 obj->fault_mappable = false;
1680 i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1682 struct drm_i915_gem_object *obj;
1684 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1685 i915_gem_release_mmap(obj);
1689 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1693 if (INTEL_INFO(dev)->gen >= 4 ||
1694 tiling_mode == I915_TILING_NONE)
1697 /* Previous chips need a power-of-two fence region when tiling */
1698 if (INTEL_INFO(dev)->gen == 3)
1699 gtt_size = 1024*1024;
1701 gtt_size = 512*1024;
1703 while (gtt_size < size)
1710 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1711 * @obj: object to check
1713 * Return the required GTT alignment for an object, taking into account
1714 * potential fence register mapping.
1717 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1718 int tiling_mode, bool fenced)
1721 * Minimum alignment is 4k (GTT page size), but might be greater
1722 * if a fence register is needed for the object.
1724 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1725 tiling_mode == I915_TILING_NONE)
1729 * Previous chips need to be aligned to the size of the smallest
1730 * fence register that can contain the object.
1732 return i915_gem_get_gtt_size(dev, size, tiling_mode);
1735 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1737 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1740 if (drm_vma_node_has_offset(&obj->base.vma_node))
1743 dev_priv->mm.shrinker_no_lock_stealing = true;
1745 ret = drm_gem_create_mmap_offset(&obj->base);
1749 /* Badly fragmented mmap space? The only way we can recover
1750 * space is by destroying unwanted objects. We can't randomly release
1751 * mmap_offsets as userspace expects them to be persistent for the
1752 * lifetime of the objects. The closest we can do is to release the
1753 * offsets on purgeable objects by truncating it and marking it purged,
1754 * which prevents userspace from ever using that object again.
1756 i915_gem_shrink(dev_priv,
1757 obj->base.size >> PAGE_SHIFT,
1759 I915_SHRINK_UNBOUND |
1760 I915_SHRINK_PURGEABLE);
1761 ret = drm_gem_create_mmap_offset(&obj->base);
1765 i915_gem_shrink_all(dev_priv);
1766 ret = drm_gem_create_mmap_offset(&obj->base);
1768 dev_priv->mm.shrinker_no_lock_stealing = false;
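/*
 * Note the escalation above: try to create the offset, then reclaim only our
 * own purgeable buffers (bound and unbound) and retry, and only as a last
 * resort evict and shrink everything before the final attempt. The
 * shrinker_no_lock_stealing flag is held across all of it so that a
 * direct-reclaim invocation of our own shrinker does not piggy-back on the
 * struct_mutex this thread already holds while the offset is being set up.
 */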
1773 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1775 drm_gem_free_mmap_offset(&obj->base);
1779 i915_gem_mmap_gtt(struct drm_file *file,
1780 struct drm_device *dev,
1781 uint32_t handle, bool dumb,
1784 struct drm_i915_private *dev_priv = dev->dev_private;
1785 struct drm_i915_gem_object *obj;
1788 ret = i915_mutex_lock_interruptible(dev);
1792 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1793 if (&obj->base == NULL) {
1799 * We don't allow dumb mmaps on objects created using another
1802 WARN_ONCE(dumb && !(obj->base.dumb || obj->base.import_attach),
1803 "Illegal dumb map of accelerated buffer.\n");
1805 if (obj->base.size > dev_priv->gtt.mappable_end) {
1810 if (obj->madv != I915_MADV_WILLNEED) {
1811 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
1816 ret = i915_gem_object_create_mmap_offset(obj);
1820 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
1823 drm_gem_object_unreference(&obj->base);
1825 mutex_unlock(&dev->struct_mutex);
1830 i915_gem_dumb_map_offset(struct drm_file *file,
1831 struct drm_device *dev,
1835 return i915_gem_mmap_gtt(file, dev, handle, true, offset);
1839 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1841 * @data: GTT mapping ioctl data
1842 * @file: GEM object info
1844 * Simply returns the fake offset to userspace so it can mmap it.
1845 * The mmap call will end up in drm_gem_mmap(), which will set things
1846 * up so we can get faults in the handler above.
1848 * The fault handler will take care of binding the object into the GTT
1849 * (since it may have been evicted to make room for something), allocating
1850 * a fence register, and mapping the appropriate aperture address into
1854 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1855 struct drm_file *file)
1857 struct drm_i915_gem_mmap_gtt *args = data;
1859 return i915_gem_mmap_gtt(file, dev, args->handle, false, &args->offset);
1863 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1865 return obj->madv == I915_MADV_DONTNEED;
1868 /* Immediately discard the backing storage */
1870 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1872 i915_gem_object_free_mmap_offset(obj);
1874 if (obj->base.filp == NULL)
1877 /* Our goal here is to return as much of the memory as
1878 * is possible back to the system as we are called from OOM.
1879 * To do this we must instruct the shmfs to drop all of its
1880 * backing pages, *now*.
1882 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
1883 obj->madv = __I915_MADV_PURGED;
1886 /* Try to discard unwanted pages */
1888 i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
1890 struct address_space *mapping;
1892 switch (obj->madv) {
1893 case I915_MADV_DONTNEED:
1894 i915_gem_object_truncate(obj);
1895 case __I915_MADV_PURGED:
1899 if (obj->base.filp == NULL)
1902 mapping = file_inode(obj->base.filp)->i_mapping,
1903 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
1907 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1909 struct sg_page_iter sg_iter;
1912 BUG_ON(obj->madv == __I915_MADV_PURGED);
1914 ret = i915_gem_object_set_to_cpu_domain(obj, true);
1916 /* In the event of a disaster, abandon all caches and
1917 * hope for the best.
1919 WARN_ON(ret != -EIO);
1920 i915_gem_clflush_object(obj, true);
1921 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1924 if (i915_gem_object_needs_bit17_swizzle(obj))
1925 i915_gem_object_save_bit_17_swizzle(obj);
1927 if (obj->madv == I915_MADV_DONTNEED)
1930 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
1931 struct page *page = sg_page_iter_page(&sg_iter);
1934 set_page_dirty(page);
1936 if (obj->madv == I915_MADV_WILLNEED)
1937 mark_page_accessed(page);
1939 page_cache_release(page);
1943 sg_free_table(obj->pages);
1948 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1950 const struct drm_i915_gem_object_ops *ops = obj->ops;
1952 if (obj->pages == NULL)
1955 if (obj->pages_pin_count)
1958 BUG_ON(i915_gem_obj_bound_any(obj));
1960 /* ->put_pages might need to allocate memory for the bit17 swizzle
1961 * array, hence protect them from being reaped by removing them from gtt
1963 list_del(&obj->global_list);
1965 ops->put_pages(obj);
1968 i915_gem_object_invalidate(obj);
1974 i915_gem_shrink(struct drm_i915_private *dev_priv,
1975 long target, unsigned flags)
1978 struct list_head *list;
1981 { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
1982 { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
1985 unsigned long count = 0;
1988 * As we may completely rewrite the (un)bound list whilst unbinding
1989 * (due to retiring requests) we have to strictly process only
1990 * one element of the list at the time, and recheck the list
1991 * on every iteration.
1993 * In particular, we must hold a reference whilst removing the
1994 * object as we may end up waiting for and/or retiring the objects.
1995 * This might release the final reference (held by the active list)
1996 * and result in the object being freed from under us. This is
1997 * similar to the precautions the eviction code must take whilst
2000 * Also note that although these lists do not hold a reference to
2001 * the object we can safely grab one here: The final object
2002 * unreferencing and the bound_list are both protected by the
2003 * dev->struct_mutex and so we won't ever be able to observe an
2004 * object on the bound_list with a reference count equals 0.
2006 for (phase = phases; phase->list; phase++) {
2007 struct list_head still_in_list;
2009 if ((flags & phase->bit) == 0)
2012 INIT_LIST_HEAD(&still_in_list);
2013 while (count < target && !list_empty(phase->list)) {
2014 struct drm_i915_gem_object *obj;
2015 struct i915_vma *vma, *v;
2017 obj = list_first_entry(phase->list,
2018 typeof(*obj), global_list);
2019 list_move_tail(&obj->global_list, &still_in_list);
2021 if (flags & I915_SHRINK_PURGEABLE &&
2022 !i915_gem_object_is_purgeable(obj))
2025 drm_gem_object_reference(&obj->base);
2027 /* For the unbound phase, this should be a no-op! */
2028 list_for_each_entry_safe(vma, v,
2029 &obj->vma_list, vma_link)
2030 if (i915_vma_unbind(vma))
2033 if (i915_gem_object_put_pages(obj) == 0)
2034 count += obj->base.size >> PAGE_SHIFT;
2036 drm_gem_object_unreference(&obj->base);
2038 list_splice(&still_in_list, phase->list);
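/*
 * still_in_list is what makes the walk above safe: each object is moved onto
 * that private list as soon as it is inspected, so the loop always takes the
 * first entry of a strictly shrinking list and can neither revisit nor spin
 * forever on objects it failed to free; whatever survives the phase is
 * spliced back afterwards.
 */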
2044 static unsigned long
2045 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
2047 i915_gem_evict_everything(dev_priv->dev);
2048 return i915_gem_shrink(dev_priv, LONG_MAX,
2049 I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
2053 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2055 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2057 struct address_space *mapping;
2058 struct sg_table *st;
2059 struct scatterlist *sg;
2060 struct sg_page_iter sg_iter;
2062 unsigned long last_pfn = 0; /* suppress gcc warning */
2065 /* Assert that the object is not currently in any GPU domain. As it
2066 * wasn't in the GTT, there shouldn't be any way it could have been in
2069 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2070 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2072 st = kmalloc(sizeof(*st), GFP_KERNEL);
2076 page_count = obj->base.size / PAGE_SIZE;
2077 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2082 /* Get the list of pages out of our struct file. They'll be pinned
2083 * at this point until we release them.
2085 * Fail silently without starting the shrinker
2087 mapping = file_inode(obj->base.filp)->i_mapping;
2088 gfp = mapping_gfp_mask(mapping);
2089 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
2090 gfp &= ~(__GFP_IO | __GFP_WAIT);
2093 for (i = 0; i < page_count; i++) {
2094 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2096 i915_gem_shrink(dev_priv,
2099 I915_SHRINK_UNBOUND |
2100 I915_SHRINK_PURGEABLE);
2101 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2104 /* We've tried hard to allocate the memory by reaping
2105 * our own buffer, now let the real VM do its job and
2106 * go down in flames if truly OOM.
2108 i915_gem_shrink_all(dev_priv);
2109 page = shmem_read_mapping_page(mapping, i);
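/*
 * The page allocation above is a three-step fallback: first ask shmemfs with
 * __GFP_NORETRY | __GFP_NOWARN and without __GFP_IO/__GFP_WAIT so it fails
 * fast instead of entering reclaim, then purge our own buffers and retry,
 * and only at the end shrink everything and issue a normal, blocking
 * shmem_read_mapping_page() that may indeed go down in flames if truly OOM.
 */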
2113 #ifdef CONFIG_SWIOTLB
2114 if (swiotlb_nr_tbl()) {
2116 sg_set_page(sg, page, PAGE_SIZE, 0);
2121 if (!i || page_to_pfn(page) != last_pfn + 1) {
2125 sg_set_page(sg, page, PAGE_SIZE, 0);
2127 sg->length += PAGE_SIZE;
2129 last_pfn = page_to_pfn(page);
2131 /* Check that the i965g/gm workaround works. */
2132 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
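/*
 * Unless swiotlb is active, physically contiguous pages are coalesced above
 * into a single scatterlist entry by simply growing sg->length, which keeps
 * the sg table short when the allocator returns contiguous chunks; with
 * swiotlb each page gets its own entry, presumably so that bounce buffering
 * can stay per-page.
 */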
2134 #ifdef CONFIG_SWIOTLB
2135 if (!swiotlb_nr_tbl())
2140 if (i915_gem_object_needs_bit17_swizzle(obj))
2141 i915_gem_object_do_bit_17_swizzle(obj);
2147 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
2148 page_cache_release(sg_page_iter_page(&sg_iter));
2152 /* shmemfs first checks if there is enough memory to allocate the page
2153 * and reports ENOSPC should there be insufficient, along with the usual
2154 * ENOMEM for a genuine allocation failure.
2156 * We use ENOSPC in our driver to mean that we have run out of aperture
2157 * space and so want to translate the error from shmemfs back to our
2158 * usual understanding of ENOMEM.
2160 if (PTR_ERR(page) == -ENOSPC)
2163 return PTR_ERR(page);
2166 /* Ensure that the associated pages are gathered from the backing storage
2167 * and pinned into our object. i915_gem_object_get_pages() may be called
2168 * multiple times before they are released by a single call to
2169 * i915_gem_object_put_pages() - once the pages are no longer referenced
2170 * either as a result of memory pressure (reaping pages under the shrinker)
2171 * or as the object is itself released.
2174 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2176 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2177 const struct drm_i915_gem_object_ops *ops = obj->ops;
2183 if (obj->madv != I915_MADV_WILLNEED) {
2184 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2188 BUG_ON(obj->pages_pin_count);
2190 ret = ops->get_pages(obj);
2194 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2199 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
2200 struct intel_engine_cs *ring)
2202 u32 seqno = intel_ring_get_seqno(ring);
2204 BUG_ON(ring == NULL);
2205 if (obj->ring != ring && obj->last_write_seqno) {
2206 /* Keep the seqno relative to the current ring */
2207 obj->last_write_seqno = seqno;
2211 /* Add a reference if we're newly entering the active list. */
2213 drm_gem_object_reference(&obj->base);
2217 list_move_tail(&obj->ring_list, &ring->active_list);
2219 obj->last_read_seqno = seqno;
2222 void i915_vma_move_to_active(struct i915_vma *vma,
2223 struct intel_engine_cs *ring)
2225 list_move_tail(&vma->mm_list, &vma->vm->active_list);
2226 return i915_gem_object_move_to_active(vma->obj, ring);
2230 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2232 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2233 struct i915_address_space *vm;
2234 struct i915_vma *vma;
2236 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
2237 BUG_ON(!obj->active);
2239 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
2240 vma = i915_gem_obj_to_vma(obj, vm);
2241 if (vma && !list_empty(&vma->mm_list))
2242 list_move_tail(&vma->mm_list, &vm->inactive_list);
2245 intel_fb_obj_flush(obj, true);
2247 list_del_init(&obj->ring_list);
2250 obj->last_read_seqno = 0;
2251 obj->last_write_seqno = 0;
2252 obj->base.write_domain = 0;
2254 obj->last_fenced_seqno = 0;
2257 drm_gem_object_unreference(&obj->base);
2259 WARN_ON(i915_verify_lists(dev));
2263 i915_gem_object_retire(struct drm_i915_gem_object *obj)
2265 struct intel_engine_cs *ring = obj->ring;
2270 if (i915_seqno_passed(ring->get_seqno(ring, true),
2271 obj->last_read_seqno))
2272 i915_gem_object_move_to_inactive(obj);
2276 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
2278 struct drm_i915_private *dev_priv = dev->dev_private;
2279 struct intel_engine_cs *ring;
2282 /* Carefully retire all requests without writing to the rings */
2283 for_each_ring(ring, dev_priv, i) {
2284 ret = intel_ring_idle(ring);
2288 i915_gem_retire_requests(dev);
2290 /* Finally reset hw state */
2291 for_each_ring(ring, dev_priv, i) {
2292 intel_ring_init_seqno(ring, seqno);
2294 for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
2295 ring->semaphore.sync_seqno[j] = 0;
2301 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2303 struct drm_i915_private *dev_priv = dev->dev_private;
2309 /* HWS page needs to be set to a value less than what we
2310 * will inject into the ring
2312 ret = i915_gem_init_seqno(dev, seqno - 1);
2316 /* Carefully set the last_seqno value so that wrap
2317 * detection still works
2319 dev_priv->next_seqno = seqno;
2320 dev_priv->last_seqno = seqno - 1;
2321 if (dev_priv->last_seqno == 0)
2322 dev_priv->last_seqno--;
2328 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2330 struct drm_i915_private *dev_priv = dev->dev_private;
2332 /* reserve 0 for non-seqno */
2333 if (dev_priv->next_seqno == 0) {
2334 int ret = i915_gem_init_seqno(dev, 0);
2338 dev_priv->next_seqno = 1;
2341 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
2345 int __i915_add_request(struct intel_engine_cs *ring,
2346 struct drm_file *file,
2347 struct drm_i915_gem_object *obj,
2350 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2351 struct drm_i915_gem_request *request;
2352 struct intel_ringbuffer *ringbuf;
2353 u32 request_ring_position, request_start;
2356 request = ring->preallocated_lazy_request;
2357 if (WARN_ON(request == NULL))
2360 if (i915.enable_execlists) {
2361 struct intel_context *ctx = request->ctx;
2362 ringbuf = ctx->engine[ring->id].ringbuf;
2364 ringbuf = ring->buffer;
2366 request_start = intel_ring_get_tail(ringbuf);
2368 * Emit any outstanding flushes - execbuf can fail to emit the flush
2369 * after having emitted the batchbuffer command. Hence we need to fix
2370 * things up similar to emitting the lazy request. The difference here
2371 * is that the flush _must_ happen before the next request, no matter
2374 if (i915.enable_execlists) {
2375 ret = logical_ring_flush_all_caches(ringbuf);
2379 ret = intel_ring_flush_all_caches(ring);
2384 /* Record the position of the start of the request so that
2385 * should we detect the updated seqno part-way through the
2386 * GPU processing the request, we never over-estimate the
2387 * position of the head.
2389 request_ring_position = intel_ring_get_tail(ringbuf);
2391 if (i915.enable_execlists) {
2392 ret = ring->emit_request(ringbuf);
2396 ret = ring->add_request(ring);
2401 request->seqno = intel_ring_get_seqno(ring);
2402 request->ring = ring;
2403 request->head = request_start;
2404 request->tail = request_ring_position;
2406 /* Whilst this request exists, batch_obj will be on the
2407 * active_list, and so will hold the active reference. Only when this
2408 * request is retired will the batch_obj be moved onto the
2409 * inactive_list and lose its active reference. Hence we do not need
2410 * to explicitly hold another reference here.
2412 request->batch_obj = obj;
2414 if (!i915.enable_execlists) {
2415 /* Hold a reference to the current context so that we can inspect
2416 * it later in case a hangcheck error event fires.
2418 request->ctx = ring->last_context;
2420 i915_gem_context_reference(request->ctx);
2423 request->emitted_jiffies = jiffies;
2424 list_add_tail(&request->list, &ring->request_list);
2425 request->file_priv = NULL;
2428 struct drm_i915_file_private *file_priv = file->driver_priv;
2430 spin_lock(&file_priv->mm.lock);
2431 request->file_priv = file_priv;
2432 list_add_tail(&request->client_list,
2433 &file_priv->mm.request_list);
2434 spin_unlock(&file_priv->mm.lock);
2437 trace_i915_gem_request_add(ring, request->seqno);
2438 ring->outstanding_lazy_seqno = 0;
2439 ring->preallocated_lazy_request = NULL;
2441 if (!dev_priv->ums.mm_suspended) {
2442 i915_queue_hangcheck(ring->dev);
2444 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
2445 queue_delayed_work(dev_priv->wq,
2446 &dev_priv->mm.retire_work,
2447 round_jiffies_up_relative(HZ));
2448 intel_mark_busy(dev_priv->dev);
2452 *out_seqno = request->seqno;
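	/*
	 * Caller sketch (illustrative only; the names of any convenience
	 * wrappers may differ): a submission path emits its commands and then
	 * does roughly
	 *
	 *	u32 seqno;
	 *	ret = __i915_add_request(ring, file, batch_obj, &seqno);
	 *	if (ret)
	 *		return ret;
	 *
	 * passing NULL for file/batch_obj when there is no client or batch to
	 * track against the request.
	 */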
2457 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2459 struct drm_i915_file_private *file_priv = request->file_priv;
2464 spin_lock(&file_priv->mm.lock);
2465 list_del(&request->client_list);
2466 request->file_priv = NULL;
2467 spin_unlock(&file_priv->mm.lock);
2470 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
2471 const struct intel_context *ctx)
2473 unsigned long elapsed;
2475 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2477 if (ctx->hang_stats.banned)
2480 if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
2481 if (!i915_gem_context_is_default(ctx)) {
2482 DRM_DEBUG("context hanging too fast, banning!\n");
2484 } else if (i915_stop_ring_allow_ban(dev_priv)) {
2485 if (i915_stop_ring_allow_warn(dev_priv))
2486 DRM_ERROR("gpu hanging too fast, banning!\n");
2494 static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2495 struct intel_context *ctx,
2498 struct i915_ctx_hang_stats *hs;
2503 hs = &ctx->hang_stats;
2506 hs->banned = i915_context_is_banned(dev_priv, ctx);
2508 hs->guilty_ts = get_seconds();
2510 hs->batch_pending++;
2514 static void i915_gem_free_request(struct drm_i915_gem_request *request)
2516 list_del(&request->list);
2517 i915_gem_request_remove_from_client(request);
2520 i915_gem_context_unreference(request->ctx);
2525 struct drm_i915_gem_request *
2526 i915_gem_find_active_request(struct intel_engine_cs *ring)
2528 struct drm_i915_gem_request *request;
2529 u32 completed_seqno;
2531 completed_seqno = ring->get_seqno(ring, false);
2533 list_for_each_entry(request, &ring->request_list, list) {
2534 if (i915_seqno_passed(completed_seqno, request->seqno))
2543 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2544 struct intel_engine_cs *ring)
2546 struct drm_i915_gem_request *request;
2549 request = i915_gem_find_active_request(ring);
2551 if (request == NULL)
2554 ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2556 i915_set_reset_status(dev_priv, request->ctx, ring_hung);
2558 list_for_each_entry_continue(request, &ring->request_list, list)
2559 i915_set_reset_status(dev_priv, request->ctx, false);
2562 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2563 struct intel_engine_cs *ring)
2565 while (!list_empty(&ring->active_list)) {
2566 struct drm_i915_gem_object *obj;
2568 obj = list_first_entry(&ring->active_list,
2569 struct drm_i915_gem_object,
2572 i915_gem_object_move_to_inactive(obj);
2576 * We must free the requests after all the corresponding objects have
2577 * been moved off active lists. Which is the same order as the normal
2578	 * retire_requests function does. This is important if objects hold
2579	 * implicit references on things like ppgtt address spaces through
2582 while (!list_empty(&ring->request_list)) {
2583 struct drm_i915_gem_request *request;
2585 request = list_first_entry(&ring->request_list,
2586 struct drm_i915_gem_request,
2589 i915_gem_free_request(request);
2592 while (!list_empty(&ring->execlist_queue)) {
2593 struct intel_ctx_submit_request *submit_req;
2595 submit_req = list_first_entry(&ring->execlist_queue,
2596 struct intel_ctx_submit_request,
2598 list_del(&submit_req->execlist_link);
2599 intel_runtime_pm_put(dev_priv);
2600 i915_gem_context_unreference(submit_req->ctx);
2604	/* These may not have been flushed before the reset, do so now */
2605 kfree(ring->preallocated_lazy_request);
2606 ring->preallocated_lazy_request = NULL;
2607 ring->outstanding_lazy_seqno = 0;
2610 void i915_gem_restore_fences(struct drm_device *dev)
2612 struct drm_i915_private *dev_priv = dev->dev_private;
2615 for (i = 0; i < dev_priv->num_fence_regs; i++) {
2616 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2619 * Commit delayed tiling changes if we have an object still
2620 * attached to the fence, otherwise just clear the fence.
2623 i915_gem_object_update_fence(reg->obj, reg,
2624 reg->obj->tiling_mode);
2626 i915_gem_write_fence(dev, i, NULL);
2631 void i915_gem_reset(struct drm_device *dev)
2633 struct drm_i915_private *dev_priv = dev->dev_private;
2634 struct intel_engine_cs *ring;
2638 * Before we free the objects from the requests, we need to inspect
2639 * them for finding the guilty party. As the requests only borrow
2640 * their reference to the objects, the inspection must be done first.
2642 for_each_ring(ring, dev_priv, i)
2643 i915_gem_reset_ring_status(dev_priv, ring);
2645 for_each_ring(ring, dev_priv, i)
2646 i915_gem_reset_ring_cleanup(dev_priv, ring);
2648 i915_gem_context_reset(dev);
2650 i915_gem_restore_fences(dev);
2654 * This function clears the request list as sequence numbers are passed.
2657 i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
2661 if (list_empty(&ring->request_list))
2664 WARN_ON(i915_verify_lists(ring->dev));
2666 seqno = ring->get_seqno(ring, true);
2668 /* Move any buffers on the active list that are no longer referenced
2669 * by the ringbuffer to the flushing/inactive lists as appropriate,
2670 * before we free the context associated with the requests.
2672 while (!list_empty(&ring->active_list)) {
2673 struct drm_i915_gem_object *obj;
2675 obj = list_first_entry(&ring->active_list,
2676 struct drm_i915_gem_object,
2679 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2682 i915_gem_object_move_to_inactive(obj);
2686 while (!list_empty(&ring->request_list)) {
2687 struct drm_i915_gem_request *request;
2688 struct intel_ringbuffer *ringbuf;
2690 request = list_first_entry(&ring->request_list,
2691 struct drm_i915_gem_request,
2694 if (!i915_seqno_passed(seqno, request->seqno))
2697 trace_i915_gem_request_retire(ring, request->seqno);
2699 /* This is one of the few common intersection points
2700 * between legacy ringbuffer submission and execlists:
2701 * we need to tell them apart in order to find the correct
2702		 * ringbuffer to which the request belongs.
2704 if (i915.enable_execlists) {
2705 struct intel_context *ctx = request->ctx;
2706 ringbuf = ctx->engine[ring->id].ringbuf;
2708 ringbuf = ring->buffer;
2710 /* We know the GPU must have read the request to have
2711 * sent us the seqno + interrupt, so use the position
2712		 * of the tail of the request to update the last known position
2715 ringbuf->last_retired_head = request->tail;
2717 i915_gem_free_request(request);
2720 if (unlikely(ring->trace_irq_seqno &&
2721 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2722 ring->irq_put(ring);
2723 ring->trace_irq_seqno = 0;
2726 WARN_ON(i915_verify_lists(ring->dev));
2730 i915_gem_retire_requests(struct drm_device *dev)
2732 struct drm_i915_private *dev_priv = dev->dev_private;
2733 struct intel_engine_cs *ring;
2737 for_each_ring(ring, dev_priv, i) {
2738 i915_gem_retire_requests_ring(ring);
2739 idle &= list_empty(&ring->request_list);
2743 mod_delayed_work(dev_priv->wq,
2744 &dev_priv->mm.idle_work,
2745 msecs_to_jiffies(100));
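	/*
	 * Sketch of the bookkeeping hand-off (descriptive, no new behaviour):
	 * while requests are outstanding, retire_work re-queues itself roughly
	 * once per second; once every ring's request_list drains, the idle_work
	 * scheduled above fires ~100ms later and calls intel_mark_idle(), and
	 * any new request cancels it again from __i915_add_request().
	 */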
2751 i915_gem_retire_work_handler(struct work_struct *work)
2753 struct drm_i915_private *dev_priv =
2754 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2755 struct drm_device *dev = dev_priv->dev;
2758 /* Come back later if the device is busy... */
2760 if (mutex_trylock(&dev->struct_mutex)) {
2761 idle = i915_gem_retire_requests(dev);
2762 mutex_unlock(&dev->struct_mutex);
2765 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2766 round_jiffies_up_relative(HZ));
2770 i915_gem_idle_work_handler(struct work_struct *work)
2772 struct drm_i915_private *dev_priv =
2773 container_of(work, typeof(*dev_priv), mm.idle_work.work);
2775 intel_mark_idle(dev_priv->dev);
2779 * Ensures that an object will eventually get non-busy by flushing any required
2780 * write domains, emitting any outstanding lazy request and retiring any
2781 * completed requests.
2784 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2789 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2793 i915_gem_retire_requests_ring(obj->ring);
2800 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2801 * @DRM_IOCTL_ARGS: standard ioctl arguments
2803 * Returns 0 if successful, else an error is returned with the remaining time in
2804 * the timeout parameter.
2805 * -ETIME: object is still busy after timeout
2806 * -ERESTARTSYS: signal interrupted the wait
2807 * -ENOENT: object doesn't exist
2808 * Also possible, but rare:
2809 * -EAGAIN: GPU wedged
2811 * -ENODEV: Internal IRQ fail
2812 * -E?: The add request failed
2814 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2815 * non-zero timeout parameter the wait ioctl will wait for the given number of
2816 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2817 * without holding struct_mutex the object may become re-busied before this
2818 * function completes. A similar but shorter race condition exists in the busy
2822 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2824 struct drm_i915_private *dev_priv = dev->dev_private;
2825 struct drm_i915_gem_wait *args = data;
2826 struct drm_i915_gem_object *obj;
2827 struct intel_engine_cs *ring = NULL;
2828 unsigned reset_counter;
2832 if (args->flags != 0)
2835 ret = i915_mutex_lock_interruptible(dev);
2839 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2840 if (&obj->base == NULL) {
2841 mutex_unlock(&dev->struct_mutex);
2845 /* Need to make sure the object gets inactive eventually. */
2846 ret = i915_gem_object_flush_active(obj);
2851 seqno = obj->last_read_seqno;
2858 /* Do this after OLR check to make sure we make forward progress polling
2859 * on this IOCTL with a timeout <=0 (like busy ioctl)
2861 if (args->timeout_ns <= 0) {
2866 drm_gem_object_unreference(&obj->base);
2867 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2868 mutex_unlock(&dev->struct_mutex);
2870 return __i915_wait_seqno(ring, seqno, reset_counter, true,
2871 &args->timeout_ns, file->driver_priv);
2874 drm_gem_object_unreference(&obj->base);
2875 mutex_unlock(&dev->struct_mutex);
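	/*
	 * Userspace sketch (illustrative; assumes libdrm's drmIoctl()):
	 *
	 *	struct drm_i915_gem_wait wait = {
	 *		.bo_handle = handle,
	 *		.flags = 0,
	 *		.timeout_ns = 1000 * 1000 * 1000,	-- 1s budget
	 *	};
	 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
	 *
	 * On success the object is idle and wait.timeout_ns holds the remaining
	 * budget; a timeout of 0 degenerates into a busy-ioctl style poll, as
	 * noted above.
	 */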
2880 * i915_gem_object_sync - sync an object to a ring.
2882 * @obj: object which may be in use on another ring.
2883 * @to: ring we wish to use the object on. May be NULL.
2885 * This code is meant to abstract object synchronization with the GPU.
2886 * Calling with NULL implies synchronizing the object with the CPU
2887 * rather than a particular GPU ring.
2889 * Returns 0 if successful, else propagates up the lower layer error.
2892 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2893 struct intel_engine_cs *to)
2895 struct intel_engine_cs *from = obj->ring;
2899 if (from == NULL || to == from)
2902 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2903 return i915_gem_object_wait_rendering(obj, false);
2905 idx = intel_ring_sync_index(from, to);
2907 seqno = obj->last_read_seqno;
2908 /* Optimization: Avoid semaphore sync when we are sure we already
2909	 * waited for an object with a higher seqno */
2910 if (seqno <= from->semaphore.sync_seqno[idx])
2913 ret = i915_gem_check_olr(obj->ring, seqno);
2917 trace_i915_gem_ring_sync_to(from, to, seqno);
2918 ret = to->semaphore.sync_to(to, from, seqno);
2920 /* We use last_read_seqno because sync_to()
2921 * might have just caused seqno wrap under
2924 from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
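	/*
	 * Usage sketch (illustrative): before emitting commands that sample
	 * @obj on another engine, callers such as the execbuffer and pageflip
	 * paths do roughly
	 *
	 *	ret = i915_gem_object_sync(obj, ring);
	 *	if (ret)
	 *		return ret;
	 *
	 * and pass ring == NULL when the CPU is the consumer instead.
	 */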
2929 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2931 u32 old_write_domain, old_read_domains;
2933 /* Force a pagefault for domain tracking on next user access */
2934 i915_gem_release_mmap(obj);
2936 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2939 /* Wait for any direct GTT access to complete */
2942 old_read_domains = obj->base.read_domains;
2943 old_write_domain = obj->base.write_domain;
2945 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2946 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2948 trace_i915_gem_object_change_domain(obj,
2953 int i915_vma_unbind(struct i915_vma *vma)
2955 struct drm_i915_gem_object *obj = vma->obj;
2956 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2959 if (list_empty(&vma->vma_link))
2962 if (!drm_mm_node_allocated(&vma->node)) {
2963 i915_gem_vma_destroy(vma);
2970 BUG_ON(obj->pages == NULL);
2972 ret = i915_gem_object_finish_gpu(obj);
2975 /* Continue on if we fail due to EIO, the GPU is hung so we
2976 * should be safe and we need to cleanup or else we might
2977 * cause memory corruption through use-after-free.
2980 /* Throw away the active reference before moving to the unbound list */
2981 i915_gem_object_retire(obj);
2983 if (i915_is_ggtt(vma->vm)) {
2984 i915_gem_object_finish_gtt(obj);
2986 /* release the fence reg _after_ flushing */
2987 ret = i915_gem_object_put_fence(obj);
2992 trace_i915_vma_unbind(vma);
2994 vma->unbind_vma(vma);
2996 list_del_init(&vma->mm_list);
2997 if (i915_is_ggtt(vma->vm))
2998 obj->map_and_fenceable = false;
3000 drm_mm_remove_node(&vma->node);
3001 i915_gem_vma_destroy(vma);
3003 /* Since the unbound list is global, only move to that list if
3004 * no more VMAs exist. */
3005 if (list_empty(&obj->vma_list)) {
3006 i915_gem_gtt_finish_object(obj);
3007 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
3010 /* And finally now the object is completely decoupled from this vma,
3011 * we can drop its hold on the backing storage and allow it to be
3012 * reaped by the shrinker.
3014 i915_gem_object_unpin_pages(obj);
3019 int i915_gpu_idle(struct drm_device *dev)
3021 struct drm_i915_private *dev_priv = dev->dev_private;
3022 struct intel_engine_cs *ring;
3025 /* Flush everything onto the inactive list. */
3026 for_each_ring(ring, dev_priv, i) {
3027 if (!i915.enable_execlists) {
3028 ret = i915_switch_context(ring, ring->default_context);
3033 ret = intel_ring_idle(ring);
3041 static void i965_write_fence_reg(struct drm_device *dev, int reg,
3042 struct drm_i915_gem_object *obj)
3044 struct drm_i915_private *dev_priv = dev->dev_private;
3046 int fence_pitch_shift;
3048 if (INTEL_INFO(dev)->gen >= 6) {
3049 fence_reg = FENCE_REG_SANDYBRIDGE_0;
3050 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
3052 fence_reg = FENCE_REG_965_0;
3053 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
3056 fence_reg += reg * 8;
3058 /* To w/a incoherency with non-atomic 64-bit register updates,
3059 * we split the 64-bit update into two 32-bit writes. In order
3060 * for a partial fence not to be evaluated between writes, we
3061 * precede the update with write to turn off the fence register,
3062 * and only enable the fence as the last step.
3064 * For extra levels of paranoia, we make sure each step lands
3065 * before applying the next step.
3067 I915_WRITE(fence_reg, 0);
3068 POSTING_READ(fence_reg);
3071 u32 size = i915_gem_obj_ggtt_size(obj);
3074 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
3076 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
3077 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
3078 if (obj->tiling_mode == I915_TILING_Y)
3079 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
3080 val |= I965_FENCE_REG_VALID;
3082 I915_WRITE(fence_reg + 4, val >> 32);
3083 POSTING_READ(fence_reg + 4);
3085 I915_WRITE(fence_reg + 0, val);
3086 POSTING_READ(fence_reg);
3088 I915_WRITE(fence_reg + 4, 0);
3089 POSTING_READ(fence_reg + 4);
3093 static void i915_write_fence_reg(struct drm_device *dev, int reg,
3094 struct drm_i915_gem_object *obj)
3096 struct drm_i915_private *dev_priv = dev->dev_private;
3100 u32 size = i915_gem_obj_ggtt_size(obj);
3104 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
3105 (size & -size) != size ||
3106 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
3107 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
3108 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
3110 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
3115 /* Note: pitch better be a power of two tile widths */
3116 pitch_val = obj->stride / tile_width;
3117 pitch_val = ffs(pitch_val) - 1;
3119 val = i915_gem_obj_ggtt_offset(obj);
3120 if (obj->tiling_mode == I915_TILING_Y)
3121 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3122 val |= I915_FENCE_SIZE_BITS(size);
3123 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3124 val |= I830_FENCE_REG_VALID;
3129 reg = FENCE_REG_830_0 + reg * 4;
3131 reg = FENCE_REG_945_8 + (reg - 8) * 4;
3133 I915_WRITE(reg, val);
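	/*
	 * Worked example (illustrative): an X-tiled object with a 2048 byte
	 * stride uses the 512 byte tile width above, so
	 * pitch_val = ffs(2048 / 512) - 1 = ffs(4) - 1 = 2, i.e. log2 of the
	 * tiles per row, which is the encoding the I830_FENCE_PITCH_SHIFT
	 * field expects.
	 */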
3137 static void i830_write_fence_reg(struct drm_device *dev, int reg,
3138 struct drm_i915_gem_object *obj)
3140 struct drm_i915_private *dev_priv = dev->dev_private;
3144 u32 size = i915_gem_obj_ggtt_size(obj);
3147 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
3148 (size & -size) != size ||
3149 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
3150 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
3151 i915_gem_obj_ggtt_offset(obj), size);
3153 pitch_val = obj->stride / 128;
3154 pitch_val = ffs(pitch_val) - 1;
3156 val = i915_gem_obj_ggtt_offset(obj);
3157 if (obj->tiling_mode == I915_TILING_Y)
3158 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3159 val |= I830_FENCE_SIZE_BITS(size);
3160 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3161 val |= I830_FENCE_REG_VALID;
3165 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
3166 POSTING_READ(FENCE_REG_830_0 + reg * 4);
3169 static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
3171 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
3174 static void i915_gem_write_fence(struct drm_device *dev, int reg,
3175 struct drm_i915_gem_object *obj)
3177 struct drm_i915_private *dev_priv = dev->dev_private;
3179 /* Ensure that all CPU reads are completed before installing a fence
3180 * and all writes before removing the fence.
3182 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
3185 WARN(obj && (!obj->stride || !obj->tiling_mode),
3186 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
3187 obj->stride, obj->tiling_mode);
3189 switch (INTEL_INFO(dev)->gen) {
3195 case 4: i965_write_fence_reg(dev, reg, obj); break;
3196 case 3: i915_write_fence_reg(dev, reg, obj); break;
3197 case 2: i830_write_fence_reg(dev, reg, obj); break;
3201 /* And similarly be paranoid that no direct access to this region
3202 * is reordered to before the fence is installed.
3204 if (i915_gem_object_needs_mb(obj))
3208 static inline int fence_number(struct drm_i915_private *dev_priv,
3209 struct drm_i915_fence_reg *fence)
3211 return fence - dev_priv->fence_regs;
3214 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
3215 struct drm_i915_fence_reg *fence,
3218 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3219 int reg = fence_number(dev_priv, fence);
3221 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
3224 obj->fence_reg = reg;
3226 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
3228 obj->fence_reg = I915_FENCE_REG_NONE;
3230 list_del_init(&fence->lru_list);
3232 obj->fence_dirty = false;
3236 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
3238 if (obj->last_fenced_seqno) {
3239 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
3243 obj->last_fenced_seqno = 0;
3250 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3252 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3253 struct drm_i915_fence_reg *fence;
3256 ret = i915_gem_object_wait_fence(obj);
3260 if (obj->fence_reg == I915_FENCE_REG_NONE)
3263 fence = &dev_priv->fence_regs[obj->fence_reg];
3265 if (WARN_ON(fence->pin_count))
3268 i915_gem_object_fence_lost(obj);
3269 i915_gem_object_update_fence(obj, fence, false);
3274 static struct drm_i915_fence_reg *
3275 i915_find_fence_reg(struct drm_device *dev)
3277 struct drm_i915_private *dev_priv = dev->dev_private;
3278 struct drm_i915_fence_reg *reg, *avail;
3281 /* First try to find a free reg */
3283 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3284 reg = &dev_priv->fence_regs[i];
3288 if (!reg->pin_count)
3295 /* None available, try to steal one or wait for a user to finish */
3296 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3304 /* Wait for completion of pending flips which consume fences */
3305 if (intel_has_pending_fb_unpin(dev))
3306 return ERR_PTR(-EAGAIN);
3308 return ERR_PTR(-EDEADLK);
3312 * i915_gem_object_get_fence - set up fencing for an object
3313 * @obj: object to map through a fence reg
3315 * When mapping objects through the GTT, userspace wants to be able to write
3316 * to them without having to worry about swizzling if the object is tiled.
3317 * This function walks the fence regs looking for a free one for @obj,
3318 * stealing one if it can't find any.
3320 * It then sets up the reg based on the object's properties: address, pitch
3321 * and tiling format.
3323 * For an untiled surface, this removes any existing fence.
3326 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3328 struct drm_device *dev = obj->base.dev;
3329 struct drm_i915_private *dev_priv = dev->dev_private;
3330 bool enable = obj->tiling_mode != I915_TILING_NONE;
3331 struct drm_i915_fence_reg *reg;
3334 /* Have we updated the tiling parameters upon the object and so
3335 * will need to serialise the write to the associated fence register?
3337 if (obj->fence_dirty) {
3338 ret = i915_gem_object_wait_fence(obj);
3343 /* Just update our place in the LRU if our fence is getting reused. */
3344 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3345 reg = &dev_priv->fence_regs[obj->fence_reg];
3346 if (!obj->fence_dirty) {
3347			list_move_tail(&reg->lru_list,
3348 &dev_priv->mm.fence_list);
3351 } else if (enable) {
3352 if (WARN_ON(!obj->map_and_fenceable))
3355 reg = i915_find_fence_reg(dev);
3357 return PTR_ERR(reg);
3360 struct drm_i915_gem_object *old = reg->obj;
3362 ret = i915_gem_object_wait_fence(old);
3366 i915_gem_object_fence_lost(old);
3371 i915_gem_object_update_fence(obj, reg, enable);
3376 static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
3377 unsigned long cache_level)
3379 struct drm_mm_node *gtt_space = &vma->node;
3380 struct drm_mm_node *other;
3383 * On some machines we have to be careful when putting differing types
3384 * of snoopable memory together to avoid the prefetcher crossing memory
3385 * domains and dying. During vm initialisation, we decide whether or not
3386 * these constraints apply and set the drm_mm.color_adjust
3389 if (vma->vm->mm.color_adjust == NULL)
3392 if (!drm_mm_node_allocated(gtt_space))
3395	if (list_empty(&gtt_space->node_list))
3398 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3399 if (other->allocated && !other->hole_follows && other->color != cache_level)
3402 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3403 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3410 * Finds free space in the GTT aperture and binds the object there.
3412 static struct i915_vma *
3413 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3414 struct i915_address_space *vm,
3418 struct drm_device *dev = obj->base.dev;
3419 struct drm_i915_private *dev_priv = dev->dev_private;
3420 u32 size, fence_size, fence_alignment, unfenced_alignment;
3421 unsigned long start =
3422 flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3424 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
3425 struct i915_vma *vma;
3428 fence_size = i915_gem_get_gtt_size(dev,
3431 fence_alignment = i915_gem_get_gtt_alignment(dev,
3433 obj->tiling_mode, true);
3434 unfenced_alignment =
3435 i915_gem_get_gtt_alignment(dev,
3437 obj->tiling_mode, false);
3440 alignment = flags & PIN_MAPPABLE ? fence_alignment :
3442 if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
3443 DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
3444 return ERR_PTR(-EINVAL);
3447 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
3449 /* If the object is bigger than the entire aperture, reject it early
3450 * before evicting everything in a vain attempt to find space.
3452 if (obj->base.size > end) {
3453 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
3455 flags & PIN_MAPPABLE ? "mappable" : "total",
3457 return ERR_PTR(-E2BIG);
3460 ret = i915_gem_object_get_pages(obj);
3462 return ERR_PTR(ret);
3464 i915_gem_object_pin_pages(obj);
3466 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
3471 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3475 DRM_MM_SEARCH_DEFAULT,
3476 DRM_MM_CREATE_DEFAULT);
3478 ret = i915_gem_evict_something(dev, vm, size, alignment,
3487 if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
3489 goto err_remove_node;
3492 ret = i915_gem_gtt_prepare_object(obj);
3494 goto err_remove_node;
3496 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3497 list_add_tail(&vma->mm_list, &vm->inactive_list);
3499 trace_i915_vma_bind(vma, flags);
3500 vma->bind_vma(vma, obj->cache_level,
3501 flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
3506 drm_mm_remove_node(&vma->node);
3508 i915_gem_vma_destroy(vma);
3511 i915_gem_object_unpin_pages(obj);
3516 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3519 /* If we don't have a page list set up, then we're not pinned
3520 * to GPU, and we can ignore the cache flush because it'll happen
3521 * again at bind time.
3523 if (obj->pages == NULL)
3527 * Stolen memory is always coherent with the GPU as it is explicitly
3528 * marked as wc by the system, or the system is cache-coherent.
3533 /* If the GPU is snooping the contents of the CPU cache,
3534 * we do not need to manually clear the CPU cache lines. However,
3535 * the caches are only snooped when the render cache is
3536 * flushed/invalidated. As we always have to emit invalidations
3537 * and flushes when moving into and out of the RENDER domain, correct
3538 * snooping behaviour occurs naturally as the result of our domain
3541 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
3544 trace_i915_gem_object_clflush(obj);
3545 drm_clflush_sg(obj->pages);
3550 /** Flushes the GTT write domain for the object if it's dirty. */
3552 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3554 uint32_t old_write_domain;
3556 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3559 /* No actual flushing is required for the GTT write domain. Writes
3560 * to it immediately go to main memory as far as we know, so there's
3561 * no chipset flush. It also doesn't land in render cache.
3563 * However, we do have to enforce the order so that all writes through
3564 * the GTT land before any writes to the device, such as updates to
3569 old_write_domain = obj->base.write_domain;
3570 obj->base.write_domain = 0;
3572 intel_fb_obj_flush(obj, false);
3574 trace_i915_gem_object_change_domain(obj,
3575 obj->base.read_domains,
3579 /** Flushes the CPU write domain for the object if it's dirty. */
3581 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3584 uint32_t old_write_domain;
3586 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3589 if (i915_gem_clflush_object(obj, force))
3590 i915_gem_chipset_flush(obj->base.dev);
3592 old_write_domain = obj->base.write_domain;
3593 obj->base.write_domain = 0;
3595 intel_fb_obj_flush(obj, false);
3597 trace_i915_gem_object_change_domain(obj,
3598 obj->base.read_domains,
3603 * Moves a single object to the GTT read, and possibly write domain.
3605 * This function returns when the move is complete, including waiting on
3609 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3611 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3612 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
3613 uint32_t old_write_domain, old_read_domains;
3616 /* Not valid to be called on unbound objects. */
3620 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3623 ret = i915_gem_object_wait_rendering(obj, !write);
3627 i915_gem_object_retire(obj);
3628 i915_gem_object_flush_cpu_write_domain(obj, false);
3630 /* Serialise direct access to this object with the barriers for
3631 * coherent writes from the GPU, by effectively invalidating the
3632 * GTT domain upon first access.
3634 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3637 old_write_domain = obj->base.write_domain;
3638 old_read_domains = obj->base.read_domains;
3640 /* It should now be out of any other write domains, and we can update
3641 * the domain values for our changes.
3643 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3644 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3646 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3647 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3652 intel_fb_obj_invalidate(obj, NULL);
3654 trace_i915_gem_object_change_domain(obj,
3658 /* And bump the LRU for this access */
3659 if (i915_gem_object_is_inactive(obj))
3660 list_move_tail(&vma->mm_list,
3661 &dev_priv->gtt.base.inactive_list);
3666 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3667 enum i915_cache_level cache_level)
3669 struct drm_device *dev = obj->base.dev;
3670 struct i915_vma *vma, *next;
3673 if (obj->cache_level == cache_level)
3676 if (i915_gem_obj_is_pinned(obj)) {
3677 DRM_DEBUG("can not change the cache level of pinned objects\n");
3681 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
3682 if (!i915_gem_valid_gtt_space(vma, cache_level)) {
3683 ret = i915_vma_unbind(vma);
3689 if (i915_gem_obj_bound_any(obj)) {
3690 ret = i915_gem_object_finish_gpu(obj);
3694 i915_gem_object_finish_gtt(obj);
3696 /* Before SandyBridge, you could not use tiling or fence
3697 * registers with snooped memory, so relinquish any fences
3698 * currently pointing to our region in the aperture.
3700 if (INTEL_INFO(dev)->gen < 6) {
3701 ret = i915_gem_object_put_fence(obj);
3706 list_for_each_entry(vma, &obj->vma_list, vma_link)
3707 if (drm_mm_node_allocated(&vma->node))
3708 vma->bind_vma(vma, cache_level,
3709 vma->bound & GLOBAL_BIND);
3712 list_for_each_entry(vma, &obj->vma_list, vma_link)
3713 vma->node.color = cache_level;
3714 obj->cache_level = cache_level;
3716 if (cpu_write_needs_clflush(obj)) {
3717 u32 old_read_domains, old_write_domain;
3719 /* If we're coming from LLC cached, then we haven't
3720 * actually been tracking whether the data is in the
3721 * CPU cache or not, since we only allow one bit set
3722 * in obj->write_domain and have been skipping the clflushes.
3723 * Just set it to the CPU cache for now.
3725 i915_gem_object_retire(obj);
3726 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3728 old_read_domains = obj->base.read_domains;
3729 old_write_domain = obj->base.write_domain;
3731 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3732 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3734 trace_i915_gem_object_change_domain(obj,
3742 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3743 struct drm_file *file)
3745 struct drm_i915_gem_caching *args = data;
3746 struct drm_i915_gem_object *obj;
3749 ret = i915_mutex_lock_interruptible(dev);
3753 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3754 if (&obj->base == NULL) {
3759 switch (obj->cache_level) {
3760 case I915_CACHE_LLC:
3761 case I915_CACHE_L3_LLC:
3762 args->caching = I915_CACHING_CACHED;
3766 args->caching = I915_CACHING_DISPLAY;
3770 args->caching = I915_CACHING_NONE;
3774 drm_gem_object_unreference(&obj->base);
3776 mutex_unlock(&dev->struct_mutex);
3780 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3781 struct drm_file *file)
3783 struct drm_i915_gem_caching *args = data;
3784 struct drm_i915_gem_object *obj;
3785 enum i915_cache_level level;
3788 switch (args->caching) {
3789 case I915_CACHING_NONE:
3790 level = I915_CACHE_NONE;
3792 case I915_CACHING_CACHED:
3793 level = I915_CACHE_LLC;
3795 case I915_CACHING_DISPLAY:
3796 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3802 ret = i915_mutex_lock_interruptible(dev);
3806 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3807 if (&obj->base == NULL) {
3812 ret = i915_gem_object_set_cache_level(obj, level);
3814 drm_gem_object_unreference(&obj->base);
3816 mutex_unlock(&dev->struct_mutex);
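	/*
	 * Userspace sketch (illustrative; assumes libdrm's drmIoctl()):
	 *
	 *	struct drm_i915_gem_caching arg = {
	 *		.handle = handle,
	 *		.caching = I915_CACHING_CACHED,
	 *	};
	 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
	 *
	 * which lands in the switch above and ultimately calls
	 * i915_gem_object_set_cache_level(obj, I915_CACHE_LLC).
	 */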
3820 static bool is_pin_display(struct drm_i915_gem_object *obj)
3822 struct i915_vma *vma;
3824 vma = i915_gem_obj_to_ggtt(obj);
3828 /* There are 3 sources that pin objects:
3829 * 1. The display engine (scanouts, sprites, cursors);
3830 * 2. Reservations for execbuffer;
3833 * We can ignore reservations as we hold the struct_mutex and
3834 * are only called outside of the reservation path. The user
3835 * can only increment pin_count once, and so if after
3836 * subtracting the potential reference by the user, any pin_count
3837 * remains, it must be due to another use by the display engine.
3839 return vma->pin_count - !!obj->user_pin_count;
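	/*
	 * Worked example (illustrative): with vma->pin_count == 2 and
	 * obj->user_pin_count == 1 the expression above yields 2 - 1 = 1, so a
	 * display pin must still be outstanding; with a single user pin and no
	 * scanout use it yields 0 and the object is not considered pinned for
	 * display.
	 */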
3843 * Prepare buffer for display plane (scanout, cursors, etc).
3844 * Can be called from an uninterruptible phase (modesetting) and allows
3845 * any flushes to be pipelined (for pageflips).
3848 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3850 struct intel_engine_cs *pipelined)
3852 u32 old_read_domains, old_write_domain;
3853 bool was_pin_display;
3856 if (pipelined != obj->ring) {
3857 ret = i915_gem_object_sync(obj, pipelined);
3862 /* Mark the pin_display early so that we account for the
3863 * display coherency whilst setting up the cache domains.
3865 was_pin_display = obj->pin_display;
3866 obj->pin_display = true;
3868 /* The display engine is not coherent with the LLC cache on gen6. As
3869 * a result, we make sure that the pinning that is about to occur is
3870 * done with uncached PTEs. This is lowest common denominator for all
3873 * However for gen6+, we could do better by using the GFDT bit instead
3874 * of uncaching, which would allow us to flush all the LLC-cached data
3875 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3877 ret = i915_gem_object_set_cache_level(obj,
3878 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3880 goto err_unpin_display;
3882 /* As the user may map the buffer once pinned in the display plane
3883 * (e.g. libkms for the bootup splash), we have to ensure that we
3884 * always use map_and_fenceable for all scanout buffers.
3886 ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
3888 goto err_unpin_display;
3890 i915_gem_object_flush_cpu_write_domain(obj, true);
3892 old_write_domain = obj->base.write_domain;
3893 old_read_domains = obj->base.read_domains;
3895 /* It should now be out of any other write domains, and we can update
3896 * the domain values for our changes.
3898 obj->base.write_domain = 0;
3899 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3901 trace_i915_gem_object_change_domain(obj,
3908 WARN_ON(was_pin_display != is_pin_display(obj));
3909 obj->pin_display = was_pin_display;
3914 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3916 i915_gem_object_ggtt_unpin(obj);
3917 obj->pin_display = is_pin_display(obj);
3921 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3925 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3928 ret = i915_gem_object_wait_rendering(obj, false);
3932 /* Ensure that we invalidate the GPU's caches and TLBs. */
3933 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3938 * Moves a single object to the CPU read, and possibly write domain.
3940 * This function returns when the move is complete, including waiting on
3944 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3946 uint32_t old_write_domain, old_read_domains;
3949 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3952 ret = i915_gem_object_wait_rendering(obj, !write);
3956 i915_gem_object_retire(obj);
3957 i915_gem_object_flush_gtt_write_domain(obj);
3959 old_write_domain = obj->base.write_domain;
3960 old_read_domains = obj->base.read_domains;
3962 /* Flush the CPU cache if it's still invalid. */
3963 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3964 i915_gem_clflush_object(obj, false);
3966 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3969 /* It should now be out of any other write domains, and we can update
3970 * the domain values for our changes.
3972 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3974 /* If we're writing through the CPU, then the GPU read domains will
3975 * need to be invalidated at next use.
3978 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3979 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3983 intel_fb_obj_invalidate(obj, NULL);
3985 trace_i915_gem_object_change_domain(obj,
3992 /* Throttle our rendering by waiting until the ring has completed our requests
3993 * emitted over 20 msec ago.
3995 * Note that if we were to use the current jiffies each time around the loop,
3996 * we wouldn't escape the function with any frames outstanding if the time to
3997 * render a frame was over 20ms.
3999 * This should get us reasonable parallelism between CPU and GPU but also
4000 * relatively low latency when blocking on a particular request to finish.
4003 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
4005 struct drm_i915_private *dev_priv = dev->dev_private;
4006 struct drm_i915_file_private *file_priv = file->driver_priv;
4007 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
4008 struct drm_i915_gem_request *request;
4009 struct intel_engine_cs *ring = NULL;
4010 unsigned reset_counter;
4014 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
4018 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
4022 spin_lock(&file_priv->mm.lock);
4023 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
4024 if (time_after_eq(request->emitted_jiffies, recent_enough))
4027 ring = request->ring;
4028 seqno = request->seqno;
4030 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
4031 spin_unlock(&file_priv->mm.lock);
4036 ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
4038 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
4044 i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
4046 struct drm_i915_gem_object *obj = vma->obj;
4049 vma->node.start & (alignment - 1))
4052 if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
4055 if (flags & PIN_OFFSET_BIAS &&
4056 vma->node.start < (flags & PIN_OFFSET_MASK))
4063 i915_gem_object_pin(struct drm_i915_gem_object *obj,
4064 struct i915_address_space *vm,
4068 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4069 struct i915_vma *vma;
4073 if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
4076 if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
4079 if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
4082 vma = i915_gem_obj_to_vma(obj, vm);
4084 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
4087 if (i915_vma_misplaced(vma, alignment, flags)) {
4088 WARN(vma->pin_count,
4089 "bo is already pinned with incorrect alignment:"
4090 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
4091 " obj->map_and_fenceable=%d\n",
4092 i915_gem_obj_offset(obj, vm), alignment,
4093 !!(flags & PIN_MAPPABLE),
4094 obj->map_and_fenceable);
4095 ret = i915_vma_unbind(vma);
4103 bound = vma ? vma->bound : 0;
4104 if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
4105 vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
4107 return PTR_ERR(vma);
4110 if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND))
4111 vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
4113 if ((bound ^ vma->bound) & GLOBAL_BIND) {
4114 bool mappable, fenceable;
4115 u32 fence_size, fence_alignment;
4117 fence_size = i915_gem_get_gtt_size(obj->base.dev,
4120 fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
4125 fenceable = (vma->node.size == fence_size &&
4126 (vma->node.start & (fence_alignment - 1)) == 0);
4128 mappable = (vma->node.start + obj->base.size <=
4129 dev_priv->gtt.mappable_end);
4131 obj->map_and_fenceable = mappable && fenceable;
4134 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
4137 if (flags & PIN_MAPPABLE)
4138 obj->pin_mappable |= true;
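	/*
	 * Note (illustrative assumption): most callers in this file reach the
	 * function above through the i915_gem_obj_ggtt_pin() helper declared
	 * elsewhere, which is roughly
	 *
	 *	i915_gem_object_pin(obj, &dev_priv->gtt.base, alignment,
	 *			    flags | PIN_GLOBAL);
	 *
	 * i.e. a pin into the global GTT with a global binding requested.
	 */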
4144 i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
4146 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
4149 BUG_ON(vma->pin_count == 0);
4150 BUG_ON(!i915_gem_obj_ggtt_bound(obj));
4152 if (--vma->pin_count == 0)
4153 obj->pin_mappable = false;
4157 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
4159 if (obj->fence_reg != I915_FENCE_REG_NONE) {
4160 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4161 struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
4163 WARN_ON(!ggtt_vma ||
4164 dev_priv->fence_regs[obj->fence_reg].pin_count >
4165 ggtt_vma->pin_count);
4166 dev_priv->fence_regs[obj->fence_reg].pin_count++;
4173 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
4175 if (obj->fence_reg != I915_FENCE_REG_NONE) {
4176 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4177 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
4178 dev_priv->fence_regs[obj->fence_reg].pin_count--;
4183 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4184 struct drm_file *file)
4186 struct drm_i915_gem_pin *args = data;
4187 struct drm_i915_gem_object *obj;
4190 if (INTEL_INFO(dev)->gen >= 6)
4193 ret = i915_mutex_lock_interruptible(dev);
4197 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4198 if (&obj->base == NULL) {
4203 if (obj->madv != I915_MADV_WILLNEED) {
4204 DRM_DEBUG("Attempting to pin a purgeable buffer\n");
4209 if (obj->pin_filp != NULL && obj->pin_filp != file) {
4210 DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
4216 if (obj->user_pin_count == ULONG_MAX) {
4221 if (obj->user_pin_count == 0) {
4222 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
4227 obj->user_pin_count++;
4228 obj->pin_filp = file;
4230 args->offset = i915_gem_obj_ggtt_offset(obj);
4232 drm_gem_object_unreference(&obj->base);
4234 mutex_unlock(&dev->struct_mutex);
4239 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4240 struct drm_file *file)
4242 struct drm_i915_gem_pin *args = data;
4243 struct drm_i915_gem_object *obj;
4246 ret = i915_mutex_lock_interruptible(dev);
4250 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4251 if (&obj->base == NULL) {
4256 if (obj->pin_filp != file) {
4257 DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
4262 obj->user_pin_count--;
4263 if (obj->user_pin_count == 0) {
4264 obj->pin_filp = NULL;
4265 i915_gem_object_ggtt_unpin(obj);
4269 drm_gem_object_unreference(&obj->base);
4271 mutex_unlock(&dev->struct_mutex);
4276 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4277 struct drm_file *file)
4279 struct drm_i915_gem_busy *args = data;
4280 struct drm_i915_gem_object *obj;
4283 ret = i915_mutex_lock_interruptible(dev);
4287 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4288 if (&obj->base == NULL) {
4293 /* Count all active objects as busy, even if they are currently not used
4294 * by the gpu. Users of this interface expect objects to eventually
4295 * become non-busy without any further actions, therefore emit any
4296 * necessary flushes here.
4298 ret = i915_gem_object_flush_active(obj);
4300 args->busy = obj->active;
4302 BUILD_BUG_ON(I915_NUM_RINGS > 16);
4303 args->busy |= intel_ring_flag(obj->ring) << 16;
4306 drm_gem_object_unreference(&obj->base);
4308 mutex_unlock(&dev->struct_mutex);
4313 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4314 struct drm_file *file_priv)
4316 return i915_gem_ring_throttle(dev, file_priv);
4320 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4321 struct drm_file *file_priv)
4323 struct drm_i915_gem_madvise *args = data;
4324 struct drm_i915_gem_object *obj;
4327 switch (args->madv) {
4328 case I915_MADV_DONTNEED:
4329 case I915_MADV_WILLNEED:
4335 ret = i915_mutex_lock_interruptible(dev);
4339 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
4340 if (&obj->base == NULL) {
4345 if (i915_gem_obj_is_pinned(obj)) {
4350 if (obj->madv != __I915_MADV_PURGED)
4351 obj->madv = args->madv;
4353 /* if the object is no longer attached, discard its backing storage */
4354 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
4355 i915_gem_object_truncate(obj);
4357 args->retained = obj->madv != __I915_MADV_PURGED;
4360 drm_gem_object_unreference(&obj->base);
4362 mutex_unlock(&dev->struct_mutex);
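	/*
	 * Userspace sketch (illustrative; assumes libdrm's drmIoctl()):
	 *
	 *	struct drm_i915_gem_madvise arg = {
	 *		.handle = handle,
	 *		.madv = I915_MADV_DONTNEED,
	 *	};
	 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
	 *	if (ret == 0 && !arg.retained)
	 *		the backing storage has already been discarded
	 *
	 * marking a cached buffer as purgeable so the shrinker may reclaim it.
	 */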
4366 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4367 const struct drm_i915_gem_object_ops *ops)
4369 INIT_LIST_HEAD(&obj->global_list);
4370 INIT_LIST_HEAD(&obj->ring_list);
4371 INIT_LIST_HEAD(&obj->obj_exec_link);
4372 INIT_LIST_HEAD(&obj->vma_list);
4376 obj->fence_reg = I915_FENCE_REG_NONE;
4377 obj->madv = I915_MADV_WILLNEED;
4379 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4382 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4383 .get_pages = i915_gem_object_get_pages_gtt,
4384 .put_pages = i915_gem_object_put_pages_gtt,
4387 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4390 struct drm_i915_gem_object *obj;
4391 struct address_space *mapping;
4394 obj = i915_gem_object_alloc(dev);
4398 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4399 i915_gem_object_free(obj);
4403 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4404 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4405 /* 965gm cannot relocate objects above 4GiB. */
4406 mask &= ~__GFP_HIGHMEM;
4407 mask |= __GFP_DMA32;
4410 mapping = file_inode(obj->base.filp)->i_mapping;
4411 mapping_set_gfp_mask(mapping, mask);
4413 i915_gem_object_init(obj, &i915_gem_object_ops);
4415 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4416 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4419 /* On some devices, we can have the GPU use the LLC (the CPU
4420 * cache) for about a 10% performance improvement
4421 * compared to uncached. Graphics requests other than
4422 * display scanout are coherent with the CPU in
4423 * accessing this cache. This means in this mode we
4424 * don't need to clflush on the CPU side, and on the
4425 * GPU side we only need to flush internal caches to
4426 * get data visible to the CPU.
4428 * However, we maintain the display planes as UC, and so
4429 * need to rebind when first used as such.
4431 obj->cache_level = I915_CACHE_LLC;
4433 obj->cache_level = I915_CACHE_NONE;
4435 trace_i915_gem_object_create(obj);
4440 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4442 /* If we are the last user of the backing storage (be it shmemfs
4443 * pages or stolen etc), we know that the pages are going to be
4444 * immediately released. In this case, we can then skip copying
4445 * back the contents from the GPU.
4448 if (obj->madv != I915_MADV_WILLNEED)
4451 if (obj->base.filp == NULL)
4454 /* At first glance, this looks racy, but then again so would be
4455 * userspace racing mmap against close. However, the first external
4456 * reference to the filp can only be obtained through the
4457 * i915_gem_mmap_ioctl() which safeguards us against the user
4458 * acquiring such a reference whilst we are in the middle of
4459 * freeing the object.
4461 return atomic_long_read(&obj->base.filp->f_count) == 1;
4464 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4466 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4467 struct drm_device *dev = obj->base.dev;
4468 struct drm_i915_private *dev_priv = dev->dev_private;
4469 struct i915_vma *vma, *next;
4471 intel_runtime_pm_get(dev_priv);
4473 trace_i915_gem_object_destroy(obj);
4475 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4479 ret = i915_vma_unbind(vma);
4480 if (WARN_ON(ret == -ERESTARTSYS)) {
4481 bool was_interruptible;
4483 was_interruptible = dev_priv->mm.interruptible;
4484 dev_priv->mm.interruptible = false;
4486 WARN_ON(i915_vma_unbind(vma));
4488 dev_priv->mm.interruptible = was_interruptible;
4492 i915_gem_object_detach_phys(obj);
4494 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4495 * before progressing. */
4497 i915_gem_object_unpin_pages(obj);
4499 WARN_ON(obj->frontbuffer_bits);
4501 if (WARN_ON(obj->pages_pin_count))
4502 obj->pages_pin_count = 0;
4503 if (discard_backing_storage(obj))
4504 obj->madv = I915_MADV_DONTNEED;
4505 i915_gem_object_put_pages(obj);
4506 i915_gem_object_free_mmap_offset(obj);
4510 if (obj->base.import_attach)
4511 drm_prime_gem_destroy(&obj->base, NULL);
4513 if (obj->ops->release)
4514 obj->ops->release(obj);
4516 drm_gem_object_release(&obj->base);
4517 i915_gem_info_remove_obj(dev_priv, obj->base.size);
4520 i915_gem_object_free(obj);
4522 intel_runtime_pm_put(dev_priv);
4525 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4526 struct i915_address_space *vm)
4528 struct i915_vma *vma;
4529 list_for_each_entry(vma, &obj->vma_list, vma_link)
4536 void i915_gem_vma_destroy(struct i915_vma *vma)
4538 struct i915_address_space *vm = NULL;
4539 WARN_ON(vma->node.allocated);
4541 /* Keep the vma as a placeholder in the execbuffer reservation lists */
4542 if (!list_empty(&vma->exec_list))
4547 if (!i915_is_ggtt(vm))
4548 i915_ppgtt_put(i915_vm_to_ppgtt(vm));
4550 list_del(&vma->vma_link);
4556 i915_gem_stop_ringbuffers(struct drm_device *dev)
4558 struct drm_i915_private *dev_priv = dev->dev_private;
4559 struct intel_engine_cs *ring;
4562 for_each_ring(ring, dev_priv, i)
4563 dev_priv->gt.stop_ring(ring);
4567 i915_gem_suspend(struct drm_device *dev)
4569 struct drm_i915_private *dev_priv = dev->dev_private;
4572 mutex_lock(&dev->struct_mutex);
4573 if (dev_priv->ums.mm_suspended)
4576 ret = i915_gpu_idle(dev);
4580 i915_gem_retire_requests(dev);
4582 /* Under UMS, be paranoid and evict. */
4583 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4584 i915_gem_evict_everything(dev);
4586 i915_kernel_lost_context(dev);
4587 i915_gem_stop_ringbuffers(dev);
4589 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4590 * We need to replace this with a semaphore, or something.
4591 * And not confound ums.mm_suspended!
4593 dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
4595 mutex_unlock(&dev->struct_mutex);
4597 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
4598 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4599 flush_delayed_work(&dev_priv->mm.idle_work);
4604 mutex_unlock(&dev->struct_mutex);
4608 int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
4610 struct drm_device *dev = ring->dev;
4611 struct drm_i915_private *dev_priv = dev->dev_private;
4612 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4613 u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
4616 if (!HAS_L3_DPF(dev) || !remap_info)
4619 ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4624 * Note: We do not worry about the concurrent register cacheline hang
4625 * here because no other code should access these registers other than
4626 * at initialization time.
4628 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4629 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4630 intel_ring_emit(ring, reg_base + i);
4631 intel_ring_emit(ring, remap_info[i/4]);
4634 intel_ring_advance(ring);
4639 void i915_gem_init_swizzling(struct drm_device *dev)
4641 struct drm_i915_private *dev_priv = dev->dev_private;
4643 if (INTEL_INFO(dev)->gen < 5 ||
4644 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4647 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4648 DISP_TILE_SURFACE_SWIZZLING);
4653 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4655 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4656 else if (IS_GEN7(dev))
4657 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4658 else if (IS_GEN8(dev))
4659 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4665 intel_enable_blt(struct drm_device *dev)
4670 /* The blitter was dysfunctional on early prototypes */
4671 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4672 DRM_INFO("BLT not supported on this pre-production hardware;"
4673 " graphics performance will be degraded.\n");
4680 static void init_unused_ring(struct drm_device *dev, u32 base)
4682 struct drm_i915_private *dev_priv = dev->dev_private;
4684 I915_WRITE(RING_CTL(base), 0);
4685 I915_WRITE(RING_HEAD(base), 0);
4686 I915_WRITE(RING_TAIL(base), 0);
4687 I915_WRITE(RING_START(base), 0);
4690 static void init_unused_rings(struct drm_device *dev)
4693 init_unused_ring(dev, PRB1_BASE);
4694 init_unused_ring(dev, SRB0_BASE);
4695 init_unused_ring(dev, SRB1_BASE);
4696 init_unused_ring(dev, SRB2_BASE);
4697 init_unused_ring(dev, SRB3_BASE);
4698 } else if (IS_GEN2(dev)) {
4699 init_unused_ring(dev, SRB0_BASE);
4700 init_unused_ring(dev, SRB1_BASE);
4701 } else if (IS_GEN3(dev)) {
4702 init_unused_ring(dev, PRB1_BASE);
4703 init_unused_ring(dev, PRB2_BASE);
4707 int i915_gem_init_rings(struct drm_device *dev)
4709 struct drm_i915_private *dev_priv = dev->dev_private;
4713 * At least 830 can leave some of the unused rings
4714 * "active" (ie. head != tail) after resume which
4715	 * will prevent c3 entry. Make sure all unused rings
4718 init_unused_rings(dev);
4720 ret = intel_init_render_ring_buffer(dev);
4725 ret = intel_init_bsd_ring_buffer(dev);
4727 goto cleanup_render_ring;
4730 if (intel_enable_blt(dev)) {
4731 ret = intel_init_blt_ring_buffer(dev);
4733 goto cleanup_bsd_ring;
4736 if (HAS_VEBOX(dev)) {
4737 ret = intel_init_vebox_ring_buffer(dev);
4739 goto cleanup_blt_ring;
4742 if (HAS_BSD2(dev)) {
4743 ret = intel_init_bsd2_ring_buffer(dev);
4745 goto cleanup_vebox_ring;
4748 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4750 goto cleanup_bsd2_ring;
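	/*
	 * Worked note (intent inferred, not stated here): (u32)~0 - 0x1000 is
	 * 0xffffefff, so the first ~4096 requests after load march the seqno
	 * space up to the 32-bit wrap, exercising i915_gem_init_seqno() and
	 * the wrap-safe comparisons early rather than only after billions of
	 * requests.
	 */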
4755 intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
4757 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4759 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4761 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4762 cleanup_render_ring:
4763 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4769 i915_gem_init_hw(struct drm_device *dev)
4771 struct drm_i915_private *dev_priv = dev->dev_private;
4774 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4777 if (dev_priv->ellc_size)
4778 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4780 if (IS_HASWELL(dev))
4781 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4782 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4784 if (HAS_PCH_NOP(dev)) {
4785 if (IS_IVYBRIDGE(dev)) {
4786 u32 temp = I915_READ(GEN7_MSG_CTL);
4787 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4788 I915_WRITE(GEN7_MSG_CTL, temp);
4789 } else if (INTEL_INFO(dev)->gen >= 7) {
4790 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4791 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4792 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4796 i915_gem_init_swizzling(dev);
4798 ret = dev_priv->gt.init_rings(dev);
4802 for (i = 0; i < NUM_L3_SLICES(dev); i++)
4803 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4806 * XXX: Contexts should only be initialized once. Doing a switch to the
4807 * default context switch however is something we'd like to do after
4808 * reset or thaw (the latter may not actually be necessary for HW, but
4809 * goes with our code better). Context switching requires rings (for
4810 * the do_switch), but before enabling PPGTT. So don't move this.
4812 ret = i915_gem_context_enable(dev_priv);
4813 if (ret && ret != -EIO) {
4814 DRM_ERROR("Context enable failed %d\n", ret);
4815 i915_gem_cleanup_ringbuffer(dev);
4820 ret = i915_ppgtt_init_hw(dev);
4821 if (ret && ret != -EIO) {
4822 DRM_ERROR("PPGTT enable failed %d\n", ret);
4823 i915_gem_cleanup_ringbuffer(dev);
int i915_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	i915.enable_execlists = intel_sanitize_enable_execlists(dev,
			i915.enable_execlists);

	mutex_lock(&dev->struct_mutex);

	if (IS_VALLEYVIEW(dev)) {
		/* VLVA0 (potential hack), BIOS isn't actually waking us */
		I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
		if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
			      VLV_GTLC_ALLOWWAKEACK), 10))
			DRM_DEBUG_DRIVER("allow wake ack timed out\n");
	}

	if (!i915.enable_execlists) {
		dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
		dev_priv->gt.init_rings = i915_gem_init_rings;
		dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
		dev_priv->gt.stop_ring = intel_stop_ring_buffer;
	} else {
		dev_priv->gt.do_execbuf = intel_execlists_submission;
		dev_priv->gt.init_rings = intel_logical_rings_init;
		dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
		dev_priv->gt.stop_ring = intel_logical_ring_stop;
	}

	ret = i915_gem_init_userptr(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	i915_gem_init_global_gtt(dev);

	ret = i915_gem_context_init(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	ret = i915_gem_init_hw(dev);
	if (ret == -EIO) {
		/* Allow ring initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this where the GPU is angry,
		 * for all other failure, such as an allocation failure, bail.
		 */
		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
		atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
		ret = 0;
	}
	mutex_unlock(&dev->struct_mutex);

	/* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->dri1.allow_batchbuffer = 1;

	return ret;
}
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		dev_priv->gt.cleanup_ring(ring);
}
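/* Legacy (UMS-only) VT switch support: entervt re-initialises the hardware
 * and installs the IRQ handler, leavevt uninstalls the IRQ and idles the
 * GPU. Both are no-ops under KMS.
 */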
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		atomic_set(&dev_priv->gpu_error.reset_counter, 0);
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->ums.mm_suspended = 0;

	ret = i915_gem_init_hw(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));

	ret = drm_irq_install(dev, dev->pdev->irq);
	if (ret)
		goto cleanup_ringbuffer;
	mutex_unlock(&dev->struct_mutex);

	return 0;

cleanup_ringbuffer:
	i915_gem_cleanup_ringbuffer(dev);
	dev_priv->ums.mm_suspended = 1;
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	mutex_lock(&dev->struct_mutex);
	drm_irq_uninstall(dev);
	mutex_unlock(&dev->struct_mutex);

	return i915_gem_suspend(dev);
}
void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_suspend(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}
static void
init_ring_lists(struct intel_engine_cs *ring)
{
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
}
void i915_init_vm(struct drm_i915_private *dev_priv,
		  struct i915_address_space *vm)
{
	if (!i915_is_ggtt(vm))
		drm_mm_init(&vm->mm, vm->start, vm->total);
	vm->dev = dev_priv->dev;
	INIT_LIST_HEAD(&vm->active_list);
	INIT_LIST_HEAD(&vm->inactive_list);
	INIT_LIST_HEAD(&vm->global_link);
	list_add_tail(&vm->global_link, &dev_priv->vm_list);
}
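/* Once-per-device GEM bookkeeping setup: the object slab cache, VM and
 * object lists, retire/idle work, fence register accounting and the memory
 * shrinker/OOM notifier hooks.
 */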
void
i915_gem_load(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	dev_priv->slab =
		kmem_cache_create("i915_gem_object",
				  sizeof(struct drm_i915_gem_object), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	INIT_LIST_HEAD(&dev_priv->vm_list);
	i915_init_vm(dev_priv, &dev_priv->gtt.base);

	INIT_LIST_HEAD(&dev_priv->context_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	for (i = 0; i < I915_NUM_RINGS; i++)
		init_ring_lists(&dev_priv->ring[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
			  i915_gem_idle_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
		I915_WRITE(MI_ARB_STATE,
			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
	}

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	i915_gem_restore_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&dev_priv->mm.shrinker);

	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	register_oom_notifier(&dev_priv->mm.oom_notifier);

	mutex_init(&dev_priv->fb_tracking.lock);
}
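/* Called when a client's drm file is closed: drop any requests still on its
 * per-file request list so later retirement never dereferences the freed
 * file_priv.
 */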
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	cancel_delayed_work_sync(&file_priv->mm.idle_work);

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}
static void
i915_gem_file_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_file_private *file_priv =
		container_of(work, typeof(*file_priv), mm.idle_work.work);

	atomic_set(&file_priv->rps_wait_boost, false);
}
int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = dev->dev_private;
	file_priv->file = file;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);
	INIT_DELAYED_WORK(&file_priv->mm.idle_work,
			  i915_gem_file_idle_work_handler);

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}
/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	if (old) {
		WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
		WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
		old->frontbuffer_bits &= ~frontbuffer_bits;
	}

	if (new) {
		WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
		WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
		new->frontbuffer_bits |= frontbuffer_bits;
	}
}
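/* Shrinker locking helpers: the shrinker can be invoked while this driver
 * itself holds struct_mutex, so we allow "stealing" the lock from ourselves
 * when we can prove the current task already owns it.
 */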
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}
static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return false;

		if (to_i915(dev)->mm.shrinker_no_lock_stealing)
			return false;

		*unlock = false;
	} else
		*unlock = true;

	return true;
}
static int num_vma_bound(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int count = 0;

	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (drm_mm_node_allocated(&vma->node))
			count++;

	return count;
}
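/* Report how many object pages could plausibly be reclaimed: unbound
 * objects with no pinned pages, plus bound objects whose pages are only
 * pinned by their VMA bindings.
 */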
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj;
	unsigned long count;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return 0;

	count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
		if (obj->pages_pin_count == 0)
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!i915_gem_obj_is_pinned(obj) &&
		    obj->pages_pin_count == num_vma_bound(obj))
			count += obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}
/* All the new VM stuff */
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
				  struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);

	list_for_each_entry(vma, &o->vma_list, vma_link) {
		if (vma->vm == vm)
			return vma->node.start;
	}

	WARN(1, "%s vma for this object not found.\n",
	     i915_is_ggtt(vm) ? "global" : "ppgtt");
	return -1;
}
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);

	BUG_ON(list_empty(&o->vma_list));

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm)
			return vma->node.size;

	return 0;
}
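/* Shrinker scan callback: first try to reclaim purgeable objects, then, if
 * the target has not yet been met, shrink bound and unbound objects too.
 */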
static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = dev_priv->dev;
	unsigned long freed;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return SHRINK_STOP;

	freed = i915_gem_shrink(dev_priv,
				sc->nr_to_scan,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_PURGEABLE);
	if (freed < sc->nr_to_scan)
		freed += i915_gem_shrink(dev_priv,
					 sc->nr_to_scan - freed,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return freed;
}
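/* OOM notifier: aggressively free everything we can, then report how much
 * of the remaining GEM memory is pinned versus still sitting on the bound
 * and unbound lists.
 */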
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj;
	unsigned long timeout = msecs_to_jiffies(5000) + 1;
	unsigned long pinned, bound, unbound, freed_pages;
	bool was_interruptible;
	bool unlock;

	while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
		schedule_timeout_killable(1);
		if (fatal_signal_pending(current))
			return NOTIFY_DONE;
	}
	if (timeout == 0) {
		pr_err("Unable to purge GPU memory due lock contention.\n");
		return NOTIFY_DONE;
	}

	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;

	freed_pages = i915_gem_shrink_all(dev_priv);

	dev_priv->mm.interruptible = was_interruptible;

	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	unbound = bound = pinned = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (!obj->base.filp) /* not backed by a freeable object */
			continue;

		if (obj->pages_pin_count)
			pinned += obj->base.size;
		else
			unbound += obj->base.size;
	}
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!obj->base.filp)
			continue;

		if (obj->pages_pin_count)
			pinned += obj->base.size;
		else
			bound += obj->base.size;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	if (freed_pages || unbound || bound)
		pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
			freed_pages << PAGE_SHIFT, pinned);
	if (unbound || bound)
		pr_err("%lu and %lu bytes still available in the "
		       "bound and unbound GPU page lists.\n",
		       bound, unbound);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}
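/* Look up the GGTT VMA of an object; the GGTT binding is kept first on the
 * object's vma_list, so only the first entry needs checking.
 */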
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
	if (vma->vm != i915_obj_to_ggtt(obj))