/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
			   struct i915_address_space *vm,
			   unsigned alignment,
			   bool map_and_fenceable,
			   bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
				    struct shrink_control *sc);
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
		   i915_terminally_wedged(error))
	if (EXIT_COND)
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       EXIT_COND,
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}
#undef EXIT_COND

	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound_any(obj) && !obj->active;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
				  args->gtt_end);
	dev_priv->gtt.mappable_end = args->gtt_end;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (obj->pin_count)
			pinned += i915_gem_obj_ggtt_size(obj);
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->gtt.base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->slab, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
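
/*
 * Worked example (illustrative, not taken from the code): a 1366x768 dumb
 * buffer at bpp=32 gives 1366 * 4 = 5464 bytes per row, which ALIGN() rounds
 * up to the next multiple of 64, i.e. pitch = 5504 and size = 5504 * 768
 * bytes.
 */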

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
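
/*
 * Userspace view (illustrative sketch, e.g. via libdrm's drmIoctl(); the fd
 * and error handling are assumptions, not part of this file):
 *
 *	struct drm_i915_gem_create create = { .size = 2 * 4096 };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *	// create.handle now names a two-page GEM object
 */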

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}
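
/*
 * Illustrative note (a sketch spelling out the assumption behind the two
 * helpers above, not driver code): with bit-17 swizzling active the hardware
 * XORs bit 6 of an address with bit 17 of its physical page, so for pages
 * with bit 17 set, walking at most one 64-byte cacheline at a time and
 * copying from gpu_offset ^ 64 undoes the flip, e.g.:
 *
 *	int gpu_offset = 0x90;			// cacheline 2, byte 0x10
 *	int swizzled = gpu_offset ^ 64;		// 0xd0: the neighbouring line
 */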

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourselves into the
		 * gtt read domain and manually flush cachelines (if required).
		 * This optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		if (obj->cache_level == I915_CACHE_NONE)
			needs_clflush = 1;
		if (i915_gem_obj_bound_any(obj)) {
			ret = i915_gem_object_set_to_gtt_domain(obj, false);
			if (ret)
				return ret;
		}
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915_prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

next_page:
		mark_page_accessed(page);

		if (ret)
			goto out;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
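
/*
 * Userspace view (illustrative sketch; fd, handle and buf are assumptions):
 *
 *	char buf[4096];
 *	struct drm_i915_gem_pread pread = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = sizeof(buf),
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
 */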

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->gtt.mappable, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
						user_data,
						page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourselves into the
		 * gtt write domain and manually flush cachelines (if required).
		 * This optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		if (obj->cache_level == I915_CACHE_NONE)
			needs_clflush_after = 1;
		if (i915_gem_obj_bound_any(obj)) {
			ret = i915_gem_object_set_to_gtt_domain(obj, true);
			if (ret)
				return ret;
		}
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
	    && obj->cache_level == I915_CACHE_NONE)
		needs_clflush_before = 1;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire page. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

next_page:
		set_page_dirty(page);
		mark_page_accessed(page);

		if (ret)
			goto out;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			i915_gem_clflush_object(obj);
			i915_gem_chipset_flush(dev);
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(dev);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915_prefault_disable)) {
		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj) {
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
		goto out;
	}

	if (obj->cache_level == I915_CACHE_NONE &&
	    obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC)
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
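
/*
 * Userspace view (illustrative sketch, mirroring the pread example above;
 * fd, handle and buf are assumptions):
 *
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = sizeof(buf),
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 */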

int
i915_gem_check_wedge(struct i915_gpu_error *error,
		     bool interruptible)
{
	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but the reset failed ... */
		if (i915_terminally_wedged(error))
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret;

	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
	if (seqno == ring->outstanding_lazy_request)
		ret = i915_add_request(ring, NULL);

	return ret;
}

/**
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: duh!
 * @reset_counter: reset sequence associated with the given seqno
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
			unsigned reset_counter,
			bool interruptible, struct timespec *timeout)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct timespec before, now, wait_time={1,0};
	unsigned long timeout_jiffies;
	long end;
	bool wait_forever = true;
	int ret;

	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

	trace_i915_gem_request_wait_begin(ring, seqno);

	if (timeout != NULL) {
		wait_time = *timeout;
		wait_forever = false;
	}

	timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);

	if (WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged */
	getrawmonotonic(&before);

#define EXIT_COND \
	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
	 i915_reset_in_progress(&dev_priv->gpu_error) || \
	 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
	do {
		if (interruptible)
			end = wait_event_interruptible_timeout(ring->irq_queue,
							       EXIT_COND,
							       timeout_jiffies);
		else
			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
						 timeout_jiffies);

		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
			end = -EAGAIN;

		/* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
		 * gone. */
		ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
		if (ret)
			end = ret;
	} while (end == 0 && wait_forever);

	getrawmonotonic(&now);

	ring->irq_put(ring);
	trace_i915_gem_request_wait_end(ring, seqno);
#undef EXIT_COND

	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
		*timeout = timespec_sub(*timeout, sleep_time);
		if (!timespec_valid(timeout)) /* i.e. negative time remains */
			set_normalized_timespec(timeout, 0, 0);
	}

	switch (end) {
	case -EIO:
	case -EAGAIN: /* Wedged */
	case -ERESTARTSYS: /* Signal */
		return (int)end;
	case 0: /* Timeout */
		return -ETIME;
	default: /* Completed */
		WARN_ON(end < 0); /* We're not aware of other errors */
		return 0;
	}
}
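
/*
 * Illustrative caller pattern for the lockless-reset contract documented
 * above (a sketch of what the nonblocking wait further below actually does):
 * read the reset counter under the lock that protects the seqno, drop the
 * lock, then wait.
 *
 *	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 *	mutex_unlock(&dev->struct_mutex);
 *	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
 *	mutex_lock(&dev->struct_mutex);
 */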

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible = dev_priv->mm.interruptible;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(seqno == 0);

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	return __wait_seqno(ring, seqno,
			    atomic_read(&dev_priv->gpu_error.reset_counter),
			    interruptible, NULL);
}

static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
				     struct intel_ring_buffer *ring)
{
	i915_gem_retire_requests_ring(ring);

	/* Manually manage the write flush as we may have not yet
	 * retired the buffer.
	 *
	 * Note that the last_write_seqno is always the earlier of
	 * the two (read/write) seqno, so if we have successfully waited,
	 * we know we have passed the last write.
	 */
	obj->last_write_seqno = 0;
	obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;

	return 0;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct intel_ring_buffer *ring = obj->ring;
	u32 seqno;
	int ret;

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
					    bool readonly)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = obj->ring;
	unsigned reset_counter;
	u32 seqno;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!dev_priv->mm.interruptible);

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
	mutex_lock(&dev->struct_mutex);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
	if (ret)
		goto unref;

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

unref:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
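
/*
 * Userspace view (illustrative sketch; fd and handle are assumptions): before
 * reading an object through a CPU mmap, move it to the CPU read domain.
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = 0,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */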

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->filp) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
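
/*
 * Userspace view (illustrative sketch; fd and handle are assumptions): this
 * ioctl yields a CPU pointer into the shmem backing store, not a GTT mapping.
 *
 *	struct drm_i915_gem_mmap mm = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = 4096,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mm);
 *	char *ptr = (char *)(uintptr_t)mm.addr_ptr;
 */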

/**
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
		ret = -EINVAL;
		goto unlock;
	}

	/* Now bind it into the GTT if needed */
	ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
	if (ret)
		goto unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto unpin;

	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto unpin;

	obj->fault_mappable = true;

	pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
	pfn >>= PAGE_SHIFT;
	pfn += page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unpin:
	i915_gem_object_unpin(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EIO:
		/* If this -EIO is due to a gpu hang, give the reset code a
		 * chance to clean up the mess. Otherwise return the proper
		 * SIGBUS. */
		if (i915_terminally_wedged(&dev_priv->gpu_error))
			return VM_FAULT_SIGBUS;
	case -EAGAIN:
		/* Give the error handler a chance to run and move the
		 * objects off the GPU active list. Next time we service the
		 * fault, we should be able to transition the page into the
		 * GTT without touching the GPU (and so avoid further
		 * EIO/EAGAIN). If the GPU is wedged, then there is no issue
		 * with coherency, just lost writes.
		 */
		set_need_resched();
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	case -ENOSPC:
		return VM_FAULT_SIGBUS;
	default:
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
		return VM_FAULT_SIGBUS;
	}
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	if (!obj->fault_mappable)
		return;

	drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
	obj->fault_mappable = false;
}

uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
	uint32_t gtt_size;

	if (INTEL_INFO(dev)->gen >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (INTEL_INFO(dev)->gen == 3)
		gtt_size = 1024*1024;
	else
		gtt_size = 512*1024;

	while (gtt_size < size)
		gtt_size <<= 1;

	return gtt_size;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
			   int tiling_mode, bool fenced)
{
	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
}
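
/*
 * Worked example (illustrative): on gen3 a 1.5 MiB tiled object starts from
 * the 1 MiB minimum fence size and doubles until it covers the object, so
 * both i915_gem_get_gtt_size() and the fenced alignment come out at 2 MiB;
 * the same object on gen4+ needs only its own size and 4 KiB alignment.
 */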

static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int ret;

	if (drm_vma_node_has_offset(&obj->base.vma_node))
		return 0;

	dev_priv->mm.shrinker_no_lock_stealing = true;

	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	/* Badly fragmented mmap space? The only way we can recover
	 * space is by destroying unwanted objects. We can't randomly release
	 * mmap_offsets as userspace expects them to be persistent for the
	 * lifetime of the objects. The closest we can do is to release the
	 * offsets on purgeable objects by truncating it and marking it purged,
	 * which prevents userspace from ever using that object again.
	 */
	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	i915_gem_shrink_all(dev_priv);
	ret = drm_gem_create_mmap_offset(&obj->base);
out:
	dev_priv->mm.shrinker_no_lock_stealing = false;

	return ret;
}

static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
}

int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->base.size > dev_priv->gtt.mappable_end) {
		ret = -E2BIG;
		goto out;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
		ret = -EINVAL;
		goto out;
	}

	ret = i915_gem_object_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
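
/*
 * Userspace view (illustrative sketch; fd, handle and size are assumptions):
 * the fake offset is only meaningful as the offset argument of a subsequent
 * mmap() on the drm fd, which then faults through i915_gem_fault() above.
 *
 *	struct drm_i915_gem_mmap_gtt mg = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mg);
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, mg.offset);
 */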

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	struct inode *inode;

	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.filp == NULL)
		return;

	/* Our goal here is to return as much of the memory as
	 * possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	inode = file_inode(obj->base.filp);
	shmem_truncate_range(inode, 0, (loff_t)-1);

	obj->madv = __I915_MADV_PURGED;
}

static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{
	return obj->madv == I915_MADV_DONTNEED;
}

static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct sg_page_iter sg_iter;
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		WARN_ON(ret != -EIO);
		i915_gem_clflush_object(obj);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (obj->dirty)
			set_page_dirty(page);

		if (obj->madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		page_cache_release(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static int
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	const struct drm_i915_gem_object_ops *ops = obj->ops;

	if (obj->pages == NULL)
		return 0;

	if (obj->pages_pin_count)
		return -EBUSY;

	BUG_ON(i915_gem_obj_bound_any(obj));

	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early. */
	list_del(&obj->global_list);

	ops->put_pages(obj);
	obj->pages = NULL;

	if (i915_gem_object_is_purgeable(obj))
		i915_gem_object_truncate(obj);

	return 0;
}

static long
__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
		  bool purgeable_only)
{
	struct drm_i915_gem_object *obj, *next;
	long count = 0;

	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.unbound_list,
				 global_list) {
		if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
		    i915_gem_object_put_pages(obj) == 0) {
			count += obj->base.size >> PAGE_SHIFT;
			if (count >= target)
				return count;
		}
	}

	list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
				 global_list) {
		struct i915_vma *vma, *v;

		if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
			continue;

		list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
			if (i915_vma_unbind(vma))
				break;

		if (!i915_gem_object_put_pages(obj)) {
			count += obj->base.size >> PAGE_SHIFT;
			if (count >= target)
				return count;
		}
	}

	return count;
}

static long
i915_gem_purge(struct drm_i915_private *dev_priv, long target)
{
	return __i915_gem_shrink(dev_priv, target, true);
}

static void
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj, *next;

	i915_gem_evict_everything(dev_priv->dev);

	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
				 global_list)
		i915_gem_object_put_pages(obj);
}

static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int page_count, i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sg_page_iter sg_iter;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	gfp_t gfp;

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	page_count = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping = file_inode(obj->base.filp)->i_mapping;
	gfp = mapping_gfp_mask(mapping);
	gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
	gfp &= ~(__GFP_IO | __GFP_WAIT);
	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		if (IS_ERR(page)) {
			i915_gem_purge(dev_priv, page_count);
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		}
		if (IS_ERR(page)) {
			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 */
			gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
			gfp |= __GFP_IO | __GFP_WAIT;

			i915_gem_shrink_all(dev_priv);
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (IS_ERR(page))
				goto err_pages;

			gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
			gfp &= ~(__GFP_IO | __GFP_WAIT);
		}
#ifdef CONFIG_SWIOTLB
		if (swiotlb_nr_tbl()) {
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
			sg = sg_next(sg);
			continue;
		}
#endif
		if (!i || page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);
	}
#ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
#endif
		sg_mark_end(sg);
	obj->pages = st;

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj);

	return 0;

err_pages:
	sg_mark_end(sg);
	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
		page_cache_release(sg_page_iter_page(&sg_iter));
	sg_free_table(st);
	kfree(st);
	return PTR_ERR(page);
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_get_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_put_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int
i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	const struct drm_i915_gem_object_ops *ops = obj->ops;
	int ret;

	if (obj->pages)
		return 0;

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to obtain a purgeable object\n");
		return -EINVAL;
	}

	BUG_ON(obj->pages_pin_count);

	ret = ops->get_pages(obj);
	if (ret)
		return ret;

	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
	return 0;
}

void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
			       struct intel_ring_buffer *ring)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 seqno = intel_ring_get_seqno(ring);

	BUG_ON(ring == NULL);
	if (obj->ring != ring && obj->last_write_seqno) {
		/* Keep the seqno relative to the current ring */
		obj->last_write_seqno = seqno;
	}
	obj->ring = ring;

	/* Add a reference if we're newly entering the active list. */
	if (!obj->active) {
		drm_gem_object_reference(&obj->base);
		obj->active = 1;
	}

	list_move_tail(&obj->ring_list, &ring->active_list);

	obj->last_read_seqno = seqno;

	if (obj->fenced_gpu_access) {
		obj->last_fenced_seqno = seqno;

		/* Bump MRU to take account of the delayed flush */
		if (obj->fence_reg != I915_FENCE_REG_NONE) {
			struct drm_i915_fence_reg *reg;

			reg = &dev_priv->fence_regs[obj->fence_reg];
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
		}
	}
}

static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
	struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);

	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
	BUG_ON(!obj->active);

	list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);

	list_del_init(&obj->ring_list);
	obj->ring = NULL;

	obj->last_read_seqno = 0;
	obj->last_write_seqno = 0;
	obj->base.write_domain = 0;

	obj->last_fenced_seqno = 0;
	obj->fenced_gpu_access = false;

	obj->active = 0;
	drm_gem_object_unreference(&obj->base);

	WARN_ON(i915_verify_lists(dev));
}

static int
i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i, j;

	/* Carefully retire all requests without writing to the rings */
	for_each_ring(ring, dev_priv, i) {
		ret = intel_ring_idle(ring);
		if (ret)
			return ret;
	}
	i915_gem_retire_requests(dev);

	/* Finally reset hw state */
	for_each_ring(ring, dev_priv, i) {
		intel_ring_init_seqno(ring, seqno);

		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
			ring->sync_seqno[j] = 0;
	}

	return 0;
}

int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (seqno == 0)
		return -EINVAL;

	/* HWS page needs to be set less than what we
	 * will inject to ring
	 */
	ret = i915_gem_init_seqno(dev, seqno - 1);
	if (ret)
		return ret;

	/* Carefully set the last_seqno value so that wrap
	 * detection still works
	 */
	dev_priv->next_seqno = seqno;
	dev_priv->last_seqno = seqno - 1;
	if (dev_priv->last_seqno == 0)
		dev_priv->last_seqno--;

	return 0;
}

int
i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* reserve 0 for non-seqno */
	if (dev_priv->next_seqno == 0) {
		int ret = i915_gem_init_seqno(dev, 0);
		if (ret)
			return ret;

		dev_priv->next_seqno = 1;
	}

	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
	return 0;
}

int __i915_add_request(struct intel_ring_buffer *ring,
		       struct drm_file *file,
		       struct drm_i915_gem_object *obj,
		       u32 *out_seqno)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_request *request;
	u32 request_ring_position, request_start;
	int was_empty;
	int ret;

	request_start = intel_ring_get_tail(ring);
	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	ret = intel_ring_flush_all_caches(ring);
	if (ret)
		return ret;

	request = kmalloc(sizeof(*request), GFP_KERNEL);
	if (request == NULL)
		return -ENOMEM;

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	request_ring_position = intel_ring_get_tail(ring);

	ret = ring->add_request(ring);
	if (ret) {
		kfree(request);
		return ret;
	}

	request->seqno = intel_ring_get_seqno(ring);
	request->ring = ring;
	request->head = request_start;
	request->tail = request_ring_position;
	request->ctx = ring->last_context;
	request->batch_obj = obj;

	/* Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */

	if (request->ctx)
		i915_gem_context_reference(request->ctx);

	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&ring->request_list);
	list_add_tail(&request->list, &ring->request_list);
	request->file_priv = NULL;

	if (file) {
		struct drm_i915_file_private *file_priv = file->driver_priv;

		spin_lock(&file_priv->mm.lock);
		request->file_priv = file_priv;
		list_add_tail(&request->client_list,
			      &file_priv->mm.request_list);
		spin_unlock(&file_priv->mm.lock);
	}

	trace_i915_gem_request_add(ring, request->seqno);
	ring->outstanding_lazy_request = 0;

	if (!dev_priv->ums.mm_suspended) {
		i915_queue_hangcheck(ring->dev);

		if (was_empty) {
			queue_delayed_work(dev_priv->wq,
					   &dev_priv->mm.retire_work,
					   round_jiffies_up_relative(HZ));
			intel_mark_busy(dev_priv->dev);
		}
	}

	if (out_seqno)
		*out_seqno = request->seqno;
	return 0;
}

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	if (request->file_priv) {
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
				    struct i915_address_space *vm)
{
	if (acthd >= i915_gem_obj_offset(obj, vm) &&
	    acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
		return true;

	return false;
}

static bool i915_head_inside_request(const u32 acthd_unmasked,
				     const u32 request_start,
				     const u32 request_end)
{
	const u32 acthd = acthd_unmasked & HEAD_ADDR;

	if (request_start < request_end) {
		if (acthd >= request_start && acthd < request_end)
			return true;
	} else if (request_start > request_end) {
		if (acthd >= request_start || acthd < request_end)
			return true;
	}

	return false;
}

static struct i915_address_space *
request_to_vm(struct drm_i915_gem_request *request)
{
	struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
	struct i915_address_space *vm;

	vm = &dev_priv->gtt.base;

	return vm;
}

static bool i915_request_guilty(struct drm_i915_gem_request *request,
				const u32 acthd, bool *inside)
{
	/* There is a possibility that the unmasked head address
	 * pointing inside the ring matches the batch_obj address range.
	 * However this is extremely unlikely.
	 */
	if (request->batch_obj) {
		if (i915_head_inside_object(acthd, request->batch_obj,
					    request_to_vm(request))) {
			*inside = true;
			return true;
		}
	}

	if (i915_head_inside_request(acthd, request->head, request->tail)) {
		*inside = false;
		return true;
	}

	return false;
}

static void i915_set_reset_status(struct intel_ring_buffer *ring,
				  struct drm_i915_gem_request *request,
				  u32 acthd)
{
	struct i915_ctx_hang_stats *hs = NULL;
	bool inside, guilty;
	unsigned long offset = 0;

	/* Innocent until proven guilty */
	guilty = false;

	if (request->batch_obj)
		offset = i915_gem_obj_offset(request->batch_obj,
					     request_to_vm(request));

	if (ring->hangcheck.action != wait &&
	    i915_request_guilty(request, acthd, &inside)) {
		DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
			  ring->name,
			  inside ? "inside" : "flushing",
			  offset,
			  request->ctx ? request->ctx->id : 0,
			  acthd);

		guilty = true;
	}

	/* If contexts are disabled or this is the default context, use
	 * file_priv->reset_state
	 */
	if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
		hs = &request->ctx->hang_stats;
	else if (request->file_priv)
		hs = &request->file_priv->hang_stats;

	if (hs) {
		if (guilty)
			hs->batch_active++;
		else
			hs->batch_pending++;
	}
}

static void i915_gem_free_request(struct drm_i915_gem_request *request)
{
	list_del(&request->list);
	i915_gem_request_remove_from_client(request);

	if (request->ctx)
		i915_gem_context_unreference(request->ctx);

	kfree(request);
}

static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
				      struct intel_ring_buffer *ring)
{
	u32 completed_seqno;
	u32 acthd;

	acthd = intel_ring_get_active_head(ring);
	completed_seqno = ring->get_seqno(ring, false);

	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		if (request->seqno > completed_seqno)
			i915_set_reset_status(ring, request, acthd);

		i915_gem_free_request(request);
	}

	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&ring->active_list,
				       struct drm_i915_gem_object,
				       ring_list);

		i915_gem_object_move_to_inactive(obj);
	}
}

void i915_gem_restore_fences(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];

		/*
		 * Commit delayed tiling changes if we have an object still
		 * attached to the fence, otherwise just clear the fence.
		 */
		if (reg->obj) {
			i915_gem_object_update_fence(reg->obj, reg,
						     reg->obj->tiling_mode);
		} else {
			i915_gem_write_fence(dev, i, NULL);
		}
	}
}

void i915_gem_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		i915_gem_reset_ring_lists(dev_priv, ring);

	i915_gem_restore_fences(dev);
}

/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
	uint32_t seqno;

	if (list_empty(&ring->request_list))
		return;

	WARN_ON(i915_verify_lists(ring->dev));

	seqno = ring->get_seqno(ring, true);

	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		if (!i915_seqno_passed(seqno, request->seqno))
			break;

		trace_i915_gem_request_retire(ring, request->seqno);
		/* We know the GPU must have read the request to have
		 * sent us the seqno + interrupt, so use the position
		 * of the tail of the request to update the last known
		 * position of the GPU head.
		 */
		ring->last_retired_head = request->tail;

		i915_gem_free_request(request);
	}

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&ring->active_list,
				       struct drm_i915_gem_object,
				       ring_list);

		if (!i915_seqno_passed(seqno, obj->last_read_seqno))
			break;

		i915_gem_object_move_to_inactive(obj);
	}

	if (unlikely(ring->trace_irq_seqno &&
		     i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
		ring->irq_put(ring);
		ring->trace_irq_seqno = 0;
	}

	WARN_ON(i915_verify_lists(ring->dev));
}

void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		i915_gem_retire_requests_ring(ring);
}

static void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;
	struct intel_ring_buffer *ring;
	bool idle;
	int i;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	/* Come back later if the device is busy... */
	if (!mutex_trylock(&dev->struct_mutex)) {
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
				   round_jiffies_up_relative(HZ));
		return;
	}

	i915_gem_retire_requests(dev);

	/* Send a periodic flush down the ring so we don't hold onto GEM
	 * objects indefinitely.
	 */
	idle = true;
	for_each_ring(ring, dev_priv, i) {
		if (ring->gpu_caches_dirty)
			i915_add_request(ring, NULL);

		idle &= list_empty(&ring->request_list);
	}

	if (!dev_priv->ums.mm_suspended && !idle)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
				   round_jiffies_up_relative(HZ));
	if (idle)
		intel_mark_idle(dev);

	mutex_unlock(&dev->struct_mutex);
}

/**
 * Ensures that an object will eventually get non-busy by flushing any required
 * write domains, emitting any outstanding lazy request and retiring any
 * completed requests.
 */
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{
	int ret;

	if (obj->active) {
		ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
		if (ret)
			return ret;

		i915_gem_retire_requests_ring(obj->ring);
	}

	return 0;
}
2432 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2433 * @DRM_IOCTL_ARGS: standard ioctl arguments
2435 * Returns 0 if successful, else an error is returned with the remaining time in
2436 * the timeout parameter.
2437 * -ETIME: object is still busy after timeout
2438 * -ERESTARTSYS: signal interrupted the wait
2439 * -ENONENT: object doesn't exist
2440 * Also possible, but rare:
2441 * -EAGAIN: GPU wedged
2443 * -ENODEV: Internal IRQ fail
2444 * -E?: The add request failed
2446 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2447 * non-zero timeout parameter the wait ioctl will wait for the given number of
2448 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2449 * without holding struct_mutex the object may become re-busied before this
2450 * function completes. A similar but shorter race condition exists in the busy ioctl.
2454 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2456 drm_i915_private_t *dev_priv = dev->dev_private;
2457 struct drm_i915_gem_wait *args = data;
2458 struct drm_i915_gem_object *obj;
2459 struct intel_ring_buffer *ring = NULL;
2460 struct timespec timeout_stack, *timeout = NULL;
2461 unsigned reset_counter;
2465 if (args->timeout_ns >= 0) {
2466 timeout_stack = ns_to_timespec(args->timeout_ns);
2467 timeout = &timeout_stack;
2470 ret = i915_mutex_lock_interruptible(dev);
2474 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2475 if (&obj->base == NULL) {
2476 mutex_unlock(&dev->struct_mutex);
2480 /* Need to make sure the object gets inactive eventually. */
2481 ret = i915_gem_object_flush_active(obj);
2486 seqno = obj->last_read_seqno;
2493 /* Do this after OLR check to make sure we make forward progress polling
2494 * on this IOCTL with a 0 timeout (like busy ioctl)
2496 if (!args->timeout_ns) {
2501 drm_gem_object_unreference(&obj->base);
2502 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2503 mutex_unlock(&dev->struct_mutex);
2505 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
2507 args->timeout_ns = timespec_to_ns(timeout);
2511 drm_gem_object_unreference(&obj->base);
2512 mutex_unlock(&dev->struct_mutex);
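/*
 * Example (illustrative sketch, not part of the driver): userspace can use
 * this ioctl either as a bounded wait or, with timeout_ns == 0, as a
 * busy-poll via libdrm's drmIoctl(). The "fd" and "handle" names below are
 * hypothetical:
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle  = handle,
 *		.timeout_ns = 10000000,
 *	};
 *	int ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *
 * On success, wait.timeout_ns has been updated with the time remaining; a
 * -1 return with errno == ETIME means the object was still busy when the
 * 10ms budget expired.
 */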
2517 * i915_gem_object_sync - sync an object to a ring.
2519 * @obj: object which may be in use on another ring.
2520 * @to: ring we wish to use the object on. May be NULL.
2522 * This code is meant to abstract object synchronization with the GPU.
2523 * Calling with NULL implies synchronizing the object with the CPU
2524 * rather than a particular GPU ring.
2526 * Returns 0 if successful, else propagates up the lower layer error.
2529 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2530 struct intel_ring_buffer *to)
2532 struct intel_ring_buffer *from = obj->ring;
2536 if (from == NULL || to == from)
2539 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2540 return i915_gem_object_wait_rendering(obj, false);
2542 idx = intel_ring_sync_index(from, to);
2544 seqno = obj->last_read_seqno;
2545 if (seqno <= from->sync_seqno[idx])
2548 ret = i915_gem_check_olr(obj->ring, seqno);
2552 ret = to->sync_to(to, from, seqno);
2554 /* We use last_read_seqno because sync_to()
2555 * might have just caused seqno wrap under the hood.
2558 from->sync_seqno[idx] = obj->last_read_seqno;
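/*
 * Example (in-kernel usage sketch): a caller about to use "obj" on "ring"
 * serialises against outstanding work on any other ring first; with
 * semaphores enabled this queues a GPU-side wait rather than blocking the
 * CPU:
 *
 *	ret = i915_gem_object_sync(obj, ring);
 *	if (ret)
 *		return ret;
 */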
2563 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2565 u32 old_write_domain, old_read_domains;
2567 /* Force a pagefault for domain tracking on next user access */
2568 i915_gem_release_mmap(obj);
2570 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2573 /* Wait for any direct GTT access to complete */
2576 old_read_domains = obj->base.read_domains;
2577 old_write_domain = obj->base.write_domain;
2579 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2580 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2582 trace_i915_gem_object_change_domain(obj,
2587 int i915_vma_unbind(struct i915_vma *vma)
2589 struct drm_i915_gem_object *obj = vma->obj;
2590 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2593 if (list_empty(&vma->vma_link))
2599 BUG_ON(obj->pages == NULL);
2601 ret = i915_gem_object_finish_gpu(obj);
2604 /* Continue on if we fail due to EIO, the GPU is hung so we
2605 * should be safe and we need to clean up or else we might
2606 * cause memory corruption through use-after-free.
2609 i915_gem_object_finish_gtt(obj);
2611 /* release the fence reg _after_ flushing */
2612 ret = i915_gem_object_put_fence(obj);
2616 trace_i915_vma_unbind(vma);
2618 if (obj->has_global_gtt_mapping)
2619 i915_gem_gtt_unbind_object(obj);
2620 if (obj->has_aliasing_ppgtt_mapping) {
2621 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2622 obj->has_aliasing_ppgtt_mapping = 0;
2624 i915_gem_gtt_finish_object(obj);
2625 i915_gem_object_unpin_pages(obj);
2627 list_del(&vma->mm_list);
2628 /* Avoid an unnecessary call to unbind on rebind. */
2629 if (i915_is_ggtt(vma->vm))
2630 obj->map_and_fenceable = true;
2632 drm_mm_remove_node(&vma->node);
2633 i915_gem_vma_destroy(vma);
2635 /* Since the unbound list is global, only move to that list if
2636 * no more VMAs exist.
2637 * NB: Until we have real VMAs there will only ever be one */
2638 WARN_ON(!list_empty(&obj->vma_list));
2639 if (list_empty(&obj->vma_list))
2640 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2646 * Unbinds an object from the global GTT aperture.
2649 i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2651 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2652 struct i915_address_space *ggtt = &dev_priv->gtt.base;
2654 if (!i915_gem_obj_ggtt_bound(obj))
2660 BUG_ON(obj->pages == NULL);
2662 return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
2665 int i915_gpu_idle(struct drm_device *dev)
2667 drm_i915_private_t *dev_priv = dev->dev_private;
2668 struct intel_ring_buffer *ring;
2671 /* Flush everything onto the inactive list. */
2672 for_each_ring(ring, dev_priv, i) {
2673 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2677 ret = intel_ring_idle(ring);
2685 static void i965_write_fence_reg(struct drm_device *dev, int reg,
2686 struct drm_i915_gem_object *obj)
2688 drm_i915_private_t *dev_priv = dev->dev_private;
2690 int fence_pitch_shift;
2692 if (INTEL_INFO(dev)->gen >= 6) {
2693 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2694 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2696 fence_reg = FENCE_REG_965_0;
2697 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2700 fence_reg += reg * 8;
2702 /* To w/a incoherency with non-atomic 64-bit register updates,
2703 * we split the 64-bit update into two 32-bit writes. In order
2704 * for a partial fence not to be evaluated between writes, we
2705 * precede the update with write to turn off the fence register,
2706 * and only enable the fence as the last step.
2708 * For extra levels of paranoia, we make sure each step lands
2709 * before applying the next step.
2711 I915_WRITE(fence_reg, 0);
2712 POSTING_READ(fence_reg);
2715 u32 size = i915_gem_obj_ggtt_size(obj);
2718 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
2720 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
2721 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
2722 if (obj->tiling_mode == I915_TILING_Y)
2723 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2724 val |= I965_FENCE_REG_VALID;
2726 I915_WRITE(fence_reg + 4, val >> 32);
2727 POSTING_READ(fence_reg + 4);
2729 I915_WRITE(fence_reg + 0, val);
2730 POSTING_READ(fence_reg);
2732 I915_WRITE(fence_reg + 4, 0);
2733 POSTING_READ(fence_reg + 4);
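/*
 * The split-update pattern above generalises to any 64-bit register that
 * must never be observed half-written: invalidate first, write the high
 * dword, then write the low dword carrying the valid bit, with a posting
 * read after each step. A minimal sketch, with REG_LO/REG_HI standing in
 * for the two halves of a hypothetical register:
 *
 *	I915_WRITE(REG_LO, 0);
 *	POSTING_READ(REG_LO);
 *	I915_WRITE(REG_HI, upper_32_bits(val));
 *	POSTING_READ(REG_HI);
 *	I915_WRITE(REG_LO, lower_32_bits(val));
 *	POSTING_READ(REG_LO);
 */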
2737 static void i915_write_fence_reg(struct drm_device *dev, int reg,
2738 struct drm_i915_gem_object *obj)
2740 drm_i915_private_t *dev_priv = dev->dev_private;
2744 u32 size = i915_gem_obj_ggtt_size(obj);
2748 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
2749 (size & -size) != size ||
2750 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2751 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2752 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
2754 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2759 /* Note: pitch better be a power of two tile widths */
2760 pitch_val = obj->stride / tile_width;
2761 pitch_val = ffs(pitch_val) - 1;
2763 val = i915_gem_obj_ggtt_offset(obj);
2764 if (obj->tiling_mode == I915_TILING_Y)
2765 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2766 val |= I915_FENCE_SIZE_BITS(size);
2767 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2768 val |= I830_FENCE_REG_VALID;
2773 reg = FENCE_REG_830_0 + reg * 4;
2775 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2777 I915_WRITE(reg, val);
2781 static void i830_write_fence_reg(struct drm_device *dev, int reg,
2782 struct drm_i915_gem_object *obj)
2784 drm_i915_private_t *dev_priv = dev->dev_private;
2788 u32 size = i915_gem_obj_ggtt_size(obj);
2791 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
2792 (size & -size) != size ||
2793 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2794 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2795 i915_gem_obj_ggtt_offset(obj), size);
2797 pitch_val = obj->stride / 128;
2798 pitch_val = ffs(pitch_val) - 1;
2800 val = i915_gem_obj_ggtt_offset(obj);
2801 if (obj->tiling_mode == I915_TILING_Y)
2802 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2803 val |= I830_FENCE_SIZE_BITS(size);
2804 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2805 val |= I830_FENCE_REG_VALID;
2809 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2810 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2813 static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2815 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2818 static void i915_gem_write_fence(struct drm_device *dev, int reg,
2819 struct drm_i915_gem_object *obj)
2821 struct drm_i915_private *dev_priv = dev->dev_private;
2823 /* Ensure that all CPU reads are completed before installing a fence
2824 * and all writes before removing the fence.
2826 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2829 WARN(obj && (!obj->stride || !obj->tiling_mode),
2830 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2831 obj->stride, obj->tiling_mode);
2833 switch (INTEL_INFO(dev)->gen) {
2837 case 4: i965_write_fence_reg(dev, reg, obj); break;
2838 case 3: i915_write_fence_reg(dev, reg, obj); break;
2839 case 2: i830_write_fence_reg(dev, reg, obj); break;
2843 /* And similarly be paranoid that no direct access to this region
2844 * is reordered to before the fence is installed.
2846 if (i915_gem_object_needs_mb(obj))
2850 static inline int fence_number(struct drm_i915_private *dev_priv,
2851 struct drm_i915_fence_reg *fence)
2853 return fence - dev_priv->fence_regs;
2856 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2857 struct drm_i915_fence_reg *fence,
2860 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2861 int reg = fence_number(dev_priv, fence);
2863 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
2866 obj->fence_reg = reg;
2868 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2870 obj->fence_reg = I915_FENCE_REG_NONE;
2872 list_del_init(&fence->lru_list);
2874 obj->fence_dirty = false;
2878 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
2880 if (obj->last_fenced_seqno) {
2881 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2885 obj->last_fenced_seqno = 0;
2888 obj->fenced_gpu_access = false;
2893 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2895 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2896 struct drm_i915_fence_reg *fence;
2899 ret = i915_gem_object_wait_fence(obj);
2903 if (obj->fence_reg == I915_FENCE_REG_NONE)
2906 fence = &dev_priv->fence_regs[obj->fence_reg];
2908 i915_gem_object_fence_lost(obj);
2909 i915_gem_object_update_fence(obj, fence, false);
2914 static struct drm_i915_fence_reg *
2915 i915_find_fence_reg(struct drm_device *dev)
2917 struct drm_i915_private *dev_priv = dev->dev_private;
2918 struct drm_i915_fence_reg *reg, *avail;
2921 /* First try to find a free reg */
2923 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2924 reg = &dev_priv->fence_regs[i];
2928 if (!reg->pin_count)
2935 /* None available, try to steal one or wait for a user to finish */
2936 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2947 * i915_gem_object_get_fence - set up fencing for an object
2948 * @obj: object to map through a fence reg
2950 * When mapping objects through the GTT, userspace wants to be able to write
2951 * to them without having to worry about swizzling if the object is tiled.
2952 * This function walks the fence regs looking for a free one for @obj,
2953 * stealing one if it can't find any.
2955 * It then sets up the reg based on the object's properties: address, pitch
2956 * and tiling format.
2958 * For an untiled surface, this removes any existing fence.
2961 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2963 struct drm_device *dev = obj->base.dev;
2964 struct drm_i915_private *dev_priv = dev->dev_private;
2965 bool enable = obj->tiling_mode != I915_TILING_NONE;
2966 struct drm_i915_fence_reg *reg;
2969 /* Have we updated the tiling parameters upon the object and so
2970 * will need to serialise the write to the associated fence register?
2972 if (obj->fence_dirty) {
2973 ret = i915_gem_object_wait_fence(obj);
2978 /* Just update our place in the LRU if our fence is getting reused. */
2979 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2980 reg = &dev_priv->fence_regs[obj->fence_reg];
2981 if (!obj->fence_dirty) {
2982 list_move_tail(&reg->lru_list,
2983 &dev_priv->mm.fence_list);
2986 } else if (enable) {
2987 reg = i915_find_fence_reg(dev);
2992 struct drm_i915_gem_object *old = reg->obj;
2994 ret = i915_gem_object_wait_fence(old);
2998 i915_gem_object_fence_lost(old);
3003 i915_gem_object_update_fence(obj, reg, enable);
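/*
 * Example (in-kernel usage sketch): CPU access to a tiled object through
 * the aperture typically pins it into the mappable GTT and then installs
 * a fence:
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
 *	if (ret == 0)
 *		ret = i915_gem_object_get_fence(obj);
 */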
3008 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3009 struct drm_mm_node *gtt_space,
3010 unsigned long cache_level)
3012 struct drm_mm_node *other;
3014 /* On non-LLC machines we have to be careful when putting differing
3015 * types of snoopable memory together to avoid the prefetcher
3016 * crossing memory domains and dying.
3021 if (!drm_mm_node_allocated(gtt_space))
3024 if (list_empty(&gtt_space->node_list))
3027 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3028 if (other->allocated && !other->hole_follows && other->color != cache_level)
3031 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3032 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3038 static void i915_gem_verify_gtt(struct drm_device *dev)
3041 struct drm_i915_private *dev_priv = dev->dev_private;
3042 struct drm_i915_gem_object *obj;
3045 list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
3046 if (obj->gtt_space == NULL) {
3047 printk(KERN_ERR "object found on GTT list with no space reserved\n");
3052 if (obj->cache_level != obj->gtt_space->color) {
3053 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
3054 i915_gem_obj_ggtt_offset(obj),
3055 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3057 obj->gtt_space->color);
3062 if (!i915_gem_valid_gtt_space(dev,
3064 obj->cache_level)) {
3065 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
3066 i915_gem_obj_ggtt_offset(obj),
3067 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3079 * Finds free space in the GTT aperture and binds the object there.
3082 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3083 struct i915_address_space *vm,
3085 bool map_and_fenceable,
3088 struct drm_device *dev = obj->base.dev;
3089 drm_i915_private_t *dev_priv = dev->dev_private;
3090 u32 size, fence_size, fence_alignment, unfenced_alignment;
3091 bool mappable, fenceable;
3093 map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
3094 struct i915_vma *vma;
3097 if (WARN_ON(!list_empty(&obj->vma_list)))
3100 fence_size = i915_gem_get_gtt_size(dev,
3103 fence_alignment = i915_gem_get_gtt_alignment(dev,
3105 obj->tiling_mode, true);
3106 unfenced_alignment =
3107 i915_gem_get_gtt_alignment(dev,
3109 obj->tiling_mode, false);
3112 alignment = map_and_fenceable ? fence_alignment :
3114 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
3115 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
3119 size = map_and_fenceable ? fence_size : obj->base.size;
3121 /* If the object is bigger than the entire aperture, reject it early
3122 * before evicting everything in a vain attempt to find space.
3124 if (obj->base.size > gtt_max) {
3125 DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
3127 map_and_fenceable ? "mappable" : "total",
3132 ret = i915_gem_object_get_pages(obj);
3136 i915_gem_object_pin_pages(obj);
3138 /* FIXME: For now we only ever use 1 VMA per object */
3139 BUG_ON(!i915_is_ggtt(vm));
3140 WARN_ON(!list_empty(&obj->vma_list));
3142 vma = i915_gem_vma_create(obj, vm);
3149 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3151 obj->cache_level, 0, gtt_max,
3152 DRM_MM_SEARCH_DEFAULT);
3154 ret = i915_gem_evict_something(dev, vm, size, alignment,
3163 if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
3164 obj->cache_level))) {
3166 goto err_remove_node;
3169 ret = i915_gem_gtt_prepare_object(obj);
3171 goto err_remove_node;
3173 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3174 list_add_tail(&vma->mm_list, &vm->inactive_list);
3178 i915_gem_obj_ggtt_size(obj) == fence_size &&
3179 (i915_gem_obj_ggtt_offset(obj) & (fence_alignment - 1)) == 0;
3183 vma->node.start + obj->base.size <= dev_priv->gtt.mappable_end;
3185 /* Map and fenceable only changes if the VM is the global GGTT */
3186 if (i915_is_ggtt(vm))
3187 obj->map_and_fenceable = mappable && fenceable;
3189 trace_i915_vma_bind(vma, map_and_fenceable);
3190 i915_gem_verify_gtt(dev);
3194 drm_mm_remove_node(&vma->node);
3196 i915_gem_vma_destroy(vma);
3198 i915_gem_object_unpin_pages(obj);
3203 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
3205 /* If we don't have a page list set up, then we're not pinned
3206 * to GPU, and we can ignore the cache flush because it'll happen
3207 * again at bind time.
3209 if (obj->pages == NULL)
3213 * Stolen memory is always coherent with the GPU as it is explicitly
3214 * marked as wc by the system, or the system is cache-coherent.
3219 /* If the GPU is snooping the contents of the CPU cache,
3220 * we do not need to manually clear the CPU cache lines. However,
3221 * the caches are only snooped when the render cache is
3222 * flushed/invalidated. As we always have to emit invalidations
3223 * and flushes when moving into and out of the RENDER domain, correct
3224 * snooping behaviour occurs naturally as the result of our domain
3227 if (obj->cache_level != I915_CACHE_NONE)
3230 trace_i915_gem_object_clflush(obj);
3232 drm_clflush_sg(obj->pages);
3235 /** Flushes the GTT write domain for the object if it's dirty. */
3237 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3239 uint32_t old_write_domain;
3241 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3244 /* No actual flushing is required for the GTT write domain. Writes
3245 * to it immediately go to main memory as far as we know, so there's
3246 * no chipset flush. It also doesn't land in render cache.
3248 * However, we do have to enforce the order so that all writes through
3249 * the GTT land before any writes to the device, such as updates to the GLOBAL_GTT page directory.
3254 old_write_domain = obj->base.write_domain;
3255 obj->base.write_domain = 0;
3257 trace_i915_gem_object_change_domain(obj,
3258 obj->base.read_domains,
3262 /** Flushes the CPU write domain for the object if it's dirty. */
3264 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3266 uint32_t old_write_domain;
3268 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3271 i915_gem_clflush_object(obj);
3272 i915_gem_chipset_flush(obj->base.dev);
3273 old_write_domain = obj->base.write_domain;
3274 obj->base.write_domain = 0;
3276 trace_i915_gem_object_change_domain(obj,
3277 obj->base.read_domains,
3282 * Moves a single object to the GTT read, and possibly write domain.
3284 * This function returns when the move is complete, including waiting on
3288 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3290 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
3291 uint32_t old_write_domain, old_read_domains;
3294 /* Not valid to be called on unbound objects. */
3295 if (!i915_gem_obj_bound_any(obj))
3298 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3301 ret = i915_gem_object_wait_rendering(obj, !write);
3305 i915_gem_object_flush_cpu_write_domain(obj);
3307 /* Serialise direct access to this object with the barriers for
3308 * coherent writes from the GPU, by effectively invalidating the
3309 * GTT domain upon first access.
3311 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3314 old_write_domain = obj->base.write_domain;
3315 old_read_domains = obj->base.read_domains;
3317 /* It should now be out of any other write domains, and we can update
3318 * the domain values for our changes.
3320 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3321 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3323 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3324 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3328 trace_i915_gem_object_change_domain(obj,
3332 /* And bump the LRU for this access */
3333 if (i915_gem_object_is_inactive(obj)) {
3334 struct i915_vma *vma = i915_gem_obj_to_vma(obj,
3335 &dev_priv->gtt.base);
3337 list_move_tail(&vma->mm_list,
3338 &dev_priv->gtt.base.inactive_list);
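/*
 * Example (in-kernel usage sketch): before writing through a GTT mapping,
 * callers move the object into the GTT write domain so that the domain
 * tracking above stays coherent:
 *
 *	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *	if (ret)
 *		return ret;
 */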
3345 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3346 enum i915_cache_level cache_level)
3348 struct drm_device *dev = obj->base.dev;
3349 drm_i915_private_t *dev_priv = dev->dev_private;
3350 struct i915_vma *vma;
3353 if (obj->cache_level == cache_level)
3356 if (obj->pin_count) {
3357 DRM_DEBUG("can not change the cache level of pinned objects\n");
3361 list_for_each_entry(vma, &obj->vma_list, vma_link) {
3362 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
3363 ret = i915_vma_unbind(vma);
3371 if (i915_gem_obj_bound_any(obj)) {
3372 ret = i915_gem_object_finish_gpu(obj);
3376 i915_gem_object_finish_gtt(obj);
3378 /* Before SandyBridge, you could not use tiling or fence
3379 * registers with snooped memory, so relinquish any fences
3380 * currently pointing to our region in the aperture.
3382 if (INTEL_INFO(dev)->gen < 6) {
3383 ret = i915_gem_object_put_fence(obj);
3388 if (obj->has_global_gtt_mapping)
3389 i915_gem_gtt_bind_object(obj, cache_level);
3390 if (obj->has_aliasing_ppgtt_mapping)
3391 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3395 if (cache_level == I915_CACHE_NONE) {
3396 u32 old_read_domains, old_write_domain;
3398 /* If we're coming from LLC cached, then we haven't
3399 * actually been tracking whether the data is in the
3400 * CPU cache or not, since we only allow one bit set
3401 * in obj->write_domain and have been skipping the clflushes.
3402 * Just set it to the CPU cache for now.
3404 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3405 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
3407 old_read_domains = obj->base.read_domains;
3408 old_write_domain = obj->base.write_domain;
3410 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3411 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3413 trace_i915_gem_object_change_domain(obj,
3418 list_for_each_entry(vma, &obj->vma_list, vma_link)
3419 vma->node.color = cache_level;
3420 obj->cache_level = cache_level;
3421 i915_gem_verify_gtt(dev);
3425 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3426 struct drm_file *file)
3428 struct drm_i915_gem_caching *args = data;
3429 struct drm_i915_gem_object *obj;
3432 ret = i915_mutex_lock_interruptible(dev);
3436 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3437 if (&obj->base == NULL) {
3442 args->caching = obj->cache_level != I915_CACHE_NONE;
3444 drm_gem_object_unreference(&obj->base);
3446 mutex_unlock(&dev->struct_mutex);
3450 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3451 struct drm_file *file)
3453 struct drm_i915_gem_caching *args = data;
3454 struct drm_i915_gem_object *obj;
3455 enum i915_cache_level level;
3458 switch (args->caching) {
3459 case I915_CACHING_NONE:
3460 level = I915_CACHE_NONE;
3462 case I915_CACHING_CACHED:
3463 level = I915_CACHE_LLC;
3469 ret = i915_mutex_lock_interruptible(dev);
3473 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3474 if (&obj->base == NULL) {
3479 ret = i915_gem_object_set_cache_level(obj, level);
3481 drm_gem_object_unreference(&obj->base);
3483 mutex_unlock(&dev->struct_mutex);
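/*
 * Example (illustrative sketch): userspace selects the caching mode
 * per-object, e.g. requesting LLC caching for a buffer the CPU will read
 * back frequently ("fd" and "handle" are hypothetical):
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle  = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 */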
3488 * Prepare buffer for display plane (scanout, cursors, etc).
3489 * Can be called from an uninterruptible phase (modesetting) and allows
3490 * any flushes to be pipelined (for pageflips).
3493 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3495 struct intel_ring_buffer *pipelined)
3497 u32 old_read_domains, old_write_domain;
3500 if (pipelined != obj->ring) {
3501 ret = i915_gem_object_sync(obj, pipelined);
3506 /* The display engine is not coherent with the LLC cache on gen6. As
3507 * a result, we make sure that the pinning that is about to occur is
3508 * done with uncached PTEs. This is the lowest common denominator for all chipsets.
3511 * However for gen6+, we could do better by using the GFDT bit instead
3512 * of uncaching, which would allow us to flush all the LLC-cached data
3513 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3515 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
3519 /* As the user may map the buffer once pinned in the display plane
3520 * (e.g. libkms for the bootup splash), we have to ensure that we
3521 * always use map_and_fenceable for all scanout buffers.
3523 ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
3527 i915_gem_object_flush_cpu_write_domain(obj);
3529 old_write_domain = obj->base.write_domain;
3530 old_read_domains = obj->base.read_domains;
3532 /* It should now be out of any other write domains, and we can update
3533 * the domain values for our changes.
3535 obj->base.write_domain = 0;
3536 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3538 trace_i915_gem_object_change_domain(obj,
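/*
 * Example (in-kernel usage sketch): the modesetting code pins a
 * framebuffer for scanout with this helper and then fences it for tiled
 * access, roughly:
 *
 *	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
 *	if (ret == 0)
 *		ret = i915_gem_object_get_fence(obj);
 */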
3546 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3550 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3553 ret = i915_gem_object_wait_rendering(obj, false);
3557 /* Ensure that we invalidate the GPU's caches and TLBs. */
3558 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3563 * Moves a single object to the CPU read, and possibly write domain.
3565 * This function returns when the move is complete, including waiting on
3569 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3571 uint32_t old_write_domain, old_read_domains;
3574 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3577 ret = i915_gem_object_wait_rendering(obj, !write);
3581 i915_gem_object_flush_gtt_write_domain(obj);
3583 old_write_domain = obj->base.write_domain;
3584 old_read_domains = obj->base.read_domains;
3586 /* Flush the CPU cache if it's still invalid. */
3587 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3588 i915_gem_clflush_object(obj);
3590 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3593 /* It should now be out of any other write domains, and we can update
3594 * the domain values for our changes.
3596 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3598 /* If we're writing through the CPU, then the GPU read domains will
3599 * need to be invalidated at next use.
3602 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3603 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3606 trace_i915_gem_object_change_domain(obj,
3613 /* Throttle our rendering by waiting until the ring has completed our requests
3614 * emitted over 20 msec ago.
3616 * Note that if we were to use the current jiffies each time around the loop,
3617 * we wouldn't escape the function with any frames outstanding if the time to
3618 * render a frame was over 20ms.
3620 * This should get us reasonable parallelism between CPU and GPU but also
3621 * relatively low latency when blocking on a particular request to finish.
3624 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3626 struct drm_i915_private *dev_priv = dev->dev_private;
3627 struct drm_i915_file_private *file_priv = file->driver_priv;
3628 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3629 struct drm_i915_gem_request *request;
3630 struct intel_ring_buffer *ring = NULL;
3631 unsigned reset_counter;
3635 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3639 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3643 spin_lock(&file_priv->mm.lock);
3644 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3645 if (time_after_eq(request->emitted_jiffies, recent_enough))
3648 ring = request->ring;
3649 seqno = request->seqno;
3651 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3652 spin_unlock(&file_priv->mm.lock);
3657 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
3659 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3665 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3666 struct i915_address_space *vm,
3668 bool map_and_fenceable,
3671 struct i915_vma *vma;
3674 if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3677 WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
3679 vma = i915_gem_obj_to_vma(obj, vm);
3683 vma->node.start & (alignment - 1)) ||
3684 (map_and_fenceable && !obj->map_and_fenceable)) {
3685 WARN(obj->pin_count,
3686 "bo is already pinned with incorrect alignment:"
3687 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
3688 " obj->map_and_fenceable=%d\n",
3689 i915_gem_obj_offset(obj, vm), alignment,
3691 obj->map_and_fenceable);
3692 ret = i915_vma_unbind(vma);
3698 if (!i915_gem_obj_bound(obj, vm)) {
3699 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3701 ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
3707 if (!dev_priv->mm.aliasing_ppgtt)
3708 i915_gem_gtt_bind_object(obj, obj->cache_level);
3711 if (!obj->has_global_gtt_mapping && map_and_fenceable)
3712 i915_gem_gtt_bind_object(obj, obj->cache_level);
3715 obj->pin_mappable |= map_and_fenceable;
3721 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3723 BUG_ON(obj->pin_count == 0);
3724 BUG_ON(!i915_gem_obj_bound_any(obj));
3726 if (--obj->pin_count == 0)
3727 obj->pin_mappable = false;
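/*
 * Example (in-kernel usage sketch): pin and unpin must balance; a typical
 * short-lived aperture access looks like:
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
 *	if (ret)
 *		return ret;
 *	... access the object through the GTT ...
 *	i915_gem_object_unpin(obj);
 */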
3731 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3732 struct drm_file *file)
3734 struct drm_i915_gem_pin *args = data;
3735 struct drm_i915_gem_object *obj;
3738 ret = i915_mutex_lock_interruptible(dev);
3742 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3743 if (&obj->base == NULL) {
3748 if (obj->madv != I915_MADV_WILLNEED) {
3749 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3754 if (obj->pin_filp != NULL && obj->pin_filp != file) {
3755 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3761 if (obj->user_pin_count == 0) {
3762 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
3767 obj->user_pin_count++;
3768 obj->pin_filp = file;
3770 /* XXX - flush the CPU caches for pinned objects
3771 * as the X server doesn't manage domains yet
3773 i915_gem_object_flush_cpu_write_domain(obj);
3774 args->offset = i915_gem_obj_ggtt_offset(obj);
3776 drm_gem_object_unreference(&obj->base);
3778 mutex_unlock(&dev->struct_mutex);
3783 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3784 struct drm_file *file)
3786 struct drm_i915_gem_pin *args = data;
3787 struct drm_i915_gem_object *obj;
3790 ret = i915_mutex_lock_interruptible(dev);
3794 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3795 if (&obj->base == NULL) {
3800 if (obj->pin_filp != file) {
3801 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3806 obj->user_pin_count--;
3807 if (obj->user_pin_count == 0) {
3808 obj->pin_filp = NULL;
3809 i915_gem_object_unpin(obj);
3813 drm_gem_object_unreference(&obj->base);
3815 mutex_unlock(&dev->struct_mutex);
3820 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3821 struct drm_file *file)
3823 struct drm_i915_gem_busy *args = data;
3824 struct drm_i915_gem_object *obj;
3827 ret = i915_mutex_lock_interruptible(dev);
3831 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3832 if (&obj->base == NULL) {
3837 /* Count all active objects as busy, even if they are currently not used
3838 * by the gpu. Users of this interface expect objects to eventually
3839 * become non-busy without any further actions, therefore emit any
3840 * necessary flushes here.
3842 ret = i915_gem_object_flush_active(obj);
3844 args->busy = obj->active;
3846 BUILD_BUG_ON(I915_NUM_RINGS > 16);
3847 args->busy |= intel_ring_flag(obj->ring) << 16;
3850 drm_gem_object_unreference(&obj->base);
3852 mutex_unlock(&dev->struct_mutex);
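/*
 * Example (illustrative sketch): userspace sees the active flag in bit 0
 * and the last ring's flag in the upper 16 bits ("fd" and "handle" are
 * hypothetical):
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 *	if (busy.busy & 1)
 *		ring_mask = busy.busy >> 16;
 */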
3857 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3858 struct drm_file *file_priv)
3860 return i915_gem_ring_throttle(dev, file_priv);
3864 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3865 struct drm_file *file_priv)
3867 struct drm_i915_gem_madvise *args = data;
3868 struct drm_i915_gem_object *obj;
3871 switch (args->madv) {
3872 case I915_MADV_DONTNEED:
3873 case I915_MADV_WILLNEED:
3879 ret = i915_mutex_lock_interruptible(dev);
3883 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3884 if (&obj->base == NULL) {
3889 if (obj->pin_count) {
3894 if (obj->madv != __I915_MADV_PURGED)
3895 obj->madv = args->madv;
3897 /* if the object is no longer attached, discard its backing storage */
3898 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
3899 i915_gem_object_truncate(obj);
3901 args->retained = obj->madv != __I915_MADV_PURGED;
3904 drm_gem_object_unreference(&obj->base);
3906 mutex_unlock(&dev->struct_mutex);
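/*
 * Example (illustrative sketch): a userspace buffer cache marks idle
 * buffers purgeable and checks "retained" when reusing one ("fd" and
 * "handle" are hypothetical):
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv   = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	...
 *	madv.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *
 * If madv.retained is zero after the second call, the backing storage was
 * discarded and the buffer must be reallocated.
 */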
3910 void i915_gem_object_init(struct drm_i915_gem_object *obj,
3911 const struct drm_i915_gem_object_ops *ops)
3913 INIT_LIST_HEAD(&obj->global_list);
3914 INIT_LIST_HEAD(&obj->ring_list);
3915 INIT_LIST_HEAD(&obj->exec_list);
3916 INIT_LIST_HEAD(&obj->vma_list);
3920 obj->fence_reg = I915_FENCE_REG_NONE;
3921 obj->madv = I915_MADV_WILLNEED;
3922 /* Avoid an unnecessary call to unbind on the first bind. */
3923 obj->map_and_fenceable = true;
3925 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
3928 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3929 .get_pages = i915_gem_object_get_pages_gtt,
3930 .put_pages = i915_gem_object_put_pages_gtt,
3933 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3936 struct drm_i915_gem_object *obj;
3937 struct address_space *mapping;
3940 obj = i915_gem_object_alloc(dev);
3944 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3945 i915_gem_object_free(obj);
3949 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
3950 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
3951 /* 965gm cannot relocate objects above 4GiB. */
3952 mask &= ~__GFP_HIGHMEM;
3953 mask |= __GFP_DMA32;
3956 mapping = file_inode(obj->base.filp)->i_mapping;
3957 mapping_set_gfp_mask(mapping, mask);
3959 i915_gem_object_init(obj, &i915_gem_object_ops);
3961 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3962 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3965 /* On some devices, we can have the GPU use the LLC (the CPU
3966 * cache) for about a 10% performance improvement
3967 * compared to uncached. Graphics requests other than
3968 * display scanout are coherent with the CPU in
3969 * accessing this cache. This means in this mode we
3970 * don't need to clflush on the CPU side, and on the
3971 * GPU side we only need to flush internal caches to
3972 * get data visible to the CPU.
3974 * However, we maintain the display planes as UC, and so
3975 * need to rebind when first used as such.
3977 obj->cache_level = I915_CACHE_LLC;
3979 obj->cache_level = I915_CACHE_NONE;
3981 trace_i915_gem_object_create(obj);
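/*
 * Example (in-kernel usage sketch): other parts of the driver allocate
 * shmem-backed objects with this helper, e.g. a page-sized scratch
 * buffer:
 *
 *	obj = i915_gem_alloc_object(dev, PAGE_SIZE);
 *	if (obj == NULL)
 *		return -ENOMEM;
 */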
3986 int i915_gem_init_object(struct drm_gem_object *obj)
3993 void i915_gem_free_object(struct drm_gem_object *gem_obj)
3995 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3996 struct drm_device *dev = obj->base.dev;
3997 drm_i915_private_t *dev_priv = dev->dev_private;
3998 struct i915_vma *vma, *next;
4000 trace_i915_gem_object_destroy(obj);
4003 i915_gem_detach_phys_object(dev, obj);
4006 /* NB: 0 or 1 elements */
4007 WARN_ON(!list_empty(&obj->vma_list) &&
4008 !list_is_singular(&obj->vma_list));
4009 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4010 int ret = i915_vma_unbind(vma);
4011 if (WARN_ON(ret == -ERESTARTSYS)) {
4012 bool was_interruptible;
4014 was_interruptible = dev_priv->mm.interruptible;
4015 dev_priv->mm.interruptible = false;
4017 WARN_ON(i915_vma_unbind(vma));
4019 dev_priv->mm.interruptible = was_interruptible;
4023 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4024 * before progressing. */
4026 i915_gem_object_unpin_pages(obj);
4028 if (WARN_ON(obj->pages_pin_count))
4029 obj->pages_pin_count = 0;
4030 i915_gem_object_put_pages(obj);
4031 i915_gem_object_free_mmap_offset(obj);
4032 i915_gem_object_release_stolen(obj);
4036 if (obj->base.import_attach)
4037 drm_prime_gem_destroy(&obj->base, NULL);
4039 drm_gem_object_release(&obj->base);
4040 i915_gem_info_remove_obj(dev_priv, obj->base.size);
4043 i915_gem_object_free(obj);
4046 struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
4047 struct i915_address_space *vm)
4049 struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
4051 return ERR_PTR(-ENOMEM);
4053 INIT_LIST_HEAD(&vma->vma_link);
4054 INIT_LIST_HEAD(&vma->mm_list);
4058 /* Keep GGTT vmas first to make debug easier */
4059 if (i915_is_ggtt(vm))
4060 list_add(&vma->vma_link, &obj->vma_list);
4062 list_add_tail(&vma->vma_link, &obj->vma_list);
4067 void i915_gem_vma_destroy(struct i915_vma *vma)
4069 WARN_ON(vma->node.allocated);
4070 list_del(&vma->vma_link);
4075 i915_gem_idle(struct drm_device *dev)
4077 drm_i915_private_t *dev_priv = dev->dev_private;
4080 if (dev_priv->ums.mm_suspended) {
4081 mutex_unlock(&dev->struct_mutex);
4085 ret = i915_gpu_idle(dev);
4087 mutex_unlock(&dev->struct_mutex);
4090 i915_gem_retire_requests(dev);
4092 /* Under UMS, be paranoid and evict. */
4093 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4094 i915_gem_evict_everything(dev);
4096 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
4098 i915_kernel_lost_context(dev);
4099 i915_gem_cleanup_ringbuffer(dev);
4101 /* Cancel the retire work handler, which should be idle now. */
4102 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4107 void i915_gem_l3_remap(struct drm_device *dev)
4109 drm_i915_private_t *dev_priv = dev->dev_private;
4113 if (!HAS_L3_GPU_CACHE(dev))
4116 if (!dev_priv->l3_parity.remap_info)
4119 misccpctl = I915_READ(GEN7_MISCCPCTL);
4120 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
4121 POSTING_READ(GEN7_MISCCPCTL);
4123 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4124 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
4125 if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
4126 DRM_DEBUG("0x%x was already programmed to %x\n",
4127 GEN7_L3LOG_BASE + i, remap);
4128 if (remap && !dev_priv->l3_parity.remap_info[i/4])
4129 DRM_DEBUG_DRIVER("Clearing remapped register\n");
4130 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
4133 /* Make sure all the writes land before disabling dop clock gating */
4134 POSTING_READ(GEN7_L3LOG_BASE);
4136 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
4139 void i915_gem_init_swizzling(struct drm_device *dev)
4141 drm_i915_private_t *dev_priv = dev->dev_private;
4143 if (INTEL_INFO(dev)->gen < 5 ||
4144 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4147 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4148 DISP_TILE_SURFACE_SWIZZLING);
4153 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4155 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4156 else if (IS_GEN7(dev))
4157 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4163 intel_enable_blt(struct drm_device *dev)
4168 /* The blitter was dysfunctional on early prototypes */
4169 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4170 DRM_INFO("BLT not supported on this pre-production hardware;"
4171 " graphics performance will be degraded.\n");
4178 static int i915_gem_init_rings(struct drm_device *dev)
4180 struct drm_i915_private *dev_priv = dev->dev_private;
4183 ret = intel_init_render_ring_buffer(dev);
4188 ret = intel_init_bsd_ring_buffer(dev);
4190 goto cleanup_render_ring;
4193 if (intel_enable_blt(dev)) {
4194 ret = intel_init_blt_ring_buffer(dev);
4196 goto cleanup_bsd_ring;
4199 if (HAS_VEBOX(dev)) {
4200 ret = intel_init_vebox_ring_buffer(dev);
4202 goto cleanup_blt_ring;
4206 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4208 goto cleanup_vebox_ring;
4213 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4215 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4217 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4218 cleanup_render_ring:
4219 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4225 i915_gem_init_hw(struct drm_device *dev)
4227 drm_i915_private_t *dev_priv = dev->dev_private;
4230 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4233 if (dev_priv->ellc_size)
4234 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4236 if (HAS_PCH_NOP(dev)) {
4237 u32 temp = I915_READ(GEN7_MSG_CTL);
4238 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4239 I915_WRITE(GEN7_MSG_CTL, temp);
4242 i915_gem_l3_remap(dev);
4244 i915_gem_init_swizzling(dev);
4246 ret = i915_gem_init_rings(dev);
4251 * XXX: There was some w/a described somewhere suggesting loading
4252 * contexts before PPGTT.
4254 i915_gem_context_init(dev);
4255 if (dev_priv->mm.aliasing_ppgtt) {
4256 ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
4258 i915_gem_cleanup_aliasing_ppgtt(dev);
4259 DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
4266 int i915_gem_init(struct drm_device *dev)
4268 struct drm_i915_private *dev_priv = dev->dev_private;
4271 mutex_lock(&dev->struct_mutex);
4273 if (IS_VALLEYVIEW(dev)) {
4274 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4275 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
4276 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
4277 DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4280 i915_gem_init_global_gtt(dev);
4282 ret = i915_gem_init_hw(dev);
4283 mutex_unlock(&dev->struct_mutex);
4285 i915_gem_cleanup_aliasing_ppgtt(dev);
4289 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4290 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4291 dev_priv->dri1.allow_batchbuffer = 1;
4296 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4298 drm_i915_private_t *dev_priv = dev->dev_private;
4299 struct intel_ring_buffer *ring;
4302 for_each_ring(ring, dev_priv, i)
4303 intel_cleanup_ring_buffer(ring);
4307 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4308 struct drm_file *file_priv)
4310 struct drm_i915_private *dev_priv = dev->dev_private;
4313 if (drm_core_check_feature(dev, DRIVER_MODESET))
4316 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
4317 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4318 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
4321 mutex_lock(&dev->struct_mutex);
4322 dev_priv->ums.mm_suspended = 0;
4324 ret = i915_gem_init_hw(dev);
4326 mutex_unlock(&dev->struct_mutex);
4330 BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
4331 mutex_unlock(&dev->struct_mutex);
4333 ret = drm_irq_install(dev);
4335 goto cleanup_ringbuffer;
4340 mutex_lock(&dev->struct_mutex);
4341 i915_gem_cleanup_ringbuffer(dev);
4342 dev_priv->ums.mm_suspended = 1;
4343 mutex_unlock(&dev->struct_mutex);
4349 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4350 struct drm_file *file_priv)
4352 struct drm_i915_private *dev_priv = dev->dev_private;
4355 if (drm_core_check_feature(dev, DRIVER_MODESET))
4358 drm_irq_uninstall(dev);
4360 mutex_lock(&dev->struct_mutex);
4361 ret = i915_gem_idle(dev);
4363 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4364 * We need to replace this with a semaphore, or something.
4365 * And not confound ums.mm_suspended!
4368 dev_priv->ums.mm_suspended = 1;
4369 mutex_unlock(&dev->struct_mutex);
4375 i915_gem_lastclose(struct drm_device *dev)
4379 if (drm_core_check_feature(dev, DRIVER_MODESET))
4382 mutex_lock(&dev->struct_mutex);
4383 ret = i915_gem_idle(dev);
4385 DRM_ERROR("failed to idle hardware: %d\n", ret);
4386 mutex_unlock(&dev->struct_mutex);
4390 init_ring_lists(struct intel_ring_buffer *ring)
4392 INIT_LIST_HEAD(&ring->active_list);
4393 INIT_LIST_HEAD(&ring->request_list);
4396 static void i915_init_vm(struct drm_i915_private *dev_priv,
4397 struct i915_address_space *vm)
4399 vm->dev = dev_priv->dev;
4400 INIT_LIST_HEAD(&vm->active_list);
4401 INIT_LIST_HEAD(&vm->inactive_list);
4402 INIT_LIST_HEAD(&vm->global_link);
4403 list_add(&vm->global_link, &dev_priv->vm_list);
4407 i915_gem_load(struct drm_device *dev)
4409 drm_i915_private_t *dev_priv = dev->dev_private;
4413 kmem_cache_create("i915_gem_object",
4414 sizeof(struct drm_i915_gem_object), 0,
4418 INIT_LIST_HEAD(&dev_priv->vm_list);
4419 i915_init_vm(dev_priv, &dev_priv->gtt.base);
4421 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4422 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4423 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4424 for (i = 0; i < I915_NUM_RINGS; i++)
4425 init_ring_lists(&dev_priv->ring[i]);
4426 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4427 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4428 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4429 i915_gem_retire_work_handler);
4430 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4432 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4434 I915_WRITE(MI_ARB_STATE,
4435 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4438 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4440 /* Old X drivers will take 0-2 for front, back, depth buffers */
4441 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4442 dev_priv->fence_reg_start = 3;
4444 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4445 dev_priv->num_fence_regs = 32;
4446 else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4447 dev_priv->num_fence_regs = 16;
4449 dev_priv->num_fence_regs = 8;
4451 /* Initialize fence registers to zero */
4452 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4453 i915_gem_restore_fences(dev);
4455 i915_gem_detect_bit_6_swizzle(dev);
4456 init_waitqueue_head(&dev_priv->pending_flip_queue);
4458 dev_priv->mm.interruptible = true;
4460 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
4461 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4462 register_shrinker(&dev_priv->mm.inactive_shrinker);
4466 * Create a physically contiguous memory object for this object
4467 * e.g. for cursor + overlay regs
4469 static int i915_gem_init_phys_object(struct drm_device *dev,
4470 int id, int size, int align)
4472 drm_i915_private_t *dev_priv = dev->dev_private;
4473 struct drm_i915_gem_phys_object *phys_obj;
4476 if (dev_priv->mm.phys_objs[id - 1] || !size)
4479 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4485 phys_obj->handle = drm_pci_alloc(dev, size, align);
4486 if (!phys_obj->handle) {
4491 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4494 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4502 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4504 drm_i915_private_t *dev_priv = dev->dev_private;
4505 struct drm_i915_gem_phys_object *phys_obj;
4507 if (!dev_priv->mm.phys_objs[id - 1])
4510 phys_obj = dev_priv->mm.phys_objs[id - 1];
4511 if (phys_obj->cur_obj) {
4512 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4516 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4518 drm_pci_free(dev, phys_obj->handle);
4520 dev_priv->mm.phys_objs[id - 1] = NULL;
4523 void i915_gem_free_all_phys_object(struct drm_device *dev)
4527 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4528 i915_gem_free_phys_object(dev, i);
4531 void i915_gem_detach_phys_object(struct drm_device *dev,
4532 struct drm_i915_gem_object *obj)
4534 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4541 vaddr = obj->phys_obj->handle->vaddr;
4543 page_count = obj->base.size / PAGE_SIZE;
4544 for (i = 0; i < page_count; i++) {
4545 struct page *page = shmem_read_mapping_page(mapping, i);
4546 if (!IS_ERR(page)) {
4547 char *dst = kmap_atomic(page);
4548 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4551 drm_clflush_pages(&page, 1);
4553 set_page_dirty(page);
4554 mark_page_accessed(page);
4555 page_cache_release(page);
4558 i915_gem_chipset_flush(dev);
4560 obj->phys_obj->cur_obj = NULL;
4561 obj->phys_obj = NULL;
4565 i915_gem_attach_phys_object(struct drm_device *dev,
4566 struct drm_i915_gem_object *obj,
4570 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4571 drm_i915_private_t *dev_priv = dev->dev_private;
4576 if (id > I915_MAX_PHYS_OBJECT)
4579 if (obj->phys_obj) {
4580 if (obj->phys_obj->id == id)
4582 i915_gem_detach_phys_object(dev, obj);
4585 /* create a new object */
4586 if (!dev_priv->mm.phys_objs[id - 1]) {
4587 ret = i915_gem_init_phys_object(dev, id,
4588 obj->base.size, align);
4590 DRM_ERROR("failed to init phys object %d size: %zu\n",
4591 id, obj->base.size);
4596 /* bind to the object */
4597 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4598 obj->phys_obj->cur_obj = obj;
4600 page_count = obj->base.size / PAGE_SIZE;
4602 for (i = 0; i < page_count; i++) {
4606 page = shmem_read_mapping_page(mapping, i);
4608 return PTR_ERR(page);
4610 src = kmap_atomic(page);
4611 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4612 memcpy(dst, src, PAGE_SIZE);
4615 mark_page_accessed(page);
4616 page_cache_release(page);
4623 i915_gem_phys_pwrite(struct drm_device *dev,
4624 struct drm_i915_gem_object *obj,
4625 struct drm_i915_gem_pwrite *args,
4626 struct drm_file *file_priv)
4628 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4629 char __user *user_data = to_user_ptr(args->data_ptr);
4631 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4632 unsigned long unwritten;
4634 /* The physical object once assigned is fixed for the lifetime
4635 * of the obj, so we can safely drop the lock and continue to access vaddr.
4638 mutex_unlock(&dev->struct_mutex);
4639 unwritten = copy_from_user(vaddr, user_data, args->size);
4640 mutex_lock(&dev->struct_mutex);
4645 i915_gem_chipset_flush(dev);
4649 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4651 struct drm_i915_file_private *file_priv = file->driver_priv;
4653 /* Clean up our request list when the client is going away, so that
4654 * later retire_requests won't dereference our soon-to-be-gone file_priv.
4657 spin_lock(&file_priv->mm.lock);
4658 while (!list_empty(&file_priv->mm.request_list)) {
4659 struct drm_i915_gem_request *request;
4661 request = list_first_entry(&file_priv->mm.request_list,
4662 struct drm_i915_gem_request,
4664 list_del(&request->client_list);
4665 request->file_priv = NULL;
4667 spin_unlock(&file_priv->mm.lock);
4670 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4672 if (!mutex_is_locked(mutex))
4675 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
4676 return mutex->owner == task;
4678 /* Since UP may be pre-empted, we cannot assume that we own the lock */
4684 i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4686 struct drm_i915_private *dev_priv =
4687 container_of(shrinker,
4688 struct drm_i915_private,
4689 mm.inactive_shrinker);
4690 struct drm_device *dev = dev_priv->dev;
4691 struct drm_i915_gem_object *obj;
4692 int nr_to_scan = sc->nr_to_scan;
4696 if (!mutex_trylock(&dev->struct_mutex)) {
4697 if (!mutex_is_locked_by(&dev->struct_mutex, current))
4700 if (dev_priv->mm.shrinker_no_lock_stealing)
4707 nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
4709 nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
4712 i915_gem_shrink_all(dev_priv);
4716 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
4717 if (obj->pages_pin_count == 0)
4718 cnt += obj->base.size >> PAGE_SHIFT;
4720 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
4724 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
4725 cnt += obj->base.size >> PAGE_SHIFT;
4729 mutex_unlock(&dev->struct_mutex);
4733 /* All the new VM stuff */
4734 unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
4735 struct i915_address_space *vm)
4737 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4738 struct i915_vma *vma;
4740 if (vm == &dev_priv->mm.aliasing_ppgtt->base)
4741 vm = &dev_priv->gtt.base;
4743 BUG_ON(list_empty(&o->vma_list));
4744 list_for_each_entry(vma, &o->vma_list, vma_link) {
4746 return vma->node.start;
4752 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
4753 struct i915_address_space *vm)
4755 struct i915_vma *vma;
4757 list_for_each_entry(vma, &o->vma_list, vma_link)
4758 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
4764 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
4766 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4767 struct i915_address_space *vm;
4769 list_for_each_entry(vm, &dev_priv->vm_list, global_link)
4770 if (i915_gem_obj_bound(o, vm))
4776 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
4777 struct i915_address_space *vm)
4779 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4780 struct i915_vma *vma;
4782 if (vm == &dev_priv->mm.aliasing_ppgtt->base)
4783 vm = &dev_priv->gtt.base;
4785 BUG_ON(list_empty(&o->vma_list));
4787 list_for_each_entry(vma, &o->vma_list, vma_link)
4789 return vma->node.size;
4794 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4795 struct i915_address_space *vm)
4797 struct i915_vma *vma;
4798 list_for_each_entry(vma, &obj->vma_list, vma_link)