2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/shmem_fs.h>
35 #include <linux/slab.h>
36 #include <linux/swap.h>
37 #include <linux/pci.h>
38 #include <linux/dma-buf.h>
40 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
41 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
43 static __must_check int
44 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
45 struct i915_address_space *vm,
47 bool map_and_fenceable,
49 static int i915_gem_phys_pwrite(struct drm_device *dev,
50 struct drm_i915_gem_object *obj,
51 struct drm_i915_gem_pwrite *args,
52 struct drm_file *file);
54 static void i915_gem_write_fence(struct drm_device *dev, int reg,
55 struct drm_i915_gem_object *obj);
56 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
57 struct drm_i915_fence_reg *fence,
60 static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
61 struct shrink_control *sc);
62 static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
63 struct shrink_control *sc);
64 static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
65 static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
66 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
68 static bool cpu_cache_is_coherent(struct drm_device *dev,
69 enum i915_cache_level level)
71 return HAS_LLC(dev) || level != I915_CACHE_NONE;
74 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
76 if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
79 return obj->pin_display;
82 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
85 i915_gem_release_mmap(obj);
87 /* As we do not have an associated fence register, we will force
88 * a tiling change if we ever need to acquire one.
90 obj->fence_dirty = false;
91 obj->fence_reg = I915_FENCE_REG_NONE;
94 /* some bookkeeping */
95 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
98 spin_lock(&dev_priv->mm.object_stat_lock);
99 dev_priv->mm.object_count++;
100 dev_priv->mm.object_memory += size;
101 spin_unlock(&dev_priv->mm.object_stat_lock);
104 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
107 spin_lock(&dev_priv->mm.object_stat_lock);
108 dev_priv->mm.object_count--;
109 dev_priv->mm.object_memory -= size;
110 spin_unlock(&dev_priv->mm.object_stat_lock);
114 i915_gem_wait_for_error(struct i915_gpu_error *error)
118 #define EXIT_COND (!i915_reset_in_progress(error) || \
119 i915_terminally_wedged(error))
124 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
125 * userspace. If it takes that long something really bad is going on and
126 * we should simply try to bail out and fail as gracefully as possible.
128 ret = wait_event_interruptible_timeout(error->reset_queue,
132 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
134 } else if (ret < 0) {
142 int i915_mutex_lock_interruptible(struct drm_device *dev)
144 struct drm_i915_private *dev_priv = dev->dev_private;
147 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
151 ret = mutex_lock_interruptible(&dev->struct_mutex);
155 WARN_ON(i915_verify_lists(dev));
160 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
162 return i915_gem_obj_bound_any(obj) && !obj->active;
166 i915_gem_init_ioctl(struct drm_device *dev, void *data,
167 struct drm_file *file)
169 struct drm_i915_private *dev_priv = dev->dev_private;
170 struct drm_i915_gem_init *args = data;
172 if (drm_core_check_feature(dev, DRIVER_MODESET))
175 if (args->gtt_start >= args->gtt_end ||
176 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
179 /* GEM with user mode setting was never supported on ilk and later. */
180 if (INTEL_INFO(dev)->gen >= 5)
183 mutex_lock(&dev->struct_mutex);
184 i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
186 dev_priv->gtt.mappable_end = args->gtt_end;
187 mutex_unlock(&dev->struct_mutex);
193 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
194 struct drm_file *file)
196 struct drm_i915_private *dev_priv = dev->dev_private;
197 struct drm_i915_gem_get_aperture *args = data;
198 struct drm_i915_gem_object *obj;
202 mutex_lock(&dev->struct_mutex);
203 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
205 pinned += i915_gem_obj_ggtt_size(obj);
206 mutex_unlock(&dev->struct_mutex);
208 args->aper_size = dev_priv->gtt.base.total;
209 args->aper_available_size = args->aper_size - pinned;
214 void *i915_gem_object_alloc(struct drm_device *dev)
216 struct drm_i915_private *dev_priv = dev->dev_private;
217 return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
220 void i915_gem_object_free(struct drm_i915_gem_object *obj)
222 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
223 kmem_cache_free(dev_priv->slab, obj);
227 i915_gem_create(struct drm_file *file,
228 struct drm_device *dev,
232 struct drm_i915_gem_object *obj;
236 size = roundup(size, PAGE_SIZE);
240 /* Allocate the new object */
241 obj = i915_gem_alloc_object(dev, size);
245 ret = drm_gem_handle_create(file, &obj->base, &handle);
246 /* drop reference from allocate - handle holds it now */
247 drm_gem_object_unreference_unlocked(&obj->base);
256 i915_gem_dumb_create(struct drm_file *file,
257 struct drm_device *dev,
258 struct drm_mode_create_dumb *args)
260 /* have to work out size/pitch and return them */
261 args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
262 args->size = args->pitch * args->height;
263 return i915_gem_create(file, dev,
264 args->size, &args->handle);
268 * Creates a new mm object and returns a handle to it.
271 i915_gem_create_ioctl(struct drm_device *dev, void *data,
272 struct drm_file *file)
274 struct drm_i915_gem_create *args = data;
276 return i915_gem_create(file, dev,
277 args->size, &args->handle);
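/* The helpers below copy to/from objects whose pages are bit-17
 * swizzled: on the affected platforms the hardware swaps the two
 * 64-byte cachelines within each 128-byte pair when physical address
 * bit 17 of the page is set. The copy loops therefore advance one
 * cacheline at a time and, when swizzling applies to the page, XOR
 * the GPU offset with 64 to reach the swizzled partner line. */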
281 __copy_to_user_swizzled(char __user *cpu_vaddr,
282 const char *gpu_vaddr, int gpu_offset,
285 int ret, cpu_offset = 0;
288 int cacheline_end = ALIGN(gpu_offset + 1, 64);
289 int this_length = min(cacheline_end - gpu_offset, length);
290 int swizzled_gpu_offset = gpu_offset ^ 64;
292 ret = __copy_to_user(cpu_vaddr + cpu_offset,
293 gpu_vaddr + swizzled_gpu_offset,
298 cpu_offset += this_length;
299 gpu_offset += this_length;
300 length -= this_length;
307 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
308 const char __user *cpu_vaddr,
311 int ret, cpu_offset = 0;
314 int cacheline_end = ALIGN(gpu_offset + 1, 64);
315 int this_length = min(cacheline_end - gpu_offset, length);
316 int swizzled_gpu_offset = gpu_offset ^ 64;
318 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
319 cpu_vaddr + cpu_offset,
324 cpu_offset += this_length;
325 gpu_offset += this_length;
326 length -= this_length;
332 /* Per-page copy function for the shmem pread fastpath.
333 * Flushes invalid cachelines before reading the target if
334 * needs_clflush is set. */
336 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
337 char __user *user_data,
338 bool page_do_bit17_swizzling, bool needs_clflush)
343 if (unlikely(page_do_bit17_swizzling))
346 vaddr = kmap_atomic(page);
348 drm_clflush_virt_range(vaddr + shmem_page_offset,
350 ret = __copy_to_user_inatomic(user_data,
351 vaddr + shmem_page_offset,
353 kunmap_atomic(vaddr);
355 return ret ? -EFAULT : 0;
359 shmem_clflush_swizzled_range(char *addr, unsigned long length,
362 if (unlikely(swizzled)) {
363 unsigned long start = (unsigned long) addr;
364 unsigned long end = (unsigned long) addr + length;
366 /* For swizzling simply ensure that we always flush both
367 * channels. Lame, but simple and it works. Swizzled
368 * pwrite/pread is far from a hotpath - current userspace
369 * doesn't use it at all. */
370 start = round_down(start, 128);
371 end = round_up(end, 128);
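/* e.g. a flush of bytes 64..191 is widened to 0..255, covering both
 * halves of every 128-byte swizzle pair that the range touches. */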
373 drm_clflush_virt_range((void *)start, end - start);
375 drm_clflush_virt_range(addr, length);
380 /* Only difference to the fast-path function is that this can handle bit17
381 * and uses non-atomic copy and kmap functions. */
383 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
384 char __user *user_data,
385 bool page_do_bit17_swizzling, bool needs_clflush)
392 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
394 page_do_bit17_swizzling);
396 if (page_do_bit17_swizzling)
397 ret = __copy_to_user_swizzled(user_data,
398 vaddr, shmem_page_offset,
401 ret = __copy_to_user(user_data,
402 vaddr + shmem_page_offset,
406 return ret ? -EFAULT : 0;
410 i915_gem_shmem_pread(struct drm_device *dev,
411 struct drm_i915_gem_object *obj,
412 struct drm_i915_gem_pread *args,
413 struct drm_file *file)
415 char __user *user_data;
418 int shmem_page_offset, page_length, ret = 0;
419 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
421 int needs_clflush = 0;
422 struct sg_page_iter sg_iter;
424 user_data = to_user_ptr(args->data_ptr);
427 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
429 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
430 /* If we're not in the cpu read domain, set ourself into the gtt
431 * read domain and manually flush cachelines (if required). This
432 * optimizes for the case when the gpu will dirty the data
433 * anyway again before the next pread happens. */
434 needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
435 if (i915_gem_obj_bound_any(obj)) {
436 ret = i915_gem_object_set_to_gtt_domain(obj, false);
442 ret = i915_gem_object_get_pages(obj);
446 i915_gem_object_pin_pages(obj);
448 offset = args->offset;
450 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
451 offset >> PAGE_SHIFT) {
452 struct page *page = sg_page_iter_page(&sg_iter);
457 /* Operation in this page
459 * shmem_page_offset = offset within page in shmem file
460 * page_length = bytes to copy for this page
462 shmem_page_offset = offset_in_page(offset);
463 page_length = remain;
464 if ((shmem_page_offset + page_length) > PAGE_SIZE)
465 page_length = PAGE_SIZE - shmem_page_offset;
467 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
468 (page_to_phys(page) & (1 << 17)) != 0;
470 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
471 user_data, page_do_bit17_swizzling,
476 mutex_unlock(&dev->struct_mutex);
478 if (likely(!i915_prefault_disable) && !prefaulted) {
479 ret = fault_in_multipages_writeable(user_data, remain);
480 /* Userspace is tricking us, but we've already clobbered
481 * its pages with the prefault and promised to write the
482 * data up to the first fault. Hence ignore any errors
483 * and just continue. */
488 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
489 user_data, page_do_bit17_swizzling,
492 mutex_lock(&dev->struct_mutex);
495 mark_page_accessed(page);
500 remain -= page_length;
501 user_data += page_length;
502 offset += page_length;
506 i915_gem_object_unpin_pages(obj);
512 * Reads data from the object referenced by handle.
514 * On error, the contents of *data are undefined.
517 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
518 struct drm_file *file)
520 struct drm_i915_gem_pread *args = data;
521 struct drm_i915_gem_object *obj;
527 if (!access_ok(VERIFY_WRITE,
528 to_user_ptr(args->data_ptr),
532 ret = i915_mutex_lock_interruptible(dev);
536 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
537 if (&obj->base == NULL) {
542 /* Bounds check source. */
543 if (args->offset > obj->base.size ||
544 args->size > obj->base.size - args->offset) {
549 /* prime objects have no backing filp to GEM pread/pwrite
552 if (!obj->base.filp) {
557 trace_i915_gem_object_pread(obj, args->offset, args->size);
559 ret = i915_gem_shmem_pread(dev, obj, args, file);
562 drm_gem_object_unreference(&obj->base);
564 mutex_unlock(&dev->struct_mutex);
568 /* This is the fast write path which cannot handle
569 * page faults in the source data
573 fast_user_write(struct io_mapping *mapping,
574 loff_t page_base, int page_offset,
575 char __user *user_data,
578 void __iomem *vaddr_atomic;
580 unsigned long unwritten;
582 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
583 /* We can use the cpu mem copy function because this is X86. */
584 vaddr = (void __force*)vaddr_atomic + page_offset;
585 unwritten = __copy_from_user_inatomic_nocache(vaddr,
587 io_mapping_unmap_atomic(vaddr_atomic);
592 * This is the fast pwrite path, where we copy the data directly from the
593 * user into the GTT, uncached.
596 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
597 struct drm_i915_gem_object *obj,
598 struct drm_i915_gem_pwrite *args,
599 struct drm_file *file)
601 drm_i915_private_t *dev_priv = dev->dev_private;
603 loff_t offset, page_base;
604 char __user *user_data;
605 int page_offset, page_length, ret;
607 ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
611 ret = i915_gem_object_set_to_gtt_domain(obj, true);
615 ret = i915_gem_object_put_fence(obj);
619 user_data = to_user_ptr(args->data_ptr);
622 offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
625 /* Operation in this page
627 * page_base = page offset within aperture
628 * page_offset = offset within page
629 * page_length = bytes to copy for this page
631 page_base = offset & PAGE_MASK;
632 page_offset = offset_in_page(offset);
633 page_length = remain;
634 if ((page_offset + remain) > PAGE_SIZE)
635 page_length = PAGE_SIZE - page_offset;
637 /* If we get a fault while copying data, then (presumably) our
638 * source page isn't available. Return the error and we'll
639 * retry in the slow path.
641 if (fast_user_write(dev_priv->gtt.mappable, page_base,
642 page_offset, user_data, page_length)) {
647 remain -= page_length;
648 user_data += page_length;
649 offset += page_length;
653 i915_gem_object_unpin(obj);
658 /* Per-page copy function for the shmem pwrite fastpath.
659 * Flushes invalid cachelines before writing to the target if
660 * needs_clflush_before is set and flushes out any written cachelines after
661 * writing if needs_clflush is set. */
663 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
664 char __user *user_data,
665 bool page_do_bit17_swizzling,
666 bool needs_clflush_before,
667 bool needs_clflush_after)
672 if (unlikely(page_do_bit17_swizzling))
675 vaddr = kmap_atomic(page);
676 if (needs_clflush_before)
677 drm_clflush_virt_range(vaddr + shmem_page_offset,
679 ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
682 if (needs_clflush_after)
683 drm_clflush_virt_range(vaddr + shmem_page_offset,
685 kunmap_atomic(vaddr);
687 return ret ? -EFAULT : 0;
690 /* Only difference to the fast-path function is that this can handle bit17
691 * and uses non-atomic copy and kmap functions. */
693 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
694 char __user *user_data,
695 bool page_do_bit17_swizzling,
696 bool needs_clflush_before,
697 bool needs_clflush_after)
703 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
704 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
706 page_do_bit17_swizzling);
707 if (page_do_bit17_swizzling)
708 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
712 ret = __copy_from_user(vaddr + shmem_page_offset,
715 if (needs_clflush_after)
716 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
718 page_do_bit17_swizzling);
721 return ret ? -EFAULT : 0;
725 i915_gem_shmem_pwrite(struct drm_device *dev,
726 struct drm_i915_gem_object *obj,
727 struct drm_i915_gem_pwrite *args,
728 struct drm_file *file)
732 char __user *user_data;
733 int shmem_page_offset, page_length, ret = 0;
734 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
735 int hit_slowpath = 0;
736 int needs_clflush_after = 0;
737 int needs_clflush_before = 0;
738 struct sg_page_iter sg_iter;
740 user_data = to_user_ptr(args->data_ptr);
743 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
745 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
746 /* If we're not in the cpu write domain, set ourself into the gtt
747 * write domain and manually flush cachelines (if required). This
748 * optimizes for the case when the gpu will use the data
749 * right away and we therefore have to clflush anyway. */
750 needs_clflush_after = cpu_write_needs_clflush(obj);
751 if (i915_gem_obj_bound_any(obj)) {
752 ret = i915_gem_object_set_to_gtt_domain(obj, true);
757 /* Same trick applies to invalidate partially written cachelines read
759 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
760 needs_clflush_before =
761 !cpu_cache_is_coherent(dev, obj->cache_level);
763 ret = i915_gem_object_get_pages(obj);
767 i915_gem_object_pin_pages(obj);
769 offset = args->offset;
772 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
773 offset >> PAGE_SHIFT) {
774 struct page *page = sg_page_iter_page(&sg_iter);
775 int partial_cacheline_write;
780 /* Operation in this page
782 * shmem_page_offset = offset within page in shmem file
783 * page_length = bytes to copy for this page
785 shmem_page_offset = offset_in_page(offset);
787 page_length = remain;
788 if ((shmem_page_offset + page_length) > PAGE_SIZE)
789 page_length = PAGE_SIZE - shmem_page_offset;
791 /* If we don't overwrite a cacheline completely we need to be
792 * careful to have up-to-date data by first clflushing. Don't
793 * overcomplicate things and just flush the entire written range. */
794 partial_cacheline_write = needs_clflush_before &&
795 ((shmem_page_offset | page_length)
796 & (boot_cpu_data.x86_clflush_size - 1));
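/* e.g. with 64-byte cachelines, a 100-byte write at page offset 32
 * neither starts nor ends on a cacheline boundary, so it counts as a
 * partial cacheline write and needs the clflush before copying. */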
798 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
799 (page_to_phys(page) & (1 << 17)) != 0;
801 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
802 user_data, page_do_bit17_swizzling,
803 partial_cacheline_write,
804 needs_clflush_after);
809 mutex_unlock(&dev->struct_mutex);
810 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
811 user_data, page_do_bit17_swizzling,
812 partial_cacheline_write,
813 needs_clflush_after);
815 mutex_lock(&dev->struct_mutex);
818 set_page_dirty(page);
819 mark_page_accessed(page);
824 remain -= page_length;
825 user_data += page_length;
826 offset += page_length;
830 i915_gem_object_unpin_pages(obj);
834 * Fixup: Flush cpu caches in case we didn't flush the dirty
835 * cachelines in-line while writing and the object moved
836 * out of the cpu write domain while we've dropped the lock.
838 if (!needs_clflush_after &&
839 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
840 if (i915_gem_clflush_object(obj, obj->pin_display))
841 i915_gem_chipset_flush(dev);
845 if (needs_clflush_after)
846 i915_gem_chipset_flush(dev);
852 * Writes data to the object referenced by handle.
854 * On error, the contents of the buffer that were to be modified are undefined.
857 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
858 struct drm_file *file)
860 struct drm_i915_gem_pwrite *args = data;
861 struct drm_i915_gem_object *obj;
867 if (!access_ok(VERIFY_READ,
868 to_user_ptr(args->data_ptr),
872 if (likely(!i915_prefault_disable)) {
873 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
879 ret = i915_mutex_lock_interruptible(dev);
883 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
884 if (&obj->base == NULL) {
889 /* Bounds check destination. */
890 if (args->offset > obj->base.size ||
891 args->size > obj->base.size - args->offset) {
896 /* prime objects have no backing filp to GEM pread/pwrite
899 if (!obj->base.filp) {
904 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
907 /* We can only do the GTT pwrite on untiled buffers, as otherwise
908 * it would end up going through the fenced access, and we'll get
909 * different detiling behavior between reading and writing.
910 * pread/pwrite currently are reading and writing from the CPU
911 * perspective, requiring manual detiling by the client.
914 ret = i915_gem_phys_pwrite(dev, obj, args, file);
918 if (obj->tiling_mode == I915_TILING_NONE &&
919 obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
920 cpu_write_needs_clflush(obj)) {
921 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
922 /* Note that the gtt paths might fail with non-page-backed user
923 * pointers (e.g. gtt mappings when moving data between
924 * textures). Fall back to the shmem path in that case. */
927 if (ret == -EFAULT || ret == -ENOSPC)
928 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
931 drm_gem_object_unreference(&obj->base);
933 mutex_unlock(&dev->struct_mutex);
938 i915_gem_check_wedge(struct i915_gpu_error *error,
941 if (i915_reset_in_progress(error)) {
942 /* Non-interruptible callers can't handle -EAGAIN, hence return
943 * -EIO unconditionally for these. */
947 /* Recovery complete, but the reset failed ... */
948 if (i915_terminally_wedged(error))
958 * Compare seqno against outstanding lazy request. Emit a request if they are equal.
962 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
966 BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
969 if (seqno == ring->outstanding_lazy_request)
970 ret = i915_add_request(ring, NULL);
976 * __wait_seqno - wait until execution of seqno has finished
977 * @ring: the ring expected to report seqno
979 * @reset_counter: reset sequence associated with the given seqno
980 * @interruptible: do an interruptible wait (normally yes)
981 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
983 * Note: It is of utmost importance that the passed in seqno and reset_counter
984 * values have been read by the caller in an smp safe manner. Where read-side
985 * locks are involved, it is sufficient to read the reset_counter before
986 * unlocking the lock that protects the seqno. For lockless tricks, the
987 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
990 * Returns 0 if the seqno was found within the allotted time. Else returns the
991 * errno with remaining time filled in timeout argument.
993 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
994 unsigned reset_counter,
995 bool interruptible, struct timespec *timeout)
997 drm_i915_private_t *dev_priv = ring->dev->dev_private;
998 struct timespec before, now, wait_time={1,0};
999 unsigned long timeout_jiffies;
1001 bool wait_forever = true;
1004 WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
1006 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1009 trace_i915_gem_request_wait_begin(ring, seqno);
1011 if (timeout != NULL) {
1012 wait_time = *timeout;
1013 wait_forever = false;
1016 timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
1018 if (WARN_ON(!ring->irq_get(ring)))
1021 /* Record current time in case interrupted by signal, or wedged */
1022 getrawmonotonic(&before);
1025 (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
1026 i915_reset_in_progress(&dev_priv->gpu_error) || \
1027 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
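/* Wake up when the seqno completes, when a reset starts, or when a
 * reset has already come and gone behind our back (counter changed). */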
1030 end = wait_event_interruptible_timeout(ring->irq_queue,
1034 end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1037 /* We need to check whether any gpu reset happened in between
1038 * the caller grabbing the seqno and now ... */
1039 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1042 /* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
1044 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1047 } while (end == 0 && wait_forever);
1049 getrawmonotonic(&now);
1051 ring->irq_put(ring);
1052 trace_i915_gem_request_wait_end(ring, seqno);
1056 struct timespec sleep_time = timespec_sub(now, before);
1057 *timeout = timespec_sub(*timeout, sleep_time);
1058 if (!timespec_valid(timeout)) /* i.e. negative time remains */
1059 set_normalized_timespec(timeout, 0, 0);
1064 case -EAGAIN: /* Wedged */
1065 case -ERESTARTSYS: /* Signal */
1067 case 0: /* Timeout */
1069 default: /* Completed */
1070 WARN_ON(end < 0); /* We're not aware of other errors */
1076 * Waits for a sequence number to be signaled, and cleans up the
1077 * request and object lists appropriately for that event.
1080 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1082 struct drm_device *dev = ring->dev;
1083 struct drm_i915_private *dev_priv = dev->dev_private;
1084 bool interruptible = dev_priv->mm.interruptible;
1087 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1090 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1094 ret = i915_gem_check_olr(ring, seqno);
1098 return __wait_seqno(ring, seqno,
1099 atomic_read(&dev_priv->gpu_error.reset_counter),
1100 interruptible, NULL);
1104 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
1105 struct intel_ring_buffer *ring)
1107 i915_gem_retire_requests_ring(ring);
1109 /* Manually manage the write flush as we may have not yet
1110 * retired the buffer.
1112 * Note that the last_write_seqno is always the earlier of
1113 * the two (read/write) seqno, so if we have successfully waited,
1114 * we know we have passed the last write.
1116 obj->last_write_seqno = 0;
1117 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1123 * Ensures that all rendering to the object has completed and the object is
1124 * safe to unbind from the GTT or access from the CPU.
1126 static __must_check int
1127 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1130 struct intel_ring_buffer *ring = obj->ring;
1134 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1138 ret = i915_wait_seqno(ring, seqno);
1142 return i915_gem_object_wait_rendering__tail(obj, ring);
1145 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1146 * as the object state may change during this call.
1148 static __must_check int
1149 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1152 struct drm_device *dev = obj->base.dev;
1153 struct drm_i915_private *dev_priv = dev->dev_private;
1154 struct intel_ring_buffer *ring = obj->ring;
1155 unsigned reset_counter;
1159 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1160 BUG_ON(!dev_priv->mm.interruptible);
1162 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1166 ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
1170 ret = i915_gem_check_olr(ring, seqno);
1174 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1175 mutex_unlock(&dev->struct_mutex);
1176 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
1177 mutex_lock(&dev->struct_mutex);
1181 return i915_gem_object_wait_rendering__tail(obj, ring);
1185 * Called when user space prepares to use an object with the CPU, either
1186 * through the mmap ioctl's mapping or a GTT mapping.
1189 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1190 struct drm_file *file)
1192 struct drm_i915_gem_set_domain *args = data;
1193 struct drm_i915_gem_object *obj;
1194 uint32_t read_domains = args->read_domains;
1195 uint32_t write_domain = args->write_domain;
1198 /* Only handle setting domains to types used by the CPU. */
1199 if (write_domain & I915_GEM_GPU_DOMAINS)
1202 if (read_domains & I915_GEM_GPU_DOMAINS)
1205 /* Having something in the write domain implies it's in the read
1206 * domain, and only that read domain. Enforce that in the request.
1208 if (write_domain != 0 && read_domains != write_domain)
1211 ret = i915_mutex_lock_interruptible(dev);
1215 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1216 if (&obj->base == NULL) {
1221 /* Try to flush the object off the GPU without holding the lock.
1222 * We will repeat the flush holding the lock in the normal manner
1223 * to catch cases where we are gazumped.
1225 ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
1229 if (read_domains & I915_GEM_DOMAIN_GTT) {
1230 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1232 /* Silently promote "you're not bound, there was nothing to do"
1233 * to success, since the client was just asking us to
1234 * make sure everything was done.
1239 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1243 drm_gem_object_unreference(&obj->base);
1245 mutex_unlock(&dev->struct_mutex);
1250 * Called when user space has done writes to this buffer
1253 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1254 struct drm_file *file)
1256 struct drm_i915_gem_sw_finish *args = data;
1257 struct drm_i915_gem_object *obj;
1260 ret = i915_mutex_lock_interruptible(dev);
1264 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1265 if (&obj->base == NULL) {
1270 /* Pinned buffers may be scanout, so flush the cache */
1271 if (obj->pin_display)
1272 i915_gem_object_flush_cpu_write_domain(obj, true);
1274 drm_gem_object_unreference(&obj->base);
1276 mutex_unlock(&dev->struct_mutex);
1281 * Maps the contents of an object, returning the address it is mapped
1284 * While the mapping holds a reference on the contents of the object, it doesn't
1285 * imply a ref on the object itself.
1288 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1289 struct drm_file *file)
1291 struct drm_i915_gem_mmap *args = data;
1292 struct drm_gem_object *obj;
1295 obj = drm_gem_object_lookup(dev, file, args->handle);
1299 /* prime objects have no backing filp to GEM mmap
1303 drm_gem_object_unreference_unlocked(obj);
1307 addr = vm_mmap(obj->filp, 0, args->size,
1308 PROT_READ | PROT_WRITE, MAP_SHARED,
1310 drm_gem_object_unreference_unlocked(obj);
1311 if (IS_ERR((void *)addr))
1314 args->addr_ptr = (uint64_t) addr;
1320 * i915_gem_fault - fault a page into the GTT
1321 * @vma: VMA in question
1324 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1325 * from userspace. The fault handler takes care of binding the object to
1326 * the GTT (if needed), allocating and programming a fence register (again,
1327 * only if needed based on whether the old reg is still valid or the object
1328 * is tiled) and inserting a new PTE into the faulting process.
1330 * Note that the faulting process may involve evicting existing objects
1331 * from the GTT and/or fence registers to make room. So performance may
1332 * suffer if the GTT working set is large or there are few fence registers
1335 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1337 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1338 struct drm_device *dev = obj->base.dev;
1339 drm_i915_private_t *dev_priv = dev->dev_private;
1340 pgoff_t page_offset;
1343 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1345 /* We don't use vmf->pgoff since that has the fake offset */
1346 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1349 ret = i915_mutex_lock_interruptible(dev);
1353 trace_i915_gem_object_fault(obj, page_offset, true, write);
1355 /* Access to snoopable pages through the GTT is incoherent. */
1356 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1361 /* Now bind it into the GTT if needed */
1362 ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
1366 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1370 ret = i915_gem_object_get_fence(obj);
1374 obj->fault_mappable = true;
1376 pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1380 /* Finally, remap it using the new GTT offset */
1381 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1383 i915_gem_object_unpin(obj);
1385 mutex_unlock(&dev->struct_mutex);
1389 /* If this -EIO is due to a gpu hang, give the reset code a
1390 * chance to clean up the mess. Otherwise return the proper
1392 if (i915_terminally_wedged(&dev_priv->gpu_error))
1393 return VM_FAULT_SIGBUS;
1396 * EAGAIN means the gpu is hung and we'll wait for the error
1397 * handler to reset everything when re-faulting in
1398 * i915_mutex_lock_interruptible.
1405 * EBUSY is ok: this just means that another thread
1406 * already did the job.
1408 return VM_FAULT_NOPAGE;
1410 return VM_FAULT_OOM;
1412 return VM_FAULT_SIGBUS;
1414 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1415 return VM_FAULT_SIGBUS;
1420 * i915_gem_release_mmap - remove physical page mappings
1421 * @obj: obj in question
1423 * Preserve the reservation of the mmapping with the DRM core code, but
1424 * relinquish ownership of the pages back to the system.
1426 * It is vital that we remove the page mapping if we have mapped a tiled
1427 * object through the GTT and then lose the fence register due to
1428 * resource pressure. Similarly if the object has been moved out of the
1429 * aperture, then pages mapped into userspace must be revoked. Removing the
1430 * mapping will then trigger a page fault on the next user access, allowing
1431 * fixup by i915_gem_fault().
1434 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1436 if (!obj->fault_mappable)
1439 drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
1440 obj->fault_mappable = false;
1444 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1448 if (INTEL_INFO(dev)->gen >= 4 ||
1449 tiling_mode == I915_TILING_NONE)
1452 /* Previous chips need a power-of-two fence region when tiling */
1453 if (INTEL_INFO(dev)->gen == 3)
1454 gtt_size = 1024*1024;
1456 gtt_size = 512*1024;
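/* Double until the region covers the object, e.g. a 640KiB tiled
 * object on gen2 ends up with a 1MiB fence region. */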
1458 while (gtt_size < size)
1465 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1466 * @obj: object to check
1468 * Return the required GTT alignment for an object, taking into account
1469 * potential fence register mapping.
1472 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1473 int tiling_mode, bool fenced)
1476 * Minimum alignment is 4k (GTT page size), but might be greater
1477 * if a fence register is needed for the object.
1479 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1480 tiling_mode == I915_TILING_NONE)
1484 * Previous chips need to be aligned to the size of the smallest
1485 * fence register that can contain the object.
1487 return i915_gem_get_gtt_size(dev, size, tiling_mode);
1490 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1492 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1495 if (drm_vma_node_has_offset(&obj->base.vma_node))
1498 dev_priv->mm.shrinker_no_lock_stealing = true;
1500 ret = drm_gem_create_mmap_offset(&obj->base);
1504 /* Badly fragmented mmap space? The only way we can recover
1505 * space is by destroying unwanted objects. We can't randomly release
1506 * mmap_offsets as userspace expects them to be persistent for the
1507 * lifetime of the objects. The closest we can do is to release the
1508 * offsets on purgeable objects by truncating it and marking it purged,
1509 * which prevents userspace from ever using that object again.
1511 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1512 ret = drm_gem_create_mmap_offset(&obj->base);
1516 i915_gem_shrink_all(dev_priv);
1517 ret = drm_gem_create_mmap_offset(&obj->base);
1519 dev_priv->mm.shrinker_no_lock_stealing = false;
1524 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1526 drm_gem_free_mmap_offset(&obj->base);
1530 i915_gem_mmap_gtt(struct drm_file *file,
1531 struct drm_device *dev,
1535 struct drm_i915_private *dev_priv = dev->dev_private;
1536 struct drm_i915_gem_object *obj;
1539 ret = i915_mutex_lock_interruptible(dev);
1543 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1544 if (&obj->base == NULL) {
1549 if (obj->base.size > dev_priv->gtt.mappable_end) {
1554 if (obj->madv != I915_MADV_WILLNEED) {
1555 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1560 ret = i915_gem_object_create_mmap_offset(obj);
1564 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
1567 drm_gem_object_unreference(&obj->base);
1569 mutex_unlock(&dev->struct_mutex);
1574 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1576 * @data: GTT mapping ioctl data
1577 * @file: GEM object info
1579 * Simply returns the fake offset to userspace so it can mmap it.
1580 * The mmap call will end up in drm_gem_mmap(), which will set things
1581 * up so we can get faults in the handler above.
1583 * The fault handler will take care of binding the object into the GTT
1584 * (since it may have been evicted to make room for something), allocating
1585 * a fence register, and mapping the appropriate aperture address into
1589 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1590 struct drm_file *file)
1592 struct drm_i915_gem_mmap_gtt *args = data;
1594 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1597 /* Immediately discard the backing storage */
1599 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1601 struct inode *inode;
1603 i915_gem_object_free_mmap_offset(obj);
1605 if (obj->base.filp == NULL)
1608 /* Our goal here is to return as much of the memory as
1609 * is possible back to the system as we are called from OOM.
1610 * To do this we must instruct the shmfs to drop all of its
1611 * backing pages, *now*.
1613 inode = file_inode(obj->base.filp);
1614 shmem_truncate_range(inode, 0, (loff_t)-1);
1616 obj->madv = __I915_MADV_PURGED;
1620 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1622 return obj->madv == I915_MADV_DONTNEED;
1626 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1628 struct sg_page_iter sg_iter;
1631 BUG_ON(obj->madv == __I915_MADV_PURGED);
1633 ret = i915_gem_object_set_to_cpu_domain(obj, true);
1635 /* In the event of a disaster, abandon all caches and
1636 * hope for the best.
1638 WARN_ON(ret != -EIO);
1639 i915_gem_clflush_object(obj, true);
1640 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1643 if (i915_gem_object_needs_bit17_swizzle(obj))
1644 i915_gem_object_save_bit_17_swizzle(obj);
1646 if (obj->madv == I915_MADV_DONTNEED)
1649 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
1650 struct page *page = sg_page_iter_page(&sg_iter);
1653 set_page_dirty(page);
1655 if (obj->madv == I915_MADV_WILLNEED)
1656 mark_page_accessed(page);
1658 page_cache_release(page);
1662 sg_free_table(obj->pages);
1667 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1669 const struct drm_i915_gem_object_ops *ops = obj->ops;
1671 if (obj->pages == NULL)
1674 if (obj->pages_pin_count)
1677 BUG_ON(i915_gem_obj_bound_any(obj));
1679 /* ->put_pages might need to allocate memory for the bit17 swizzle
1680 * array, hence protect them from being reaped by removing them from gtt
1682 list_del(&obj->global_list);
1684 ops->put_pages(obj);
1687 if (i915_gem_object_is_purgeable(obj))
1688 i915_gem_object_truncate(obj);
1694 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1695 bool purgeable_only)
1697 struct list_head still_bound_list;
1698 struct drm_i915_gem_object *obj, *next;
1701 list_for_each_entry_safe(obj, next,
1702 &dev_priv->mm.unbound_list,
1704 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1705 i915_gem_object_put_pages(obj) == 0) {
1706 count += obj->base.size >> PAGE_SHIFT;
1707 if (count >= target)
1713 * As we may completely rewrite the bound list whilst unbinding
1714 * (due to retiring requests) we have to strictly process only
1715 * one element of the list at a time, and recheck the list
1716 * on every iteration.
1718 INIT_LIST_HEAD(&still_bound_list);
1719 while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
1720 struct i915_vma *vma, *v;
1722 obj = list_first_entry(&dev_priv->mm.bound_list,
1723 typeof(*obj), global_list);
1724 list_move_tail(&obj->global_list, &still_bound_list);
1726 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1730 * Hold a reference whilst we unbind this object, as we may
1731 * end up waiting for and retiring requests. This might
1732 * release the final reference (held by the active list)
1733 * and result in the object being freed from under us.
1736 * Note 1: Shrinking the bound list is special since only active
1737 * (and hence bound objects) can contain such limbo objects, so
1738 * we don't need special tricks for shrinking the unbound list.
1739 * The only other place where we have to be careful with active
1740 * objects suddenly disappearing due to retiring requests is the
1743 * Note 2: Even though the bound list doesn't hold a reference
1744 * to the object we can safely grab one here: The final object
1745 * unreferencing and the bound_list are both protected by the
1746 * dev->struct_mutex and so we won't ever be able to observe an
1747 * object on the bound_list with a reference count equal to 0.
1749 drm_gem_object_reference(&obj->base);
1751 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
1752 if (i915_vma_unbind(vma))
1755 if (i915_gem_object_put_pages(obj) == 0)
1756 count += obj->base.size >> PAGE_SHIFT;
1758 drm_gem_object_unreference(&obj->base);
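/* Splice the objects whose pages we did not release back onto the
 * bound list. */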
1760 list_splice(&still_bound_list, &dev_priv->mm.bound_list);
1766 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1768 return __i915_gem_shrink(dev_priv, target, true);
1772 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1774 struct drm_i915_gem_object *obj, *next;
1777 i915_gem_evict_everything(dev_priv->dev);
1779 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
1781 if (obj->pages_pin_count == 0)
1782 freed += obj->base.size >> PAGE_SHIFT;
1783 i915_gem_object_put_pages(obj);
1789 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1791 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1793 struct address_space *mapping;
1794 struct sg_table *st;
1795 struct scatterlist *sg;
1796 struct sg_page_iter sg_iter;
1798 unsigned long last_pfn = 0; /* suppress gcc warning */
1801 /* Assert that the object is not currently in any GPU domain. As it
1802 * wasn't in the GTT, there shouldn't be any way it could have been in
1805 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1806 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1808 st = kmalloc(sizeof(*st), GFP_KERNEL);
1812 page_count = obj->base.size / PAGE_SIZE;
1813 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
1818 /* Get the list of pages out of our struct file. They'll be pinned
1819 * at this point until we release them.
1821 * Fail silently without starting the shrinker
1823 mapping = file_inode(obj->base.filp)->i_mapping;
1824 gfp = mapping_gfp_mask(mapping);
1825 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1826 gfp &= ~(__GFP_IO | __GFP_WAIT);
1829 for (i = 0; i < page_count; i++) {
1830 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1832 i915_gem_purge(dev_priv, page_count);
1833 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1836 /* We've tried hard to allocate the memory by reaping
1837 * our own buffer, now let the real VM do its job and
1838 * go down in flames if truly OOM.
1840 gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
1841 gfp |= __GFP_IO | __GFP_WAIT;
1843 i915_gem_shrink_all(dev_priv);
1844 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
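/* Back off to the quiet, non-blocking gfp flags again so that the
 * remaining pages are allocated without waking kswapd or warning. */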
1848 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1849 gfp &= ~(__GFP_IO | __GFP_WAIT);
1851 #ifdef CONFIG_SWIOTLB
1852 if (swiotlb_nr_tbl()) {
1854 sg_set_page(sg, page, PAGE_SIZE, 0);
1859 if (!i || page_to_pfn(page) != last_pfn + 1) {
1863 sg_set_page(sg, page, PAGE_SIZE, 0);
1865 sg->length += PAGE_SIZE;
1867 last_pfn = page_to_pfn(page);
1869 #ifdef CONFIG_SWIOTLB
1870 if (!swiotlb_nr_tbl())
1875 if (i915_gem_object_needs_bit17_swizzle(obj))
1876 i915_gem_object_do_bit_17_swizzle(obj);
1882 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
1883 page_cache_release(sg_page_iter_page(&sg_iter));
1886 return PTR_ERR(page);
1889 /* Ensure that the associated pages are gathered from the backing storage
1890 * and pinned into our object. i915_gem_object_get_pages() may be called
1891 * multiple times before they are released by a single call to
1892 * i915_gem_object_put_pages() - once the pages are no longer referenced
1893 * either as a result of memory pressure (reaping pages under the shrinker)
1894 * or as the object is itself released.
1897 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1899 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1900 const struct drm_i915_gem_object_ops *ops = obj->ops;
1906 if (obj->madv != I915_MADV_WILLNEED) {
1907 DRM_ERROR("Attempting to obtain a purgeable object\n");
1911 BUG_ON(obj->pages_pin_count);
1913 ret = ops->get_pages(obj);
1917 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
1922 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1923 struct intel_ring_buffer *ring)
1925 struct drm_device *dev = obj->base.dev;
1926 struct drm_i915_private *dev_priv = dev->dev_private;
1927 u32 seqno = intel_ring_get_seqno(ring);
1929 BUG_ON(ring == NULL);
1930 if (obj->ring != ring && obj->last_write_seqno) {
1931 /* Keep the seqno relative to the current ring */
1932 obj->last_write_seqno = seqno;
1936 /* Add a reference if we're newly entering the active list. */
1938 drm_gem_object_reference(&obj->base);
1942 list_move_tail(&obj->ring_list, &ring->active_list);
1944 obj->last_read_seqno = seqno;
1946 if (obj->fenced_gpu_access) {
1947 obj->last_fenced_seqno = seqno;
1949 /* Bump MRU to take account of the delayed flush */
1950 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1951 struct drm_i915_fence_reg *reg;
1953 reg = &dev_priv->fence_regs[obj->fence_reg];
1954 list_move_tail(&reg->lru_list,
1955 &dev_priv->mm.fence_list);
1961 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1963 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1964 struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
1965 struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
1967 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
1968 BUG_ON(!obj->active);
1970 list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
1972 list_del_init(&obj->ring_list);
1975 obj->last_read_seqno = 0;
1976 obj->last_write_seqno = 0;
1977 obj->base.write_domain = 0;
1979 obj->last_fenced_seqno = 0;
1980 obj->fenced_gpu_access = false;
1983 drm_gem_object_unreference(&obj->base);
1985 WARN_ON(i915_verify_lists(dev));
1989 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
1991 struct drm_i915_private *dev_priv = dev->dev_private;
1992 struct intel_ring_buffer *ring;
1995 /* Carefully retire all requests without writing to the rings */
1996 for_each_ring(ring, dev_priv, i) {
1997 ret = intel_ring_idle(ring);
2001 i915_gem_retire_requests(dev);
2003 /* Finally reset hw state */
2004 for_each_ring(ring, dev_priv, i) {
2005 intel_ring_init_seqno(ring, seqno);
2007 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
2008 ring->sync_seqno[j] = 0;
2014 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2016 struct drm_i915_private *dev_priv = dev->dev_private;
2022 /* HWS page needs to be set less than what we
2023 * will inject to ring
2025 ret = i915_gem_init_seqno(dev, seqno - 1);
2029 /* Carefully set the last_seqno value so that wrap
2030 * detection still works
2032 dev_priv->next_seqno = seqno;
2033 dev_priv->last_seqno = seqno - 1;
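/* e.g. i915_gem_set_seqno(dev, 1) would leave last_seqno at 0, which
 * is reserved as a non-seqno value, so step back once more below. */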
2034 if (dev_priv->last_seqno == 0)
2035 dev_priv->last_seqno--;
2041 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2043 struct drm_i915_private *dev_priv = dev->dev_private;
2045 /* reserve 0 for non-seqno */
2046 if (dev_priv->next_seqno == 0) {
2047 int ret = i915_gem_init_seqno(dev, 0);
2051 dev_priv->next_seqno = 1;
2054 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
2058 int __i915_add_request(struct intel_ring_buffer *ring,
2059 struct drm_file *file,
2060 struct drm_i915_gem_object *obj,
2063 drm_i915_private_t *dev_priv = ring->dev->dev_private;
2064 struct drm_i915_gem_request *request;
2065 u32 request_ring_position, request_start;
2069 request_start = intel_ring_get_tail(ring);
2071 * Emit any outstanding flushes - execbuf can fail to emit the flush
2072 * after having emitted the batchbuffer command. Hence we need to fix
2073 * things up similar to emitting the lazy request. The difference here
2074 * is that the flush _must_ happen before the next request, no matter
2077 ret = intel_ring_flush_all_caches(ring);
2081 request = kmalloc(sizeof(*request), GFP_KERNEL);
2082 if (request == NULL)
2086 /* Record the position of the start of the request so that
2087 * should we detect the updated seqno part-way through the
2088 * GPU processing the request, we never over-estimate the
2089 * position of the head.
2091 request_ring_position = intel_ring_get_tail(ring);
2093 ret = ring->add_request(ring);
2099 request->seqno = intel_ring_get_seqno(ring);
2100 request->ring = ring;
2101 request->head = request_start;
2102 request->tail = request_ring_position;
2103 request->ctx = ring->last_context;
2104 request->batch_obj = obj;
2106 /* Whilst this request exists, batch_obj will be on the
2107 * active_list, and so will hold the active reference. Only when this
2108 * request is retired will the batch_obj be moved onto the
2109 * inactive_list and lose its active reference. Hence we do not need
2110 * to explicitly hold another reference here.
2114 i915_gem_context_reference(request->ctx);
2116 request->emitted_jiffies = jiffies;
2117 was_empty = list_empty(&ring->request_list);
2118 list_add_tail(&request->list, &ring->request_list);
2119 request->file_priv = NULL;
2122 struct drm_i915_file_private *file_priv = file->driver_priv;
2124 spin_lock(&file_priv->mm.lock);
2125 request->file_priv = file_priv;
2126 list_add_tail(&request->client_list,
2127 &file_priv->mm.request_list);
2128 spin_unlock(&file_priv->mm.lock);
2131 trace_i915_gem_request_add(ring, request->seqno);
2132 ring->outstanding_lazy_request = 0;
2134 if (!dev_priv->ums.mm_suspended) {
2135 i915_queue_hangcheck(ring->dev);
2138 queue_delayed_work(dev_priv->wq,
2139 &dev_priv->mm.retire_work,
2140 round_jiffies_up_relative(HZ));
2141 intel_mark_busy(dev_priv->dev);
2146 *out_seqno = request->seqno;
2151 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2153 struct drm_i915_file_private *file_priv = request->file_priv;
2158 spin_lock(&file_priv->mm.lock);
2159 if (request->file_priv) {
2160 list_del(&request->client_list);
2161 request->file_priv = NULL;
2163 spin_unlock(&file_priv->mm.lock);
2166 static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
2167 struct i915_address_space *vm)
2169 if (acthd >= i915_gem_obj_offset(obj, vm) &&
2170 acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
2176 static bool i915_head_inside_request(const u32 acthd_unmasked,
2177 const u32 request_start,
2178 const u32 request_end)
2180 const u32 acthd = acthd_unmasked & HEAD_ADDR;
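/* A request may wrap past the end of the ring, in which case
 * request_start > request_end and the occupied range is split in two. */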
2182 if (request_start < request_end) {
2183 if (acthd >= request_start && acthd < request_end)
2185 } else if (request_start > request_end) {
2186 if (acthd >= request_start || acthd < request_end)
2193 static struct i915_address_space *
2194 request_to_vm(struct drm_i915_gem_request *request)
2196 struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
2197 struct i915_address_space *vm;
2199 vm = &dev_priv->gtt.base;
2204 static bool i915_request_guilty(struct drm_i915_gem_request *request,
2205 const u32 acthd, bool *inside)
2207 /* There is a possibility that the unmasked head address
2208 * pointing inside the ring matches the batch_obj address range.
2209 * However this is extremely unlikely.
2211 if (request->batch_obj) {
2212 if (i915_head_inside_object(acthd, request->batch_obj,
2213 request_to_vm(request))) {
2219 if (i915_head_inside_request(acthd, request->head, request->tail)) {
2227 static void i915_set_reset_status(struct intel_ring_buffer *ring,
2228 struct drm_i915_gem_request *request,
2231 struct i915_ctx_hang_stats *hs = NULL;
2232 bool inside, guilty;
2233 unsigned long offset = 0;
2235 /* Innocent until proven guilty */
2238 if (request->batch_obj)
2239 offset = i915_gem_obj_offset(request->batch_obj,
2240 request_to_vm(request));
2242 if (ring->hangcheck.action != HANGCHECK_WAIT &&
2243 i915_request_guilty(request, acthd, &inside)) {
2244 DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
2246 inside ? "inside" : "flushing",
2248 request->ctx ? request->ctx->id : 0,
2254 /* If contexts are disabled or this is the default context, use
2255 * file_priv->hang_stats
2257 if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
2258 hs = &request->ctx->hang_stats;
2259 else if (request->file_priv)
2260 hs = &request->file_priv->hang_stats;
2266 hs->batch_pending++;
2270 static void i915_gem_free_request(struct drm_i915_gem_request *request)
2272 list_del(&request->list);
2273 i915_gem_request_remove_from_client(request);
2276 i915_gem_context_unreference(request->ctx);
2281 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2282 struct intel_ring_buffer *ring)
2284 u32 completed_seqno;
2287 acthd = intel_ring_get_active_head(ring);
2288 completed_seqno = ring->get_seqno(ring, false);
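/* Requests whose seqno lies beyond the last completed one were still
 * outstanding at hang time; record hang statistics for them. */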
2290 while (!list_empty(&ring->request_list)) {
2291 struct drm_i915_gem_request *request;
2293 request = list_first_entry(&ring->request_list,
2294 struct drm_i915_gem_request,
2297 if (request->seqno > completed_seqno)
2298 i915_set_reset_status(ring, request, acthd);
2300 i915_gem_free_request(request);
2303 while (!list_empty(&ring->active_list)) {
2304 struct drm_i915_gem_object *obj;
2306 obj = list_first_entry(&ring->active_list,
2307 struct drm_i915_gem_object,
2310 i915_gem_object_move_to_inactive(obj);
2314 void i915_gem_restore_fences(struct drm_device *dev)
2316 struct drm_i915_private *dev_priv = dev->dev_private;
2319 for (i = 0; i < dev_priv->num_fence_regs; i++) {
2320 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2323 * Commit delayed tiling changes if we have an object still
2324 * attached to the fence, otherwise just clear the fence.
2327 i915_gem_object_update_fence(reg->obj, reg,
2328 reg->obj->tiling_mode);
2330 i915_gem_write_fence(dev, i, NULL);
2335 void i915_gem_reset(struct drm_device *dev)
2337 struct drm_i915_private *dev_priv = dev->dev_private;
2338 struct intel_ring_buffer *ring;
2341 for_each_ring(ring, dev_priv, i)
2342 i915_gem_reset_ring_lists(dev_priv, ring);
2344 i915_gem_restore_fences(dev);
2348 * This function clears the request list as sequence numbers are passed.
2351 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2355 if (list_empty(&ring->request_list))
2358 WARN_ON(i915_verify_lists(ring->dev));
2360 seqno = ring->get_seqno(ring, true);
2362 while (!list_empty(&ring->request_list)) {
2363 struct drm_i915_gem_request *request;
2365 request = list_first_entry(&ring->request_list,
2366 struct drm_i915_gem_request,
2369 if (!i915_seqno_passed(seqno, request->seqno))
2372 trace_i915_gem_request_retire(ring, request->seqno);
2373 /* We know the GPU must have read the request to have
2374 * sent us the seqno + interrupt, so use the position
2375 * of tail of the request to update the last known position
2378 ring->last_retired_head = request->tail;
2380 i915_gem_free_request(request);
2383 /* Move any buffers on the active list that are no longer referenced
2384 * by the ringbuffer to the flushing/inactive lists as appropriate.
2386 while (!list_empty(&ring->active_list)) {
2387 struct drm_i915_gem_object *obj;
2389 obj = list_first_entry(&ring->active_list,
2390 struct drm_i915_gem_object,
2393 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2396 i915_gem_object_move_to_inactive(obj);
2399 if (unlikely(ring->trace_irq_seqno &&
2400 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2401 ring->irq_put(ring);
2402 ring->trace_irq_seqno = 0;
2405 WARN_ON(i915_verify_lists(ring->dev));
2409 i915_gem_retire_requests(struct drm_device *dev)
2411 drm_i915_private_t *dev_priv = dev->dev_private;
2412 struct intel_ring_buffer *ring;
2415 for_each_ring(ring, dev_priv, i)
2416 i915_gem_retire_requests_ring(ring);
2420 i915_gem_retire_work_handler(struct work_struct *work)
2422 drm_i915_private_t *dev_priv;
2423 struct drm_device *dev;
2424 struct intel_ring_buffer *ring;
2428 dev_priv = container_of(work, drm_i915_private_t,
2429 mm.retire_work.work);
2430 dev = dev_priv->dev;
2432 /* Come back later if the device is busy... */
2433 if (!mutex_trylock(&dev->struct_mutex)) {
2434 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2435 round_jiffies_up_relative(HZ));
2439 i915_gem_retire_requests(dev);
2441 /* Send a periodic flush down the ring so we don't hold onto GEM
2442 * objects indefinitely.
2445 for_each_ring(ring, dev_priv, i) {
2446 if (ring->gpu_caches_dirty)
2447 i915_add_request(ring, NULL);
2449 idle &= list_empty(&ring->request_list);
2452 if (!dev_priv->ums.mm_suspended && !idle)
2453 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2454 round_jiffies_up_relative(HZ));
2456 intel_mark_idle(dev);
2458 mutex_unlock(&dev->struct_mutex);
2462 * Ensures that an object will eventually get non-busy by flushing any required
2463 * write domains, emitting any outstanding lazy request and retiring any
2464 * completed requests.
2467 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2472 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2476 i915_gem_retire_requests_ring(obj->ring);
2483 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2484 * @DRM_IOCTL_ARGS: standard ioctl arguments
2486 * Returns 0 if successful, else an error is returned with the remaining time in
2487 * the timeout parameter.
2488 * -ETIME: object is still busy after timeout
2489 * -ERESTARTSYS: signal interrupted the wait
2490 * -ENOENT: object doesn't exist
2491 * Also possible, but rare:
2492 * -EAGAIN: GPU wedged
2494 * -ENODEV: Internal IRQ fail
2495 * -E?: The add request failed
2497 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2498 * non-zero timeout parameter the wait ioctl will wait for the given number of
2499 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2500 * without holding struct_mutex the object may become re-busied before this
2501 * function completes. A similar but shorter race condition exists in the busy
2505 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2507 drm_i915_private_t *dev_priv = dev->dev_private;
2508 struct drm_i915_gem_wait *args = data;
2509 struct drm_i915_gem_object *obj;
2510 struct intel_ring_buffer *ring = NULL;
2511 struct timespec timeout_stack, *timeout = NULL;
2512 unsigned reset_counter;
2516 if (args->timeout_ns >= 0) {
2517 timeout_stack = ns_to_timespec(args->timeout_ns);
2518 timeout = &timeout_stack;
2521 ret = i915_mutex_lock_interruptible(dev);
2525 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2526 if (&obj->base == NULL) {
2527 mutex_unlock(&dev->struct_mutex);
2531 /* Need to make sure the object gets inactive eventually. */
2532 ret = i915_gem_object_flush_active(obj);
2537 seqno = obj->last_read_seqno;
2544 /* Do this after OLR check to make sure we make forward progress polling
2545 * on this IOCTL with a 0 timeout (like busy ioctl)
2547 if (!args->timeout_ns) {
2552 drm_gem_object_unreference(&obj->base);
2553 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2554 mutex_unlock(&dev->struct_mutex);
2556 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
2558 args->timeout_ns = timespec_to_ns(timeout);
2562 drm_gem_object_unreference(&obj->base);
2563 mutex_unlock(&dev->struct_mutex);
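/*
 * Illustrative sketch (not part of the driver): how userspace might invoke
 * this ioctl through libdrm to wait up to 100ms for a buffer to go idle.
 * Assumes an open DRM fd in `fd` and a valid GEM handle in `handle`.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle  = handle,
 *		.timeout_ns = 100 * 1000 * 1000,
 *	};
 *	int err = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *	// err == 0: object idle, wait.timeout_ns holds the remaining time
 *	// err == -1 && errno == ETIME: object still busy after the timeout
 *
 * A timeout of 0 degenerates into a non-blocking busy query, as noted in
 * the comment above.
 */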
2568 * i915_gem_object_sync - sync an object to a ring.
2570 * @obj: object which may be in use on another ring.
2571 * @to: ring we wish to use the object on. May be NULL.
2573 * This code is meant to abstract object synchronization with the GPU.
2574 * Calling with NULL implies synchronizing the object with the CPU
2575 * rather than a particular GPU ring.
2577 * Returns 0 if successful, else propagates up the lower layer error.
2580 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2581 struct intel_ring_buffer *to)
2583 struct intel_ring_buffer *from = obj->ring;
2587 if (from == NULL || to == from)
2590 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2591 return i915_gem_object_wait_rendering(obj, false);
2593 idx = intel_ring_sync_index(from, to);
2595 seqno = obj->last_read_seqno;
2596 if (seqno <= from->sync_seqno[idx])
2599 ret = i915_gem_check_olr(obj->ring, seqno);
2603 ret = to->sync_to(to, from, seqno);
2605 /* We use last_read_seqno because sync_to()
2606 * might have just caused seqno wrap under the radar.
2609 from->sync_seqno[idx] = obj->last_read_seqno;
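/*
 * Note on the ring-to-ring path above: when hardware semaphores are
 * available, sync_to() makes the destination ring wait on the source
 * ring's seqno instead of stalling the CPU.  The per-ring sync_seqno[]
 * cache records the last seqno already waited upon, so later syncs
 * against older requests can be skipped entirely.
 */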
2614 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2616 u32 old_write_domain, old_read_domains;
2618 /* Force a pagefault for domain tracking on next user access */
2619 i915_gem_release_mmap(obj);
2621 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2624 /* Wait for any direct GTT access to complete */
2627 old_read_domains = obj->base.read_domains;
2628 old_write_domain = obj->base.write_domain;
2630 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2631 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2633 trace_i915_gem_object_change_domain(obj,
2638 int i915_vma_unbind(struct i915_vma *vma)
2640 struct drm_i915_gem_object *obj = vma->obj;
2641 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2644 if (list_empty(&vma->vma_link))
2647 if (!drm_mm_node_allocated(&vma->node))
2653 BUG_ON(obj->pages == NULL);
2655 ret = i915_gem_object_finish_gpu(obj);
2658 /* Continue on if we fail due to EIO, the GPU is hung so we
2659 * should be safe and we need to cleanup or else we might
2660 * cause memory corruption through use-after-free.
2663 i915_gem_object_finish_gtt(obj);
2665 /* release the fence reg _after_ flushing */
2666 ret = i915_gem_object_put_fence(obj);
2670 trace_i915_vma_unbind(vma);
2672 if (obj->has_global_gtt_mapping)
2673 i915_gem_gtt_unbind_object(obj);
2674 if (obj->has_aliasing_ppgtt_mapping) {
2675 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2676 obj->has_aliasing_ppgtt_mapping = 0;
2678 i915_gem_gtt_finish_object(obj);
2679 i915_gem_object_unpin_pages(obj);
2681 list_del(&vma->mm_list);
2682 /* Avoid an unnecessary call to unbind on rebind. */
2683 if (i915_is_ggtt(vma->vm))
2684 obj->map_and_fenceable = true;
2686 drm_mm_remove_node(&vma->node);
2689 i915_gem_vma_destroy(vma);
2691 /* Since the unbound list is global, only move to that list if
2692 * no more VMAs exist.
2693 * NB: Until we have real VMAs there will only ever be one */
2694 WARN_ON(!list_empty(&obj->vma_list));
2695 if (list_empty(&obj->vma_list))
2696 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
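/*
 * Unbind ordering above, for reference: wait for the GPU to finish with
 * the object, flush any GTT write domain, drop the fence register, tear
 * down the global GTT and aliasing PPGTT mappings, unpin the backing
 * pages, remove the drm_mm node and destroy the VMA.  Only once no VMA
 * remains is the object moved to the global unbound list.
 */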
2702 * Unbinds an object from the global GTT aperture.
2705 i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2707 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2708 struct i915_address_space *ggtt = &dev_priv->gtt.base;
2710 if (!i915_gem_obj_ggtt_bound(obj))
2716 BUG_ON(obj->pages == NULL);
2718 return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
2721 int i915_gpu_idle(struct drm_device *dev)
2723 drm_i915_private_t *dev_priv = dev->dev_private;
2724 struct intel_ring_buffer *ring;
2727 /* Flush everything onto the inactive list. */
2728 for_each_ring(ring, dev_priv, i) {
2729 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2733 ret = intel_ring_idle(ring);
2741 static void i965_write_fence_reg(struct drm_device *dev, int reg,
2742 struct drm_i915_gem_object *obj)
2744 drm_i915_private_t *dev_priv = dev->dev_private;
2746 int fence_pitch_shift;
2748 if (INTEL_INFO(dev)->gen >= 6) {
2749 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2750 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2752 fence_reg = FENCE_REG_965_0;
2753 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2756 fence_reg += reg * 8;
2758 /* To w/a incoherency with non-atomic 64-bit register updates,
2759 * we split the 64-bit update into two 32-bit writes. In order
2760 * for a partial fence not to be evaluated between writes, we
2761 * precede the update with write to turn off the fence register,
2762 * and only enable the fence as the last step.
2764 * For extra levels of paranoia, we make sure each step lands
2765 * before applying the next step.
2767 I915_WRITE(fence_reg, 0);
2768 POSTING_READ(fence_reg);
2771 u32 size = i915_gem_obj_ggtt_size(obj);
2774 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
2776 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
2777 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
2778 if (obj->tiling_mode == I915_TILING_Y)
2779 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2780 val |= I965_FENCE_REG_VALID;
2782 I915_WRITE(fence_reg + 4, val >> 32);
2783 POSTING_READ(fence_reg + 4);
2785 I915_WRITE(fence_reg + 0, val);
2786 POSTING_READ(fence_reg);
2788 I915_WRITE(fence_reg + 4, 0);
2789 POSTING_READ(fence_reg + 4);
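/*
 * Layout of the 64-bit fence value assembled above: the upper dword holds
 * the last page of the fenced range, the lower dword its start address,
 * the pitch is encoded as (stride / 128) - 1 in the per-generation pitch
 * field, and the Y-tiling and valid bits complete the register.
 */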
2793 static void i915_write_fence_reg(struct drm_device *dev, int reg,
2794 struct drm_i915_gem_object *obj)
2796 drm_i915_private_t *dev_priv = dev->dev_private;
2800 u32 size = i915_gem_obj_ggtt_size(obj);
2804 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
2805 (size & -size) != size ||
2806 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2807 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2808 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
2810 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2815 /* Note: pitch better be a power of two tile widths */
2816 pitch_val = obj->stride / tile_width;
2817 pitch_val = ffs(pitch_val) - 1;
2819 val = i915_gem_obj_ggtt_offset(obj);
2820 if (obj->tiling_mode == I915_TILING_Y)
2821 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2822 val |= I915_FENCE_SIZE_BITS(size);
2823 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2824 val |= I830_FENCE_REG_VALID;
2829 reg = FENCE_REG_830_0 + reg * 4;
2831 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2833 I915_WRITE(reg, val);
2837 static void i830_write_fence_reg(struct drm_device *dev, int reg,
2838 struct drm_i915_gem_object *obj)
2840 drm_i915_private_t *dev_priv = dev->dev_private;
2844 u32 size = i915_gem_obj_ggtt_size(obj);
2847 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
2848 (size & -size) != size ||
2849 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2850 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2851 i915_gem_obj_ggtt_offset(obj), size);
2853 pitch_val = obj->stride / 128;
2854 pitch_val = ffs(pitch_val) - 1;
2856 val = i915_gem_obj_ggtt_offset(obj);
2857 if (obj->tiling_mode == I915_TILING_Y)
2858 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2859 val |= I830_FENCE_SIZE_BITS(size);
2860 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2861 val |= I830_FENCE_REG_VALID;
2865 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2866 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2869 inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2871 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2874 static void i915_gem_write_fence(struct drm_device *dev, int reg,
2875 struct drm_i915_gem_object *obj)
2877 struct drm_i915_private *dev_priv = dev->dev_private;
2879 /* Ensure that all CPU reads are completed before installing a fence
2880 * and all writes before removing the fence.
2882 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2885 WARN(obj && (!obj->stride || !obj->tiling_mode),
2886 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2887 obj->stride, obj->tiling_mode);
2889 switch (INTEL_INFO(dev)->gen) {
2893 case 4: i965_write_fence_reg(dev, reg, obj); break;
2894 case 3: i915_write_fence_reg(dev, reg, obj); break;
2895 case 2: i830_write_fence_reg(dev, reg, obj); break;
2899 /* And similarly be paranoid that no direct access to this region
2900 * is reordered to before the fence is installed.
2902 if (i915_gem_object_needs_mb(obj))
2906 static inline int fence_number(struct drm_i915_private *dev_priv,
2907 struct drm_i915_fence_reg *fence)
2909 return fence - dev_priv->fence_regs;
2912 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2913 struct drm_i915_fence_reg *fence,
2916 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2917 int reg = fence_number(dev_priv, fence);
2919 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
2922 obj->fence_reg = reg;
2924 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2926 obj->fence_reg = I915_FENCE_REG_NONE;
2928 list_del_init(&fence->lru_list);
2930 obj->fence_dirty = false;
2934 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
2936 if (obj->last_fenced_seqno) {
2937 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2941 obj->last_fenced_seqno = 0;
2944 obj->fenced_gpu_access = false;
2949 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2951 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2952 struct drm_i915_fence_reg *fence;
2955 ret = i915_gem_object_wait_fence(obj);
2959 if (obj->fence_reg == I915_FENCE_REG_NONE)
2962 fence = &dev_priv->fence_regs[obj->fence_reg];
2964 i915_gem_object_fence_lost(obj);
2965 i915_gem_object_update_fence(obj, fence, false);
2970 static struct drm_i915_fence_reg *
2971 i915_find_fence_reg(struct drm_device *dev)
2973 struct drm_i915_private *dev_priv = dev->dev_private;
2974 struct drm_i915_fence_reg *reg, *avail;
2977 /* First try to find a free reg */
2979 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2980 reg = &dev_priv->fence_regs[i];
2984 if (!reg->pin_count)
2991 /* None available, try to steal one or wait for a user to finish */
2992 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3003 * i915_gem_object_get_fence - set up fencing for an object
3004 * @obj: object to map through a fence reg
3006 * When mapping objects through the GTT, userspace wants to be able to write
3007 * to them without having to worry about swizzling if the object is tiled.
3008 * This function walks the fence regs looking for a free one for @obj,
3009 * stealing one if it can't find any.
3011 * It then sets up the reg based on the object's properties: address, pitch
3012 * and tiling format.
3014 * For an untiled surface, this removes any existing fence.
3017 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3019 struct drm_device *dev = obj->base.dev;
3020 struct drm_i915_private *dev_priv = dev->dev_private;
3021 bool enable = obj->tiling_mode != I915_TILING_NONE;
3022 struct drm_i915_fence_reg *reg;
3025 /* Have we updated the tiling parameters upon the object and so
3026 * will need to serialise the write to the associated fence register?
3028 if (obj->fence_dirty) {
3029 ret = i915_gem_object_wait_fence(obj);
3034 /* Just update our place in the LRU if our fence is getting reused. */
3035 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3036 reg = &dev_priv->fence_regs[obj->fence_reg];
3037 if (!obj->fence_dirty) {
3038 list_move_tail(&reg->lru_list,
3039 &dev_priv->mm.fence_list);
3042 } else if (enable) {
3043 reg = i915_find_fence_reg(dev);
3048 struct drm_i915_gem_object *old = reg->obj;
3050 ret = i915_gem_object_wait_fence(old);
3054 i915_gem_object_fence_lost(old);
3059 i915_gem_object_update_fence(obj, reg, enable);
3064 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3065 struct drm_mm_node *gtt_space,
3066 unsigned long cache_level)
3068 struct drm_mm_node *other;
3070 /* On non-LLC machines we have to be careful when putting differing
3071 * types of snoopable memory together to avoid the prefetcher
3072 * crossing memory domains and dying.
3077 if (!drm_mm_node_allocated(gtt_space))
3080 if (list_empty(&gtt_space->node_list))
3083 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3084 if (other->allocated && !other->hole_follows && other->color != cache_level)
3087 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3088 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
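/*
 * The check above relies on drm_mm node colouring: on non-LLC platforms a
 * node may only sit directly next to a neighbour of the same cache level;
 * otherwise a guard hole must separate them, which is what the
 * hole_follows tests on the previous and next nodes verify.
 */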
3094 static void i915_gem_verify_gtt(struct drm_device *dev)
3097 struct drm_i915_private *dev_priv = dev->dev_private;
3098 struct drm_i915_gem_object *obj;
3101 list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
3102 if (obj->gtt_space == NULL) {
3103 printk(KERN_ERR "object found on GTT list with no space reserved\n");
3108 if (obj->cache_level != obj->gtt_space->color) {
3109 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
3110 i915_gem_obj_ggtt_offset(obj),
3111 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3113 obj->gtt_space->color);
3118 if (!i915_gem_valid_gtt_space(dev,
3120 obj->cache_level)) {
3121 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
3122 i915_gem_obj_ggtt_offset(obj),
3123 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3135 * Finds free space in the GTT aperture and binds the object there.
3138 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3139 struct i915_address_space *vm,
3141 bool map_and_fenceable,
3144 struct drm_device *dev = obj->base.dev;
3145 drm_i915_private_t *dev_priv = dev->dev_private;
3146 u32 size, fence_size, fence_alignment, unfenced_alignment;
3148 map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
3149 struct i915_vma *vma;
3152 fence_size = i915_gem_get_gtt_size(dev,
3155 fence_alignment = i915_gem_get_gtt_alignment(dev,
3157 obj->tiling_mode, true);
3158 unfenced_alignment =
3159 i915_gem_get_gtt_alignment(dev,
3161 obj->tiling_mode, false);
3164 alignment = map_and_fenceable ? fence_alignment :
3166 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
3167 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
3171 size = map_and_fenceable ? fence_size : obj->base.size;
3173 /* If the object is bigger than the entire aperture, reject it early
3174 * before evicting everything in a vain attempt to find space.
3176 if (obj->base.size > gtt_max) {
3177 DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
3179 map_and_fenceable ? "mappable" : "total",
3184 ret = i915_gem_object_get_pages(obj);
3188 i915_gem_object_pin_pages(obj);
3190 BUG_ON(!i915_is_ggtt(vm));
3192 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
3198 /* For now we only ever use 1 vma per object */
3199 WARN_ON(!list_is_singular(&obj->vma_list));
3202 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3204 obj->cache_level, 0, gtt_max,
3205 DRM_MM_SEARCH_DEFAULT);
3207 ret = i915_gem_evict_something(dev, vm, size, alignment,
3216 if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
3217 obj->cache_level))) {
3219 goto err_remove_node;
3222 ret = i915_gem_gtt_prepare_object(obj);
3224 goto err_remove_node;
3226 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3227 list_add_tail(&vma->mm_list, &vm->inactive_list);
3229 if (i915_is_ggtt(vm)) {
3230 bool mappable, fenceable;
3232 fenceable = (vma->node.size == fence_size &&
3233 (vma->node.start & (fence_alignment - 1)) == 0);
3235 mappable = (vma->node.start + obj->base.size <=
3236 dev_priv->gtt.mappable_end);
3238 obj->map_and_fenceable = mappable && fenceable;
3241 WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
3243 trace_i915_vma_bind(vma, map_and_fenceable);
3244 i915_gem_verify_gtt(dev);
3248 drm_mm_remove_node(&vma->node);
3250 i915_gem_vma_destroy(vma);
3252 i915_gem_object_unpin_pages(obj);
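/*
 * Binding recap: the fenced and unfenced size/alignment requirements are
 * derived from the tiling mode, the backing pages are pinned, a VMA is
 * looked up or created for the target address space, a drm_mm node is
 * inserted (evicting other objects if the search fails), the GTT entries
 * are prepared, and for the global GTT the map_and_fenceable flag is
 * recomputed from the resulting placement.
 */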
3257 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3260 /* If we don't have a page list set up, then we're not pinned
3261 * to GPU, and we can ignore the cache flush because it'll happen
3262 * again at bind time.
3264 if (obj->pages == NULL)
3268 * Stolen memory is always coherent with the GPU as it is explicitly
3269 * marked as wc by the system, or the system is cache-coherent.
3274 /* If the GPU is snooping the contents of the CPU cache,
3275 * we do not need to manually clear the CPU cache lines. However,
3276 * the caches are only snooped when the render cache is
3277 * flushed/invalidated. As we always have to emit invalidations
3278 * and flushes when moving into and out of the RENDER domain, correct
3279 * snooping behaviour occurs naturally as the result of our domain
3282 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
3285 trace_i915_gem_object_clflush(obj);
3286 drm_clflush_sg(obj->pages);
3291 /** Flushes the GTT write domain for the object if it's dirty. */
3293 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3295 uint32_t old_write_domain;
3297 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3300 /* No actual flushing is required for the GTT write domain. Writes
3301 * to it immediately go to main memory as far as we know, so there's
3302 * no chipset flush. It also doesn't land in render cache.
3304 * However, we do have to enforce the order so that all writes through
3305 * the GTT land before any writes to the device, such as updates to
3310 old_write_domain = obj->base.write_domain;
3311 obj->base.write_domain = 0;
3313 trace_i915_gem_object_change_domain(obj,
3314 obj->base.read_domains,
3318 /** Flushes the CPU write domain for the object if it's dirty. */
3320 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3323 uint32_t old_write_domain;
3325 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3328 if (i915_gem_clflush_object(obj, force))
3329 i915_gem_chipset_flush(obj->base.dev);
3331 old_write_domain = obj->base.write_domain;
3332 obj->base.write_domain = 0;
3334 trace_i915_gem_object_change_domain(obj,
3335 obj->base.read_domains,
3340 * Moves a single object to the GTT read, and possibly write domain.
3342 * This function returns when the move is complete, including waiting on
3346 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3348 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
3349 uint32_t old_write_domain, old_read_domains;
3352 /* Not valid to be called on unbound objects. */
3353 if (!i915_gem_obj_bound_any(obj))
3356 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3359 ret = i915_gem_object_wait_rendering(obj, !write);
3363 i915_gem_object_flush_cpu_write_domain(obj, false);
3365 /* Serialise direct access to this object with the barriers for
3366 * coherent writes from the GPU, by effectively invalidating the
3367 * GTT domain upon first access.
3369 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3372 old_write_domain = obj->base.write_domain;
3373 old_read_domains = obj->base.read_domains;
3375 /* It should now be out of any other write domains, and we can update
3376 * the domain values for our changes.
3378 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3379 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3381 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3382 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3386 trace_i915_gem_object_change_domain(obj,
3390 /* And bump the LRU for this access */
3391 if (i915_gem_object_is_inactive(obj)) {
3392 struct i915_vma *vma = i915_gem_obj_to_vma(obj,
3393 &dev_priv->gtt.base);
3395 list_move_tail(&vma->mm_list,
3396 &dev_priv->gtt.base.inactive_list);
3403 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3404 enum i915_cache_level cache_level)
3406 struct drm_device *dev = obj->base.dev;
3407 drm_i915_private_t *dev_priv = dev->dev_private;
3408 struct i915_vma *vma;
3411 if (obj->cache_level == cache_level)
3414 if (obj->pin_count) {
3415 DRM_DEBUG("can not change the cache level of pinned objects\n");
3419 list_for_each_entry(vma, &obj->vma_list, vma_link) {
3420 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
3421 ret = i915_vma_unbind(vma);
3429 if (i915_gem_obj_bound_any(obj)) {
3430 ret = i915_gem_object_finish_gpu(obj);
3434 i915_gem_object_finish_gtt(obj);
3436 /* Before SandyBridge, you could not use tiling or fence
3437 * registers with snooped memory, so relinquish any fences
3438 * currently pointing to our region in the aperture.
3440 if (INTEL_INFO(dev)->gen < 6) {
3441 ret = i915_gem_object_put_fence(obj);
3446 if (obj->has_global_gtt_mapping)
3447 i915_gem_gtt_bind_object(obj, cache_level);
3448 if (obj->has_aliasing_ppgtt_mapping)
3449 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3453 list_for_each_entry(vma, &obj->vma_list, vma_link)
3454 vma->node.color = cache_level;
3455 obj->cache_level = cache_level;
3457 if (cpu_write_needs_clflush(obj)) {
3458 u32 old_read_domains, old_write_domain;
3460 /* If we're coming from LLC cached, then we haven't
3461 * actually been tracking whether the data is in the
3462 * CPU cache or not, since we only allow one bit set
3463 * in obj->write_domain and have been skipping the clflushes.
3464 * Just set it to the CPU cache for now.
3466 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3468 old_read_domains = obj->base.read_domains;
3469 old_write_domain = obj->base.write_domain;
3471 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3472 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3474 trace_i915_gem_object_change_domain(obj,
3479 i915_gem_verify_gtt(dev);
3483 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3484 struct drm_file *file)
3486 struct drm_i915_gem_caching *args = data;
3487 struct drm_i915_gem_object *obj;
3490 ret = i915_mutex_lock_interruptible(dev);
3494 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3495 if (&obj->base == NULL) {
3500 switch (obj->cache_level) {
3501 case I915_CACHE_LLC:
3502 case I915_CACHE_L3_LLC:
3503 args->caching = I915_CACHING_CACHED;
3507 args->caching = I915_CACHING_DISPLAY;
3511 args->caching = I915_CACHING_NONE;
3515 drm_gem_object_unreference(&obj->base);
3517 mutex_unlock(&dev->struct_mutex);
3521 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3522 struct drm_file *file)
3524 struct drm_i915_gem_caching *args = data;
3525 struct drm_i915_gem_object *obj;
3526 enum i915_cache_level level;
3529 switch (args->caching) {
3530 case I915_CACHING_NONE:
3531 level = I915_CACHE_NONE;
3533 case I915_CACHING_CACHED:
3534 level = I915_CACHE_LLC;
3536 case I915_CACHING_DISPLAY:
3537 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3543 ret = i915_mutex_lock_interruptible(dev);
3547 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3548 if (&obj->base == NULL) {
3553 ret = i915_gem_object_set_cache_level(obj, level);
3555 drm_gem_object_unreference(&obj->base);
3557 mutex_unlock(&dev->struct_mutex);
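/*
 * Illustrative sketch (not part of the driver): requesting LLC caching for
 * a buffer from userspace via libdrm.  Assumes an open DRM fd in `fd` and
 * a valid GEM handle in `handle`.
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle  = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg))
 *		fprintf(stderr, "set caching failed: %s\n", strerror(errno));
 *
 * The exact PTE caching attributes chosen for each request depend on the
 * platform, as the switch statement above shows.
 */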
3561 static bool is_pin_display(struct drm_i915_gem_object *obj)
3563 /* There are 3 sources that pin objects:
3564 * 1. The display engine (scanouts, sprites, cursors);
3565 * 2. Reservations for execbuffer;
3568 * We can ignore reservations as we hold the struct_mutex and
3569 * are only called outside of the reservation path. The user
3570 * can only increment pin_count once, and so if after
3571 * subtracting the potential reference by the user, any pin_count
3572 * remains, it must be due to another use by the display engine.
3574 return obj->pin_count - !!obj->user_pin_count;
3578 * Prepare buffer for display plane (scanout, cursors, etc).
3579 * Can be called from an uninterruptible phase (modesetting) and allows
3580 * any flushes to be pipelined (for pageflips).
3583 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3585 struct intel_ring_buffer *pipelined)
3587 u32 old_read_domains, old_write_domain;
3590 if (pipelined != obj->ring) {
3591 ret = i915_gem_object_sync(obj, pipelined);
3596 /* Mark the pin_display early so that we account for the
3597 * display coherency whilst setting up the cache domains.
3599 obj->pin_display = true;
3601 /* The display engine is not coherent with the LLC cache on gen6. As
3602 * a result, we make sure that the pinning that is about to occur is
3603 * done with uncached PTEs. This is lowest common denominator for all
3606 * However for gen6+, we could do better by using the GFDT bit instead
3607 * of uncaching, which would allow us to flush all the LLC-cached data
3608 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3610 ret = i915_gem_object_set_cache_level(obj,
3611 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3613 goto err_unpin_display;
3615 /* As the user may map the buffer once pinned in the display plane
3616 * (e.g. libkms for the bootup splash), we have to ensure that we
3617 * always use map_and_fenceable for all scanout buffers.
3619 ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
3621 goto err_unpin_display;
3623 i915_gem_object_flush_cpu_write_domain(obj, true);
3625 old_write_domain = obj->base.write_domain;
3626 old_read_domains = obj->base.read_domains;
3628 /* It should now be out of any other write domains, and we can update
3629 * the domain values for our changes.
3631 obj->base.write_domain = 0;
3632 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3634 trace_i915_gem_object_change_domain(obj,
3641 obj->pin_display = is_pin_display(obj);
3646 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3648 i915_gem_object_unpin(obj);
3649 obj->pin_display = is_pin_display(obj);
3653 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3657 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3660 ret = i915_gem_object_wait_rendering(obj, false);
3664 /* Ensure that we invalidate the GPU's caches and TLBs. */
3665 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3670 * Moves a single object to the CPU read, and possibly write domain.
3672 * This function returns when the move is complete, including waiting on
3676 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3678 uint32_t old_write_domain, old_read_domains;
3681 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3684 ret = i915_gem_object_wait_rendering(obj, !write);
3688 i915_gem_object_flush_gtt_write_domain(obj);
3690 old_write_domain = obj->base.write_domain;
3691 old_read_domains = obj->base.read_domains;
3693 /* Flush the CPU cache if it's still invalid. */
3694 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3695 i915_gem_clflush_object(obj, false);
3697 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3700 /* It should now be out of any other write domains, and we can update
3701 * the domain values for our changes.
3703 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3705 /* If we're writing through the CPU, then the GPU read domains will
3706 * need to be invalidated at next use.
3709 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3710 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3713 trace_i915_gem_object_change_domain(obj,
3720 /* Throttle our rendering by waiting until the ring has completed our requests
3721 * emitted over 20 msec ago.
3723 * Note that if we were to use the current jiffies each time around the loop,
3724 * we wouldn't escape the function with any frames outstanding if the time to
3725 * render a frame was over 20ms.
3727 * This should get us reasonable parallelism between CPU and GPU but also
3728 * relatively low latency when blocking on a particular request to finish.
3731 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3733 struct drm_i915_private *dev_priv = dev->dev_private;
3734 struct drm_i915_file_private *file_priv = file->driver_priv;
3735 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3736 struct drm_i915_gem_request *request;
3737 struct intel_ring_buffer *ring = NULL;
3738 unsigned reset_counter;
3742 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3746 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3750 spin_lock(&file_priv->mm.lock);
3751 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3752 if (time_after_eq(request->emitted_jiffies, recent_enough))
3755 ring = request->ring;
3756 seqno = request->seqno;
3758 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3759 spin_unlock(&file_priv->mm.lock);
3764 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
3766 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3772 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3773 struct i915_address_space *vm,
3775 bool map_and_fenceable,
3778 struct i915_vma *vma;
3781 if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3784 WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
3786 vma = i915_gem_obj_to_vma(obj, vm);
3790 vma->node.start & (alignment - 1)) ||
3791 (map_and_fenceable && !obj->map_and_fenceable)) {
3792 WARN(obj->pin_count,
3793 "bo is already pinned with incorrect alignment:"
3794 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
3795 " obj->map_and_fenceable=%d\n",
3796 i915_gem_obj_offset(obj, vm), alignment,
3798 obj->map_and_fenceable);
3799 ret = i915_vma_unbind(vma);
3805 if (!i915_gem_obj_bound(obj, vm)) {
3806 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3808 ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
3814 if (!dev_priv->mm.aliasing_ppgtt)
3815 i915_gem_gtt_bind_object(obj, obj->cache_level);
3818 if (!obj->has_global_gtt_mapping && map_and_fenceable)
3819 i915_gem_gtt_bind_object(obj, obj->cache_level);
3822 obj->pin_mappable |= map_and_fenceable;
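/*
 * Note: if the object is already bound but its existing VMA violates the
 * requested alignment or map_and_fenceable constraint, the code above
 * unbinds it first (warning if it was pinned) and rebinds it at a
 * suitable location before the new pin reference takes effect.
 */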
3828 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3830 BUG_ON(obj->pin_count == 0);
3831 BUG_ON(!i915_gem_obj_bound_any(obj));
3833 if (--obj->pin_count == 0)
3834 obj->pin_mappable = false;
3838 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3839 struct drm_file *file)
3841 struct drm_i915_gem_pin *args = data;
3842 struct drm_i915_gem_object *obj;
3845 ret = i915_mutex_lock_interruptible(dev);
3849 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3850 if (&obj->base == NULL) {
3855 if (obj->madv != I915_MADV_WILLNEED) {
3856 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3861 if (obj->pin_filp != NULL && obj->pin_filp != file) {
3862 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3868 if (obj->user_pin_count == 0) {
3869 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
3874 obj->user_pin_count++;
3875 obj->pin_filp = file;
3877 args->offset = i915_gem_obj_ggtt_offset(obj);
3879 drm_gem_object_unreference(&obj->base);
3881 mutex_unlock(&dev->struct_mutex);
3886 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3887 struct drm_file *file)
3889 struct drm_i915_gem_pin *args = data;
3890 struct drm_i915_gem_object *obj;
3893 ret = i915_mutex_lock_interruptible(dev);
3897 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3898 if (&obj->base == NULL) {
3903 if (obj->pin_filp != file) {
3904 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3909 obj->user_pin_count--;
3910 if (obj->user_pin_count == 0) {
3911 obj->pin_filp = NULL;
3912 i915_gem_object_unpin(obj);
3916 drm_gem_object_unreference(&obj->base);
3918 mutex_unlock(&dev->struct_mutex);
3923 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3924 struct drm_file *file)
3926 struct drm_i915_gem_busy *args = data;
3927 struct drm_i915_gem_object *obj;
3930 ret = i915_mutex_lock_interruptible(dev);
3934 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3935 if (&obj->base == NULL) {
3940 /* Count all active objects as busy, even if they are currently not used
3941 * by the gpu. Users of this interface expect objects to eventually
3942 * become non-busy without any further actions, therefore emit any
3943 * necessary flushes here.
3945 ret = i915_gem_object_flush_active(obj);
3947 args->busy = obj->active;
3949 BUILD_BUG_ON(I915_NUM_RINGS > 16);
3950 args->busy |= intel_ring_flag(obj->ring) << 16;
3953 drm_gem_object_unreference(&obj->base);
3955 mutex_unlock(&dev->struct_mutex);
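/*
 * The busy result is a bitmask: bit 0 reports whether the object is still
 * active, and the flag of the ring currently using it is packed into
 * bits 16 and up (hence the BUILD_BUG_ON limiting the ring count to 16).
 */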
3960 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3961 struct drm_file *file_priv)
3963 return i915_gem_ring_throttle(dev, file_priv);
3967 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3968 struct drm_file *file_priv)
3970 struct drm_i915_gem_madvise *args = data;
3971 struct drm_i915_gem_object *obj;
3974 switch (args->madv) {
3975 case I915_MADV_DONTNEED:
3976 case I915_MADV_WILLNEED:
3982 ret = i915_mutex_lock_interruptible(dev);
3986 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3987 if (&obj->base == NULL) {
3992 if (obj->pin_count) {
3997 if (obj->madv != __I915_MADV_PURGED)
3998 obj->madv = args->madv;
4000 /* if the object is no longer attached, discard its backing storage */
4001 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
4002 i915_gem_object_truncate(obj);
4004 args->retained = obj->madv != __I915_MADV_PURGED;
4007 drm_gem_object_unreference(&obj->base);
4009 mutex_unlock(&dev->struct_mutex);
4013 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4014 const struct drm_i915_gem_object_ops *ops)
4016 INIT_LIST_HEAD(&obj->global_list);
4017 INIT_LIST_HEAD(&obj->ring_list);
4018 INIT_LIST_HEAD(&obj->exec_list);
4019 INIT_LIST_HEAD(&obj->obj_exec_link);
4020 INIT_LIST_HEAD(&obj->vma_list);
4024 obj->fence_reg = I915_FENCE_REG_NONE;
4025 obj->madv = I915_MADV_WILLNEED;
4026 /* Avoid an unnecessary call to unbind on the first bind. */
4027 obj->map_and_fenceable = true;
4029 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4032 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4033 .get_pages = i915_gem_object_get_pages_gtt,
4034 .put_pages = i915_gem_object_put_pages_gtt,
4037 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4040 struct drm_i915_gem_object *obj;
4041 struct address_space *mapping;
4044 obj = i915_gem_object_alloc(dev);
4048 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4049 i915_gem_object_free(obj);
4053 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4054 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4055 /* 965gm cannot relocate objects above 4GiB. */
4056 mask &= ~__GFP_HIGHMEM;
4057 mask |= __GFP_DMA32;
4060 mapping = file_inode(obj->base.filp)->i_mapping;
4061 mapping_set_gfp_mask(mapping, mask);
4063 i915_gem_object_init(obj, &i915_gem_object_ops);
4065 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4066 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4069 /* On some devices, we can have the GPU use the LLC (the CPU
4070 * cache) for about a 10% performance improvement
4071 * compared to uncached. Graphics requests other than
4072 * display scanout are coherent with the CPU in
4073 * accessing this cache. This means in this mode we
4074 * don't need to clflush on the CPU side, and on the
4075 * GPU side we only need to flush internal caches to
4076 * get data visible to the CPU.
4078 * However, we maintain the display planes as UC, and so
4079 * need to rebind when first used as such.
4081 obj->cache_level = I915_CACHE_LLC;
4083 obj->cache_level = I915_CACHE_NONE;
4085 trace_i915_gem_object_create(obj);
4090 int i915_gem_init_object(struct drm_gem_object *obj)
4097 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4099 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4100 struct drm_device *dev = obj->base.dev;
4101 drm_i915_private_t *dev_priv = dev->dev_private;
4102 struct i915_vma *vma, *next;
4104 trace_i915_gem_object_destroy(obj);
4107 i915_gem_detach_phys_object(dev, obj);
4110 /* NB: 0 or 1 elements */
4111 WARN_ON(!list_empty(&obj->vma_list) &&
4112 !list_is_singular(&obj->vma_list));
4113 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4114 int ret = i915_vma_unbind(vma);
4115 if (WARN_ON(ret == -ERESTARTSYS)) {
4116 bool was_interruptible;
4118 was_interruptible = dev_priv->mm.interruptible;
4119 dev_priv->mm.interruptible = false;
4121 WARN_ON(i915_vma_unbind(vma));
4123 dev_priv->mm.interruptible = was_interruptible;
4127 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4128 * before progressing. */
4130 i915_gem_object_unpin_pages(obj);
4132 if (WARN_ON(obj->pages_pin_count))
4133 obj->pages_pin_count = 0;
4134 i915_gem_object_put_pages(obj);
4135 i915_gem_object_free_mmap_offset(obj);
4136 i915_gem_object_release_stolen(obj);
4140 if (obj->base.import_attach)
4141 drm_prime_gem_destroy(&obj->base, NULL);
4143 drm_gem_object_release(&obj->base);
4144 i915_gem_info_remove_obj(dev_priv, obj->base.size);
4147 i915_gem_object_free(obj);
4150 struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
4151 struct i915_address_space *vm)
4153 struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
4155 return ERR_PTR(-ENOMEM);
4157 INIT_LIST_HEAD(&vma->vma_link);
4158 INIT_LIST_HEAD(&vma->mm_list);
4159 INIT_LIST_HEAD(&vma->exec_list);
4163 /* Keep GGTT vmas first to make debug easier */
4164 if (i915_is_ggtt(vm))
4165 list_add(&vma->vma_link, &obj->vma_list);
4167 list_add_tail(&vma->vma_link, &obj->vma_list);
4172 void i915_gem_vma_destroy(struct i915_vma *vma)
4174 WARN_ON(vma->node.allocated);
4175 list_del(&vma->vma_link);
4180 i915_gem_idle(struct drm_device *dev)
4182 drm_i915_private_t *dev_priv = dev->dev_private;
4185 if (dev_priv->ums.mm_suspended) {
4186 mutex_unlock(&dev->struct_mutex);
4190 ret = i915_gpu_idle(dev);
4192 mutex_unlock(&dev->struct_mutex);
4195 i915_gem_retire_requests(dev);
4197 /* Under UMS, be paranoid and evict. */
4198 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4199 i915_gem_evict_everything(dev);
4201 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
4203 i915_kernel_lost_context(dev);
4204 i915_gem_cleanup_ringbuffer(dev);
4206 /* Cancel the retire work handler, which should be idle now. */
4207 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4212 void i915_gem_l3_remap(struct drm_device *dev)
4214 drm_i915_private_t *dev_priv = dev->dev_private;
4218 if (!HAS_L3_GPU_CACHE(dev))
4221 if (!dev_priv->l3_parity.remap_info)
4224 misccpctl = I915_READ(GEN7_MISCCPCTL);
4225 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
4226 POSTING_READ(GEN7_MISCCPCTL);
4228 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4229 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
4230 if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
4231 DRM_DEBUG("0x%x was already programmed to %x\n",
4232 GEN7_L3LOG_BASE + i, remap);
4233 if (remap && !dev_priv->l3_parity.remap_info[i/4])
4234 DRM_DEBUG_DRIVER("Clearing remapped register\n");
4235 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
4238 /* Make sure all the writes land before disabling dop clock gating */
4239 POSTING_READ(GEN7_L3LOG_BASE);
4241 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
4244 void i915_gem_init_swizzling(struct drm_device *dev)
4246 drm_i915_private_t *dev_priv = dev->dev_private;
4248 if (INTEL_INFO(dev)->gen < 5 ||
4249 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4252 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4253 DISP_TILE_SURFACE_SWIZZLING);
4258 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4260 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4261 else if (IS_GEN7(dev))
4262 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4268 intel_enable_blt(struct drm_device *dev)
4273 /* The blitter was dysfunctional on early prototypes */
4274 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4275 DRM_INFO("BLT not supported on this pre-production hardware;"
4276 " graphics performance will be degraded.\n");
4283 static int i915_gem_init_rings(struct drm_device *dev)
4285 struct drm_i915_private *dev_priv = dev->dev_private;
4288 ret = intel_init_render_ring_buffer(dev);
4293 ret = intel_init_bsd_ring_buffer(dev);
4295 goto cleanup_render_ring;
4298 if (intel_enable_blt(dev)) {
4299 ret = intel_init_blt_ring_buffer(dev);
4301 goto cleanup_bsd_ring;
4304 if (HAS_VEBOX(dev)) {
4305 ret = intel_init_vebox_ring_buffer(dev);
4307 goto cleanup_blt_ring;
4311 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4313 goto cleanup_vebox_ring;
4318 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4320 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4322 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4323 cleanup_render_ring:
4324 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
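/*
 * Ring bring-up above proceeds render -> BSD -> blitter -> VEBOX, with the
 * goto labels unwinding in reverse order on failure.  The initial seqno is
 * deliberately placed close to the 32-bit wrap point, presumably so that
 * seqno wrap handling gets exercised soon after boot.
 */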
4330 i915_gem_init_hw(struct drm_device *dev)
4332 drm_i915_private_t *dev_priv = dev->dev_private;
4335 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4338 if (dev_priv->ellc_size)
4339 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4341 if (HAS_PCH_NOP(dev)) {
4342 u32 temp = I915_READ(GEN7_MSG_CTL);
4343 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4344 I915_WRITE(GEN7_MSG_CTL, temp);
4347 i915_gem_l3_remap(dev);
4349 i915_gem_init_swizzling(dev);
4351 ret = i915_gem_init_rings(dev);
4356 * XXX: There was some w/a described somewhere suggesting loading
4357 * contexts before PPGTT.
4359 i915_gem_context_init(dev);
4360 if (dev_priv->mm.aliasing_ppgtt) {
4361 ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
4363 i915_gem_cleanup_aliasing_ppgtt(dev);
4364 DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
4371 int i915_gem_init(struct drm_device *dev)
4373 struct drm_i915_private *dev_priv = dev->dev_private;
4376 mutex_lock(&dev->struct_mutex);
4378 if (IS_VALLEYVIEW(dev)) {
4379 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4380 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
4381 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
4382 DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4385 i915_gem_init_global_gtt(dev);
4387 ret = i915_gem_init_hw(dev);
4388 mutex_unlock(&dev->struct_mutex);
4390 i915_gem_cleanup_aliasing_ppgtt(dev);
4394 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4395 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4396 dev_priv->dri1.allow_batchbuffer = 1;
4401 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4403 drm_i915_private_t *dev_priv = dev->dev_private;
4404 struct intel_ring_buffer *ring;
4407 for_each_ring(ring, dev_priv, i)
4408 intel_cleanup_ring_buffer(ring);
4412 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4413 struct drm_file *file_priv)
4415 struct drm_i915_private *dev_priv = dev->dev_private;
4418 if (drm_core_check_feature(dev, DRIVER_MODESET))
4421 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
4422 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4423 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
4426 mutex_lock(&dev->struct_mutex);
4427 dev_priv->ums.mm_suspended = 0;
4429 ret = i915_gem_init_hw(dev);
4431 mutex_unlock(&dev->struct_mutex);
4435 BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
4436 mutex_unlock(&dev->struct_mutex);
4438 ret = drm_irq_install(dev);
4440 goto cleanup_ringbuffer;
4445 mutex_lock(&dev->struct_mutex);
4446 i915_gem_cleanup_ringbuffer(dev);
4447 dev_priv->ums.mm_suspended = 1;
4448 mutex_unlock(&dev->struct_mutex);
4454 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4455 struct drm_file *file_priv)
4457 struct drm_i915_private *dev_priv = dev->dev_private;
4460 if (drm_core_check_feature(dev, DRIVER_MODESET))
4463 drm_irq_uninstall(dev);
4465 mutex_lock(&dev->struct_mutex);
4466 ret = i915_gem_idle(dev);
4468 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4469 * We need to replace this with a semaphore, or something.
4470 * And not confound ums.mm_suspended!
4473 dev_priv->ums.mm_suspended = 1;
4474 mutex_unlock(&dev->struct_mutex);
4480 i915_gem_lastclose(struct drm_device *dev)
4484 if (drm_core_check_feature(dev, DRIVER_MODESET))
4487 mutex_lock(&dev->struct_mutex);
4488 ret = i915_gem_idle(dev);
4490 DRM_ERROR("failed to idle hardware: %d\n", ret);
4491 mutex_unlock(&dev->struct_mutex);
4495 init_ring_lists(struct intel_ring_buffer *ring)
4497 INIT_LIST_HEAD(&ring->active_list);
4498 INIT_LIST_HEAD(&ring->request_list);
4501 static void i915_init_vm(struct drm_i915_private *dev_priv,
4502 struct i915_address_space *vm)
4504 vm->dev = dev_priv->dev;
4505 INIT_LIST_HEAD(&vm->active_list);
4506 INIT_LIST_HEAD(&vm->inactive_list);
4507 INIT_LIST_HEAD(&vm->global_link);
4508 list_add(&vm->global_link, &dev_priv->vm_list);
4512 i915_gem_load(struct drm_device *dev)
4514 drm_i915_private_t *dev_priv = dev->dev_private;
4518 kmem_cache_create("i915_gem_object",
4519 sizeof(struct drm_i915_gem_object), 0,
4523 INIT_LIST_HEAD(&dev_priv->vm_list);
4524 i915_init_vm(dev_priv, &dev_priv->gtt.base);
4526 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4527 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4528 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4529 for (i = 0; i < I915_NUM_RINGS; i++)
4530 init_ring_lists(&dev_priv->ring[i]);
4531 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4532 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4533 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4534 i915_gem_retire_work_handler);
4535 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4537 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4539 I915_WRITE(MI_ARB_STATE,
4540 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4543 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4545 /* Old X drivers will take 0-2 for front, back, depth buffers */
4546 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4547 dev_priv->fence_reg_start = 3;
4549 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4550 dev_priv->num_fence_regs = 32;
4551 else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4552 dev_priv->num_fence_regs = 16;
4554 dev_priv->num_fence_regs = 8;
4556 /* Initialize fence registers to zero */
4557 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4558 i915_gem_restore_fences(dev);
4560 i915_gem_detect_bit_6_swizzle(dev);
4561 init_waitqueue_head(&dev_priv->pending_flip_queue);
4563 dev_priv->mm.interruptible = true;
4565 dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
4566 dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
4567 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4568 register_shrinker(&dev_priv->mm.inactive_shrinker);
4572 * Create a physically contiguous memory object for this object
4573 * e.g. for cursor + overlay regs
4575 static int i915_gem_init_phys_object(struct drm_device *dev,
4576 int id, int size, int align)
4578 drm_i915_private_t *dev_priv = dev->dev_private;
4579 struct drm_i915_gem_phys_object *phys_obj;
4582 if (dev_priv->mm.phys_objs[id - 1] || !size)
4585 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4591 phys_obj->handle = drm_pci_alloc(dev, size, align);
4592 if (!phys_obj->handle) {
4597 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4600 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4608 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4610 drm_i915_private_t *dev_priv = dev->dev_private;
4611 struct drm_i915_gem_phys_object *phys_obj;
4613 if (!dev_priv->mm.phys_objs[id - 1])
4616 phys_obj = dev_priv->mm.phys_objs[id - 1];
4617 if (phys_obj->cur_obj) {
4618 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4622 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4624 drm_pci_free(dev, phys_obj->handle);
4626 dev_priv->mm.phys_objs[id - 1] = NULL;
4629 void i915_gem_free_all_phys_object(struct drm_device *dev)
4633 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4634 i915_gem_free_phys_object(dev, i);
4637 void i915_gem_detach_phys_object(struct drm_device *dev,
4638 struct drm_i915_gem_object *obj)
4640 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4647 vaddr = obj->phys_obj->handle->vaddr;
4649 page_count = obj->base.size / PAGE_SIZE;
4650 for (i = 0; i < page_count; i++) {
4651 struct page *page = shmem_read_mapping_page(mapping, i);
4652 if (!IS_ERR(page)) {
4653 char *dst = kmap_atomic(page);
4654 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4657 drm_clflush_pages(&page, 1);
4659 set_page_dirty(page);
4660 mark_page_accessed(page);
4661 page_cache_release(page);
4664 i915_gem_chipset_flush(dev);
4666 obj->phys_obj->cur_obj = NULL;
4667 obj->phys_obj = NULL;
4671 i915_gem_attach_phys_object(struct drm_device *dev,
4672 struct drm_i915_gem_object *obj,
4676 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4677 drm_i915_private_t *dev_priv = dev->dev_private;
4682 if (id > I915_MAX_PHYS_OBJECT)
4685 if (obj->phys_obj) {
4686 if (obj->phys_obj->id == id)
4688 i915_gem_detach_phys_object(dev, obj);
4691 /* create a new object */
4692 if (!dev_priv->mm.phys_objs[id - 1]) {
4693 ret = i915_gem_init_phys_object(dev, id,
4694 obj->base.size, align);
4696 DRM_ERROR("failed to init phys object %d size: %zu\n",
4697 id, obj->base.size);
4702 /* bind to the object */
4703 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4704 obj->phys_obj->cur_obj = obj;
4706 page_count = obj->base.size / PAGE_SIZE;
4708 for (i = 0; i < page_count; i++) {
4712 page = shmem_read_mapping_page(mapping, i);
4714 return PTR_ERR(page);
4716 src = kmap_atomic(page);
4717 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4718 memcpy(dst, src, PAGE_SIZE);
4721 mark_page_accessed(page);
4722 page_cache_release(page);
4729 i915_gem_phys_pwrite(struct drm_device *dev,
4730 struct drm_i915_gem_object *obj,
4731 struct drm_i915_gem_pwrite *args,
4732 struct drm_file *file_priv)
4734 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4735 char __user *user_data = to_user_ptr(args->data_ptr);
4737 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4738 unsigned long unwritten;
4740 /* The physical object once assigned is fixed for the lifetime
4741 * of the obj, so we can safely drop the lock and continue
4744 mutex_unlock(&dev->struct_mutex);
4745 unwritten = copy_from_user(vaddr, user_data, args->size);
4746 mutex_lock(&dev->struct_mutex);
4751 i915_gem_chipset_flush(dev);
4755 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4757 struct drm_i915_file_private *file_priv = file->driver_priv;
4759 /* Clean up our request list when the client is going away, so that
4760 * later retire_requests won't dereference our soon-to-be-gone
4763 spin_lock(&file_priv->mm.lock);
4764 while (!list_empty(&file_priv->mm.request_list)) {
4765 struct drm_i915_gem_request *request;
4767 request = list_first_entry(&file_priv->mm.request_list,
4768 struct drm_i915_gem_request,
4770 list_del(&request->client_list);
4771 request->file_priv = NULL;
4773 spin_unlock(&file_priv->mm.lock);
4776 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4778 if (!mutex_is_locked(mutex))
4781 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
4782 return mutex->owner == task;
4784 /* Since UP may be pre-empted, we cannot assume that we own the lock */
4789 static unsigned long
4790 i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
4792 struct drm_i915_private *dev_priv =
4793 container_of(shrinker,
4794 struct drm_i915_private,
4795 mm.inactive_shrinker);
4796 struct drm_device *dev = dev_priv->dev;
4797 struct drm_i915_gem_object *obj;
4799 unsigned long count;
4801 if (!mutex_trylock(&dev->struct_mutex)) {
4802 if (!mutex_is_locked_by(&dev->struct_mutex, current))
4805 if (dev_priv->mm.shrinker_no_lock_stealing)
4812 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
4813 if (obj->pages_pin_count == 0)
4814 count += obj->base.size >> PAGE_SHIFT;
4816 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
4820 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
4821 count += obj->base.size >> PAGE_SHIFT;
4825 mutex_unlock(&dev->struct_mutex);
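/*
 * The shrinker count above is reported in pages: every unbound object
 * whose backing pages are not pinned is counted in full, as is every
 * bound object that is neither pinned into the GTT nor has its pages
 * pinned elsewhere.
 */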
4829 /* All the new VM stuff */
4830 unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
4831 struct i915_address_space *vm)
4833 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4834 struct i915_vma *vma;
4836 if (vm == &dev_priv->mm.aliasing_ppgtt->base)
4837 vm = &dev_priv->gtt.base;
4839 BUG_ON(list_empty(&o->vma_list));
4840 list_for_each_entry(vma, &o->vma_list, vma_link) {
4842 return vma->node.start;
4848 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
4849 struct i915_address_space *vm)
4851 struct i915_vma *vma;
4853 list_for_each_entry(vma, &o->vma_list, vma_link)
4854 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
4860 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
4862 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4863 struct i915_address_space *vm;
4865 list_for_each_entry(vm, &dev_priv->vm_list, global_link)
4866 if (i915_gem_obj_bound(o, vm))
4872 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
4873 struct i915_address_space *vm)
4875 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4876 struct i915_vma *vma;
4878 if (vm == &dev_priv->mm.aliasing_ppgtt->base)
4879 vm = &dev_priv->gtt.base;
4881 BUG_ON(list_empty(&o->vma_list));
4883 list_for_each_entry(vma, &o->vma_list, vma_link)
4885 return vma->node.size;
4890 static unsigned long
4891 i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
4893 struct drm_i915_private *dev_priv =
4894 container_of(shrinker,
4895 struct drm_i915_private,
4896 mm.inactive_shrinker);
4897 struct drm_device *dev = dev_priv->dev;
4898 int nr_to_scan = sc->nr_to_scan;
4899 unsigned long freed;
4902 if (!mutex_trylock(&dev->struct_mutex)) {
4903 if (!mutex_is_locked_by(&dev->struct_mutex, current))
4906 if (dev_priv->mm.shrinker_no_lock_stealing)
4912 freed = i915_gem_purge(dev_priv, nr_to_scan);
4913 if (freed < nr_to_scan)
4914 freed += __i915_gem_shrink(dev_priv, nr_to_scan,
4916 if (freed < nr_to_scan)
4917 freed += i915_gem_shrink_all(dev_priv);
4920 mutex_unlock(&dev->struct_mutex);
4924 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4925 struct i915_address_space *vm)
4927 struct i915_vma *vma;
4928 list_for_each_entry(vma, &obj->vma_list, vma_link)
4936 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
4937 struct i915_address_space *vm)
4939 struct i915_vma *vma;
4941 vma = i915_gem_obj_to_vma(obj, vm);
4943 vma = i915_gem_vma_create(obj, vm);