drivers/gpu/drm/i915/i915_gem.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include <drm/drmP.h>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/shmem_fs.h>
35 #include <linux/slab.h>
36 #include <linux/swap.h>
37 #include <linux/pci.h>
38 #include <linux/dma-buf.h>
39
40 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
41 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
42 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
43                                                     unsigned alignment,
44                                                     bool map_and_fenceable,
45                                                     bool nonblocking);
46 static int i915_gem_phys_pwrite(struct drm_device *dev,
47                                 struct drm_i915_gem_object *obj,
48                                 struct drm_i915_gem_pwrite *args,
49                                 struct drm_file *file);
50
51 static void i915_gem_write_fence(struct drm_device *dev, int reg,
52                                  struct drm_i915_gem_object *obj);
53 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
54                                          struct drm_i915_fence_reg *fence,
55                                          bool enable);
56
57 static int i915_gem_inactive_shrink(struct shrinker *shrinker,
58                                     struct shrink_control *sc);
59 static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
60 static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
61 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
62
63 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
64 {
65         if (obj->tiling_mode)
66                 i915_gem_release_mmap(obj);
67
68         /* As we do not have an associated fence register, we will force
69          * a tiling change if we ever need to acquire one.
70          */
71         obj->fence_dirty = false;
72         obj->fence_reg = I915_FENCE_REG_NONE;
73 }
74
75 /* some bookkeeping */
76 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
77                                   size_t size)
78 {
79         spin_lock(&dev_priv->mm.object_stat_lock);
80         dev_priv->mm.object_count++;
81         dev_priv->mm.object_memory += size;
82         spin_unlock(&dev_priv->mm.object_stat_lock);
83 }
84
85 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
86                                      size_t size)
87 {
88         spin_lock(&dev_priv->mm.object_stat_lock);
89         dev_priv->mm.object_count--;
90         dev_priv->mm.object_memory -= size;
91         spin_unlock(&dev_priv->mm.object_stat_lock);
92 }
93
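/* Wait (up to 10 seconds) for a pending GPU reset to complete so that GEM
 * state is not touched mid-reset.  Returns 0 once the reset is no longer in
 * progress (or the GPU is declared terminally wedged), -EIO if the wait
 * times out, and a negative errno if interrupted by a signal.
 */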
94 static int
95 i915_gem_wait_for_error(struct i915_gpu_error *error)
96 {
97         int ret;
98
99 #define EXIT_COND (!i915_reset_in_progress(error) || \
100                    i915_terminally_wedged(error))
101         if (EXIT_COND)
102                 return 0;
103
104         /*
105          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
106          * userspace. If it takes that long something really bad is going on and
107          * we should simply try to bail out and fail as gracefully as possible.
108          */
109         ret = wait_event_interruptible_timeout(error->reset_queue,
110                                                EXIT_COND,
111                                                10*HZ);
112         if (ret == 0) {
113                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
114                 return -EIO;
115         } else if (ret < 0) {
116                 return ret;
117         }
118 #undef EXIT_COND
119
120         return 0;
121 }
122
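/* Grab dev->struct_mutex for GEM work, first waiting for any pending GPU
 * reset and allowing the lock acquisition to be interrupted by a signal.
 */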
123 int i915_mutex_lock_interruptible(struct drm_device *dev)
124 {
125         struct drm_i915_private *dev_priv = dev->dev_private;
126         int ret;
127
128         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
129         if (ret)
130                 return ret;
131
132         ret = mutex_lock_interruptible(&dev->struct_mutex);
133         if (ret)
134                 return ret;
135
136         WARN_ON(i915_verify_lists(dev));
137         return 0;
138 }
139
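/* An object is "inactive" when it is bound into the global GTT but is not
 * referenced by any command still being executed on the GPU.
 */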
140 static inline bool
141 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
142 {
143         return i915_gem_obj_ggtt_bound(obj) && !obj->active;
144 }
145
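/* Legacy (UMS) ioctl that lets userspace carve out the GTT range used for
 * GEM.  It is rejected with -ENODEV under kernel modesetting and on gen5+.
 */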
146 int
147 i915_gem_init_ioctl(struct drm_device *dev, void *data,
148                     struct drm_file *file)
149 {
150         struct drm_i915_private *dev_priv = dev->dev_private;
151         struct drm_i915_gem_init *args = data;
152
153         if (drm_core_check_feature(dev, DRIVER_MODESET))
154                 return -ENODEV;
155
156         if (args->gtt_start >= args->gtt_end ||
157             (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
158                 return -EINVAL;
159
160         /* GEM with user mode setting was never supported on ilk and later. */
161         if (INTEL_INFO(dev)->gen >= 5)
162                 return -ENODEV;
163
164         mutex_lock(&dev->struct_mutex);
165         i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
166                                   args->gtt_end);
167         dev_priv->gtt.mappable_end = args->gtt_end;
168         mutex_unlock(&dev->struct_mutex);
169
170         return 0;
171 }
172
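/* Report the total size of the GTT and how much of it is still available,
 * i.e. not consumed by currently pinned objects.
 */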
173 int
174 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
175                             struct drm_file *file)
176 {
177         struct drm_i915_private *dev_priv = dev->dev_private;
178         struct drm_i915_gem_get_aperture *args = data;
179         struct drm_i915_gem_object *obj;
180         size_t pinned;
181
182         pinned = 0;
183         mutex_lock(&dev->struct_mutex);
184         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
185                 if (obj->pin_count)
186                         pinned += i915_gem_obj_ggtt_size(obj);
187         mutex_unlock(&dev->struct_mutex);
188
189         args->aper_size = dev_priv->gtt.base.total;
190         args->aper_available_size = args->aper_size - pinned;
191
192         return 0;
193 }
194
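/* GEM object structs are allocated from (and returned to) a dedicated,
 * zero-initialised slab cache owned by the driver.
 */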
195 void *i915_gem_object_alloc(struct drm_device *dev)
196 {
197         struct drm_i915_private *dev_priv = dev->dev_private;
198         return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
199 }
200
201 void i915_gem_object_free(struct drm_i915_gem_object *obj)
202 {
203         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
204         kmem_cache_free(dev_priv->slab, obj);
205 }
206
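/* Common backend for the create and dumb_create ioctls: round the requested
 * size up to a whole number of pages, allocate the backing object and return
 * a new handle referencing it.
 */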
207 static int
208 i915_gem_create(struct drm_file *file,
209                 struct drm_device *dev,
210                 uint64_t size,
211                 uint32_t *handle_p)
212 {
213         struct drm_i915_gem_object *obj;
214         int ret;
215         u32 handle;
216
217         size = roundup(size, PAGE_SIZE);
218         if (size == 0)
219                 return -EINVAL;
220
221         /* Allocate the new object */
222         obj = i915_gem_alloc_object(dev, size);
223         if (obj == NULL)
224                 return -ENOMEM;
225
226         ret = drm_gem_handle_create(file, &obj->base, &handle);
227         /* drop reference from allocate - handle holds it now */
228         drm_gem_object_unreference_unlocked(&obj->base);
229         if (ret)
230                 return ret;
231
232         *handle_p = handle;
233         return 0;
234 }
235
236 int
237 i915_gem_dumb_create(struct drm_file *file,
238                      struct drm_device *dev,
239                      struct drm_mode_create_dumb *args)
240 {
241         /* have to work out size/pitch and return them */
242         args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
243         args->size = args->pitch * args->height;
244         return i915_gem_create(file, dev,
245                                args->size, &args->handle);
246 }
247
248 /**
249  * Creates a new mm object and returns a handle to it.
250  */
251 int
252 i915_gem_create_ioctl(struct drm_device *dev, void *data,
253                       struct drm_file *file)
254 {
255         struct drm_i915_gem_create *args = data;
256
257         return i915_gem_create(file, dev,
258                                args->size, &args->handle);
259 }
260
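/* Copy helpers for objects using bit-17 swizzling.  Data is moved in chunks
 * that never cross a 64-byte cacheline, with bit 6 of the object offset
 * flipped on each access to undo the swizzle applied by the hardware.
 */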
261 static inline int
262 __copy_to_user_swizzled(char __user *cpu_vaddr,
263                         const char *gpu_vaddr, int gpu_offset,
264                         int length)
265 {
266         int ret, cpu_offset = 0;
267
268         while (length > 0) {
269                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
270                 int this_length = min(cacheline_end - gpu_offset, length);
271                 int swizzled_gpu_offset = gpu_offset ^ 64;
272
273                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
274                                      gpu_vaddr + swizzled_gpu_offset,
275                                      this_length);
276                 if (ret)
277                         return ret + length;
278
279                 cpu_offset += this_length;
280                 gpu_offset += this_length;
281                 length -= this_length;
282         }
283
284         return 0;
285 }
286
287 static inline int
288 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
289                           const char __user *cpu_vaddr,
290                           int length)
291 {
292         int ret, cpu_offset = 0;
293
294         while (length > 0) {
295                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
296                 int this_length = min(cacheline_end - gpu_offset, length);
297                 int swizzled_gpu_offset = gpu_offset ^ 64;
298
299                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
300                                        cpu_vaddr + cpu_offset,
301                                        this_length);
302                 if (ret)
303                         return ret + length;
304
305                 cpu_offset += this_length;
306                 gpu_offset += this_length;
307                 length -= this_length;
308         }
309
310         return 0;
311 }
312
313 /* Per-page copy function for the shmem pread fastpath.
314  * Flushes invalid cachelines before reading the target if
315  * needs_clflush is set. */
316 static int
317 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
318                  char __user *user_data,
319                  bool page_do_bit17_swizzling, bool needs_clflush)
320 {
321         char *vaddr;
322         int ret;
323
324         if (unlikely(page_do_bit17_swizzling))
325                 return -EINVAL;
326
327         vaddr = kmap_atomic(page);
328         if (needs_clflush)
329                 drm_clflush_virt_range(vaddr + shmem_page_offset,
330                                        page_length);
331         ret = __copy_to_user_inatomic(user_data,
332                                       vaddr + shmem_page_offset,
333                                       page_length);
334         kunmap_atomic(vaddr);
335
336         return ret ? -EFAULT : 0;
337 }
338
339 static void
340 shmem_clflush_swizzled_range(char *addr, unsigned long length,
341                              bool swizzled)
342 {
343         if (unlikely(swizzled)) {
344                 unsigned long start = (unsigned long) addr;
345                 unsigned long end = (unsigned long) addr + length;
346
347                 /* For swizzling simply ensure that we always flush both
348                  * channels. Lame, but simple and it works. Swizzled
349                  * pwrite/pread is far from a hotpath - current userspace
350                  * doesn't use it at all. */
351                 start = round_down(start, 128);
352                 end = round_up(end, 128);
353
354                 drm_clflush_virt_range((void *)start, end - start);
355         } else {
356                 drm_clflush_virt_range(addr, length);
357         }
358
359 }
360
361 /* The only difference from the fast-path function is that this one can
362  * handle bit17 swizzling and uses the non-atomic copy and kmap functions. */
363 static int
364 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
365                  char __user *user_data,
366                  bool page_do_bit17_swizzling, bool needs_clflush)
367 {
368         char *vaddr;
369         int ret;
370
371         vaddr = kmap(page);
372         if (needs_clflush)
373                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
374                                              page_length,
375                                              page_do_bit17_swizzling);
376
377         if (page_do_bit17_swizzling)
378                 ret = __copy_to_user_swizzled(user_data,
379                                               vaddr, shmem_page_offset,
380                                               page_length);
381         else
382                 ret = __copy_to_user(user_data,
383                                      vaddr + shmem_page_offset,
384                                      page_length);
385         kunmap(page);
386
387         return ret ? -EFAULT : 0;
388 }
389
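/* pread via the shmem backing pages: try the atomic kmap fastpath for each
 * page first and, if that faults, drop struct_mutex, prefault the user
 * buffer once and fall back to the sleeping slow path.
 */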
390 static int
391 i915_gem_shmem_pread(struct drm_device *dev,
392                      struct drm_i915_gem_object *obj,
393                      struct drm_i915_gem_pread *args,
394                      struct drm_file *file)
395 {
396         char __user *user_data;
397         ssize_t remain;
398         loff_t offset;
399         int shmem_page_offset, page_length, ret = 0;
400         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
401         int prefaulted = 0;
402         int needs_clflush = 0;
403         struct sg_page_iter sg_iter;
404
405         user_data = to_user_ptr(args->data_ptr);
406         remain = args->size;
407
408         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
409
410         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
411                 /* If we're not in the cpu read domain, move the object into the
412                  * gtt read domain and manually flush cachelines (if required).
413                  * This optimizes for the case when the gpu will dirty the data
414                  * again anyway before the next pread happens. */
415                 if (obj->cache_level == I915_CACHE_NONE)
416                         needs_clflush = 1;
417                 if (i915_gem_obj_ggtt_bound(obj)) {
418                         ret = i915_gem_object_set_to_gtt_domain(obj, false);
419                         if (ret)
420                                 return ret;
421                 }
422         }
423
424         ret = i915_gem_object_get_pages(obj);
425         if (ret)
426                 return ret;
427
428         i915_gem_object_pin_pages(obj);
429
430         offset = args->offset;
431
432         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
433                          offset >> PAGE_SHIFT) {
434                 struct page *page = sg_page_iter_page(&sg_iter);
435
436                 if (remain <= 0)
437                         break;
438
439                 /* Operation in this page
440                  *
441                  * shmem_page_offset = offset within page in shmem file
442                  * page_length = bytes to copy for this page
443                  */
444                 shmem_page_offset = offset_in_page(offset);
445                 page_length = remain;
446                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
447                         page_length = PAGE_SIZE - shmem_page_offset;
448
449                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
450                         (page_to_phys(page) & (1 << 17)) != 0;
451
452                 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
453                                        user_data, page_do_bit17_swizzling,
454                                        needs_clflush);
455                 if (ret == 0)
456                         goto next_page;
457
458                 mutex_unlock(&dev->struct_mutex);
459
460                 if (likely(!i915_prefault_disable) && !prefaulted) {
461                         ret = fault_in_multipages_writeable(user_data, remain);
462                         /* Userspace is tricking us, but we've already clobbered
463                          * its pages with the prefault and promised to write the
464                          * data up to the first fault. Hence ignore any errors
465                          * and just continue. */
466                         (void)ret;
467                         prefaulted = 1;
468                 }
469
470                 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
471                                        user_data, page_do_bit17_swizzling,
472                                        needs_clflush);
473
474                 mutex_lock(&dev->struct_mutex);
475
476 next_page:
477                 mark_page_accessed(page);
478
479                 if (ret)
480                         goto out;
481
482                 remain -= page_length;
483                 user_data += page_length;
484                 offset += page_length;
485         }
486
487 out:
488         i915_gem_object_unpin_pages(obj);
489
490         return ret;
491 }
492
493 /**
494  * Reads data from the object referenced by handle.
495  *
496  * On error, the contents of *data are undefined.
497  */
498 int
499 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
500                      struct drm_file *file)
501 {
502         struct drm_i915_gem_pread *args = data;
503         struct drm_i915_gem_object *obj;
504         int ret = 0;
505
506         if (args->size == 0)
507                 return 0;
508
509         if (!access_ok(VERIFY_WRITE,
510                        to_user_ptr(args->data_ptr),
511                        args->size))
512                 return -EFAULT;
513
514         ret = i915_mutex_lock_interruptible(dev);
515         if (ret)
516                 return ret;
517
518         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
519         if (&obj->base == NULL) {
520                 ret = -ENOENT;
521                 goto unlock;
522         }
523
524         /* Bounds check source.  */
525         if (args->offset > obj->base.size ||
526             args->size > obj->base.size - args->offset) {
527                 ret = -EINVAL;
528                 goto out;
529         }
530
531         /* prime objects have no backing filp to GEM pread/pwrite
532          * pages from.
533          */
534         if (!obj->base.filp) {
535                 ret = -EINVAL;
536                 goto out;
537         }
538
539         trace_i915_gem_object_pread(obj, args->offset, args->size);
540
541         ret = i915_gem_shmem_pread(dev, obj, args, file);
542
543 out:
544         drm_gem_object_unreference(&obj->base);
545 unlock:
546         mutex_unlock(&dev->struct_mutex);
547         return ret;
548 }
549
550 /* This is the fast write path which cannot handle
551  * page faults in the source data
552  */
553
554 static inline int
555 fast_user_write(struct io_mapping *mapping,
556                 loff_t page_base, int page_offset,
557                 char __user *user_data,
558                 int length)
559 {
560         void __iomem *vaddr_atomic;
561         void *vaddr;
562         unsigned long unwritten;
563
564         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
565         /* We can use the cpu mem copy function because this is X86. */
566         vaddr = (void __force*)vaddr_atomic + page_offset;
567         unwritten = __copy_from_user_inatomic_nocache(vaddr,
568                                                       user_data, length);
569         io_mapping_unmap_atomic(vaddr_atomic);
570         return unwritten;
571 }
572
573 /**
574  * This is the fast pwrite path, where we copy the data directly from the
575  * user into the GTT, uncached.
576  */
577 static int
578 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
579                          struct drm_i915_gem_object *obj,
580                          struct drm_i915_gem_pwrite *args,
581                          struct drm_file *file)
582 {
583         drm_i915_private_t *dev_priv = dev->dev_private;
584         ssize_t remain;
585         loff_t offset, page_base;
586         char __user *user_data;
587         int page_offset, page_length, ret;
588
589         ret = i915_gem_object_pin(obj, 0, true, true);
590         if (ret)
591                 goto out;
592
593         ret = i915_gem_object_set_to_gtt_domain(obj, true);
594         if (ret)
595                 goto out_unpin;
596
597         ret = i915_gem_object_put_fence(obj);
598         if (ret)
599                 goto out_unpin;
600
601         user_data = to_user_ptr(args->data_ptr);
602         remain = args->size;
603
604         offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
605
606         while (remain > 0) {
607                 /* Operation in this page
608                  *
609                  * page_base = page offset within aperture
610                  * page_offset = offset within page
611                  * page_length = bytes to copy for this page
612                  */
613                 page_base = offset & PAGE_MASK;
614                 page_offset = offset_in_page(offset);
615                 page_length = remain;
616                 if ((page_offset + remain) > PAGE_SIZE)
617                         page_length = PAGE_SIZE - page_offset;
618
619                 /* If we get a fault while copying data, then (presumably) our
620                  * source page isn't available.  Return the error and we'll
621                  * retry in the slow path.
622                  */
623                 if (fast_user_write(dev_priv->gtt.mappable, page_base,
624                                     page_offset, user_data, page_length)) {
625                         ret = -EFAULT;
626                         goto out_unpin;
627                 }
628
629                 remain -= page_length;
630                 user_data += page_length;
631                 offset += page_length;
632         }
633
634 out_unpin:
635         i915_gem_object_unpin(obj);
636 out:
637         return ret;
638 }
639
640 /* Per-page copy function for the shmem pwrite fastpath.
641  * Flushes invalid cachelines before writing to the target if
642  * needs_clflush_before is set and flushes out any written cachelines after
643  * writing if needs_clflush_after is set. */
644 static int
645 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
646                   char __user *user_data,
647                   bool page_do_bit17_swizzling,
648                   bool needs_clflush_before,
649                   bool needs_clflush_after)
650 {
651         char *vaddr;
652         int ret;
653
654         if (unlikely(page_do_bit17_swizzling))
655                 return -EINVAL;
656
657         vaddr = kmap_atomic(page);
658         if (needs_clflush_before)
659                 drm_clflush_virt_range(vaddr + shmem_page_offset,
660                                        page_length);
661         ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
662                                                 user_data,
663                                                 page_length);
664         if (needs_clflush_after)
665                 drm_clflush_virt_range(vaddr + shmem_page_offset,
666                                        page_length);
667         kunmap_atomic(vaddr);
668
669         return ret ? -EFAULT : 0;
670 }
671
672 /* The only difference from the fast-path function is that this one can
673  * handle bit17 swizzling and uses the non-atomic copy and kmap functions. */
674 static int
675 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
676                   char __user *user_data,
677                   bool page_do_bit17_swizzling,
678                   bool needs_clflush_before,
679                   bool needs_clflush_after)
680 {
681         char *vaddr;
682         int ret;
683
684         vaddr = kmap(page);
685         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
686                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
687                                              page_length,
688                                              page_do_bit17_swizzling);
689         if (page_do_bit17_swizzling)
690                 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
691                                                 user_data,
692                                                 page_length);
693         else
694                 ret = __copy_from_user(vaddr + shmem_page_offset,
695                                        user_data,
696                                        page_length);
697         if (needs_clflush_after)
698                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
699                                              page_length,
700                                              page_do_bit17_swizzling);
701         kunmap(page);
702
703         return ret ? -EFAULT : 0;
704 }
705
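/* pwrite via the shmem backing pages.  Mirrors the pread path: an atomic
 * per-page fastpath with a slow path that drops struct_mutex, plus the
 * clflushes needed before/after the copy to keep uncached objects coherent.
 */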
706 static int
707 i915_gem_shmem_pwrite(struct drm_device *dev,
708                       struct drm_i915_gem_object *obj,
709                       struct drm_i915_gem_pwrite *args,
710                       struct drm_file *file)
711 {
712         ssize_t remain;
713         loff_t offset;
714         char __user *user_data;
715         int shmem_page_offset, page_length, ret = 0;
716         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
717         int hit_slowpath = 0;
718         int needs_clflush_after = 0;
719         int needs_clflush_before = 0;
720         struct sg_page_iter sg_iter;
721
722         user_data = to_user_ptr(args->data_ptr);
723         remain = args->size;
724
725         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
726
727         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
728                 /* If we're not in the cpu write domain, move the object into the
729                  * gtt write domain and manually flush cachelines (if required).
730                  * This optimizes for the case when the gpu will use the data
731                  * right away and we therefore have to clflush anyway. */
732                 if (obj->cache_level == I915_CACHE_NONE)
733                         needs_clflush_after = 1;
734                 if (i915_gem_obj_ggtt_bound(obj)) {
735                         ret = i915_gem_object_set_to_gtt_domain(obj, true);
736                         if (ret)
737                                 return ret;
738                 }
739         }
740         /* The same trick applies to invalidating partially written cachelines
741          * before writing. */
742         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
743             && obj->cache_level == I915_CACHE_NONE)
744                 needs_clflush_before = 1;
745
746         ret = i915_gem_object_get_pages(obj);
747         if (ret)
748                 return ret;
749
750         i915_gem_object_pin_pages(obj);
751
752         offset = args->offset;
753         obj->dirty = 1;
754
755         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
756                          offset >> PAGE_SHIFT) {
757                 struct page *page = sg_page_iter_page(&sg_iter);
758                 int partial_cacheline_write;
759
760                 if (remain <= 0)
761                         break;
762
763                 /* Operation in this page
764                  *
765                  * shmem_page_offset = offset within page in shmem file
766                  * page_length = bytes to copy for this page
767                  */
768                 shmem_page_offset = offset_in_page(offset);
769
770                 page_length = remain;
771                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
772                         page_length = PAGE_SIZE - shmem_page_offset;
773
774                 /* If we don't overwrite a cacheline completely we need to be
775                  * careful to have up-to-date data by first clflushing. Don't
776                  * overcomplicate things and flush the entire range being written. */
777                 partial_cacheline_write = needs_clflush_before &&
778                         ((shmem_page_offset | page_length)
779                                 & (boot_cpu_data.x86_clflush_size - 1));
780
781                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
782                         (page_to_phys(page) & (1 << 17)) != 0;
783
784                 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
785                                         user_data, page_do_bit17_swizzling,
786                                         partial_cacheline_write,
787                                         needs_clflush_after);
788                 if (ret == 0)
789                         goto next_page;
790
791                 hit_slowpath = 1;
792                 mutex_unlock(&dev->struct_mutex);
793                 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
794                                         user_data, page_do_bit17_swizzling,
795                                         partial_cacheline_write,
796                                         needs_clflush_after);
797
798                 mutex_lock(&dev->struct_mutex);
799
800 next_page:
801                 set_page_dirty(page);
802                 mark_page_accessed(page);
803
804                 if (ret)
805                         goto out;
806
807                 remain -= page_length;
808                 user_data += page_length;
809                 offset += page_length;
810         }
811
812 out:
813         i915_gem_object_unpin_pages(obj);
814
815         if (hit_slowpath) {
816                 /*
817                  * Fixup: Flush cpu caches in case we didn't flush the dirty
818                  * cachelines in-line while writing and the object moved
819                  * out of the cpu write domain while we've dropped the lock.
820                  */
821                 if (!needs_clflush_after &&
822                     obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
823                         i915_gem_clflush_object(obj);
824                         i915_gem_chipset_flush(dev);
825                 }
826         }
827
828         if (needs_clflush_after)
829                 i915_gem_chipset_flush(dev);
830
831         return ret;
832 }
833
834 /**
835  * Writes data to the object referenced by handle.
836  *
837  * On error, the contents of the buffer that were to be modified are undefined.
838  */
839 int
840 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
841                       struct drm_file *file)
842 {
843         struct drm_i915_gem_pwrite *args = data;
844         struct drm_i915_gem_object *obj;
845         int ret;
846
847         if (args->size == 0)
848                 return 0;
849
850         if (!access_ok(VERIFY_READ,
851                        to_user_ptr(args->data_ptr),
852                        args->size))
853                 return -EFAULT;
854
855         if (likely(!i915_prefault_disable)) {
856                 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
857                                                    args->size);
858                 if (ret)
859                         return -EFAULT;
860         }
861
862         ret = i915_mutex_lock_interruptible(dev);
863         if (ret)
864                 return ret;
865
866         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
867         if (&obj->base == NULL) {
868                 ret = -ENOENT;
869                 goto unlock;
870         }
871
872         /* Bounds check destination. */
873         if (args->offset > obj->base.size ||
874             args->size > obj->base.size - args->offset) {
875                 ret = -EINVAL;
876                 goto out;
877         }
878
879         /* prime objects have no backing filp to GEM pread/pwrite
880          * pages from.
881          */
882         if (!obj->base.filp) {
883                 ret = -EINVAL;
884                 goto out;
885         }
886
887         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
888
889         ret = -EFAULT;
890         /* We can only do the GTT pwrite on untiled buffers, as otherwise
891          * it would end up going through the fenced access, and we'll get
892          * different detiling behavior between reading and writing.
893          * pread/pwrite currently are reading and writing from the CPU
894          * perspective, requiring manual detiling by the client.
895          */
896         if (obj->phys_obj) {
897                 ret = i915_gem_phys_pwrite(dev, obj, args, file);
898                 goto out;
899         }
900
901         if (obj->cache_level == I915_CACHE_NONE &&
902             obj->tiling_mode == I915_TILING_NONE &&
903             obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
904                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
905                 /* Note that the gtt paths might fail with non-page-backed user
906                  * pointers (e.g. gtt mappings when moving data between
907                  * textures). Fallback to the shmem path in that case. */
908         }
909
910         if (ret == -EFAULT || ret == -ENOSPC)
911                 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
912
913 out:
914         drm_gem_object_unreference(&obj->base);
915 unlock:
916         mutex_unlock(&dev->struct_mutex);
917         return ret;
918 }
919
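/* Check the GPU error state.  While a reset is in progress, non-interruptible
 * callers and a terminally wedged GPU get -EIO and everyone else gets
 * -EAGAIN; otherwise returns 0.
 */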
920 int
921 i915_gem_check_wedge(struct i915_gpu_error *error,
922                      bool interruptible)
923 {
924         if (i915_reset_in_progress(error)) {
925                 /* Non-interruptible callers can't handle -EAGAIN, hence return
926                  * -EIO unconditionally for these. */
927                 if (!interruptible)
928                         return -EIO;
929
930                 /* Recovery complete, but the reset failed ... */
931                 if (i915_terminally_wedged(error))
932                         return -EIO;
933
934                 return -EAGAIN;
935         }
936
937         return 0;
938 }
939
940 /*
941  * Compare seqno against outstanding lazy request. Emit a request if they are
942  * equal.
943  */
944 static int
945 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
946 {
947         int ret;
948
949         BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
950
951         ret = 0;
952         if (seqno == ring->outstanding_lazy_request)
953                 ret = i915_add_request(ring, NULL);
954
955         return ret;
956 }
957
958 /**
959  * __wait_seqno - wait until execution of seqno has finished
960  * @ring: the ring expected to report seqno
961  * @seqno: the sequence number we are waiting for
962  * @reset_counter: reset sequence associated with the given seqno
963  * @interruptible: do an interruptible wait (normally yes)
964  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
965  *
966  * Note: It is of utmost importance that the passed in seqno and reset_counter
967  * values have been read by the caller in an smp safe manner. Where read-side
968  * locks are involved, it is sufficient to read the reset_counter before
969  * unlocking the lock that protects the seqno. For lockless tricks, the
970  * reset_counter _must_ be read before, and an appropriate smp_rmb must be
971  * inserted.
972  *
973  * Returns 0 if the seqno was found within the allotted time. Otherwise returns
974  * the errno, with the remaining time filled into the timeout argument.
975  */
976 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
977                         unsigned reset_counter,
978                         bool interruptible, struct timespec *timeout)
979 {
980         drm_i915_private_t *dev_priv = ring->dev->dev_private;
981         struct timespec before, now, wait_time={1,0};
982         unsigned long timeout_jiffies;
983         long end;
984         bool wait_forever = true;
985         int ret;
986
987         if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
988                 return 0;
989
990         trace_i915_gem_request_wait_begin(ring, seqno);
991
992         if (timeout != NULL) {
993                 wait_time = *timeout;
994                 wait_forever = false;
995         }
996
997         timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
998
999         if (WARN_ON(!ring->irq_get(ring)))
1000                 return -ENODEV;
1001
1002         /* Record current time in case interrupted by signal, or wedged */
1003         getrawmonotonic(&before);
1004
1005 #define EXIT_COND \
1006         (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
1007          i915_reset_in_progress(&dev_priv->gpu_error) || \
1008          reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1009         do {
1010                 if (interruptible)
1011                         end = wait_event_interruptible_timeout(ring->irq_queue,
1012                                                                EXIT_COND,
1013                                                                timeout_jiffies);
1014                 else
1015                         end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1016                                                  timeout_jiffies);
1017
1018                 /* We need to check whether any gpu reset happened in between
1019                  * the caller grabbing the seqno and now ... */
1020                 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1021                         end = -EAGAIN;
1022
1023                 /* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
1024                  * gone. */
1025                 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1026                 if (ret)
1027                         end = ret;
1028         } while (end == 0 && wait_forever);
1029
1030         getrawmonotonic(&now);
1031
1032         ring->irq_put(ring);
1033         trace_i915_gem_request_wait_end(ring, seqno);
1034 #undef EXIT_COND
1035
1036         if (timeout) {
1037                 struct timespec sleep_time = timespec_sub(now, before);
1038                 *timeout = timespec_sub(*timeout, sleep_time);
1039                 if (!timespec_valid(timeout)) /* i.e. negative time remains */
1040                         set_normalized_timespec(timeout, 0, 0);
1041         }
1042
1043         switch (end) {
1044         case -EIO:
1045         case -EAGAIN: /* Wedged */
1046         case -ERESTARTSYS: /* Signal */
1047                 return (int)end;
1048         case 0: /* Timeout */
1049                 return -ETIME;
1050         default: /* Completed */
1051                 WARN_ON(end < 0); /* We're not aware of other errors */
1052                 return 0;
1053         }
1054 }
1055
1056 /**
1057  * Waits for a sequence number to be signaled, and cleans up the
1058  * request and object lists appropriately for that event.
1059  */
1060 int
1061 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1062 {
1063         struct drm_device *dev = ring->dev;
1064         struct drm_i915_private *dev_priv = dev->dev_private;
1065         bool interruptible = dev_priv->mm.interruptible;
1066         int ret;
1067
1068         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1069         BUG_ON(seqno == 0);
1070
1071         ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1072         if (ret)
1073                 return ret;
1074
1075         ret = i915_gem_check_olr(ring, seqno);
1076         if (ret)
1077                 return ret;
1078
1079         return __wait_seqno(ring, seqno,
1080                             atomic_read(&dev_priv->gpu_error.reset_counter),
1081                             interruptible, NULL);
1082 }
1083
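/* Common tail for the blocking and nonblocking wait_rendering variants:
 * retire whatever the wait completed and drop the GPU write tracking now
 * that any outstanding write to the object is known to have finished.
 */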
1084 static int
1085 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
1086                                      struct intel_ring_buffer *ring)
1087 {
1088         i915_gem_retire_requests_ring(ring);
1089
1090         /* Manually manage the write flush as we may have not yet
1091          * retired the buffer.
1092          *
1093          * Note that the last_write_seqno is always the earlier of
1094          * the two (read/write) seqnos, so if we have successfully waited,
1095          * we know we have passed the last write.
1096          */
1097         obj->last_write_seqno = 0;
1098         obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1099
1100         return 0;
1101 }
1102
1103 /**
1104  * Ensures that all rendering to the object has completed and the object is
1105  * safe to unbind from the GTT or access from the CPU.
1106  */
1107 static __must_check int
1108 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1109                                bool readonly)
1110 {
1111         struct intel_ring_buffer *ring = obj->ring;
1112         u32 seqno;
1113         int ret;
1114
1115         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1116         if (seqno == 0)
1117                 return 0;
1118
1119         ret = i915_wait_seqno(ring, seqno);
1120         if (ret)
1121                 return ret;
1122
1123         return i915_gem_object_wait_rendering__tail(obj, ring);
1124 }
1125
1126 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1127  * as the object state may change during this call.
1128  */
1129 static __must_check int
1130 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1131                                             bool readonly)
1132 {
1133         struct drm_device *dev = obj->base.dev;
1134         struct drm_i915_private *dev_priv = dev->dev_private;
1135         struct intel_ring_buffer *ring = obj->ring;
1136         unsigned reset_counter;
1137         u32 seqno;
1138         int ret;
1139
1140         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1141         BUG_ON(!dev_priv->mm.interruptible);
1142
1143         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1144         if (seqno == 0)
1145                 return 0;
1146
1147         ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
1148         if (ret)
1149                 return ret;
1150
1151         ret = i915_gem_check_olr(ring, seqno);
1152         if (ret)
1153                 return ret;
1154
1155         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1156         mutex_unlock(&dev->struct_mutex);
1157         ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
1158         mutex_lock(&dev->struct_mutex);
1159         if (ret)
1160                 return ret;
1161
1162         return i915_gem_object_wait_rendering__tail(obj, ring);
1163 }
1164
1165 /**
1166  * Called when user space prepares to use an object with the CPU, either
1167  * through the mmap ioctl's mapping or a GTT mapping.
1168  */
1169 int
1170 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1171                           struct drm_file *file)
1172 {
1173         struct drm_i915_gem_set_domain *args = data;
1174         struct drm_i915_gem_object *obj;
1175         uint32_t read_domains = args->read_domains;
1176         uint32_t write_domain = args->write_domain;
1177         int ret;
1178
1179         /* Only handle setting domains to types used by the CPU. */
1180         if (write_domain & I915_GEM_GPU_DOMAINS)
1181                 return -EINVAL;
1182
1183         if (read_domains & I915_GEM_GPU_DOMAINS)
1184                 return -EINVAL;
1185
1186         /* Having something in the write domain implies it's in the read
1187          * domain, and only that read domain.  Enforce that in the request.
1188          */
1189         if (write_domain != 0 && read_domains != write_domain)
1190                 return -EINVAL;
1191
1192         ret = i915_mutex_lock_interruptible(dev);
1193         if (ret)
1194                 return ret;
1195
1196         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1197         if (&obj->base == NULL) {
1198                 ret = -ENOENT;
1199                 goto unlock;
1200         }
1201
1202         /* Try to flush the object off the GPU without holding the lock.
1203          * We will repeat the flush holding the lock in the normal manner
1204          * to catch cases where we are gazumped.
1205          */
1206         ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
1207         if (ret)
1208                 goto unref;
1209
1210         if (read_domains & I915_GEM_DOMAIN_GTT) {
1211                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1212
1213                 /* Silently promote "you're not bound, there was nothing to do"
1214                  * to success, since the client was just asking us to
1215                  * make sure everything was done.
1216                  */
1217                 if (ret == -EINVAL)
1218                         ret = 0;
1219         } else {
1220                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1221         }
1222
1223 unref:
1224         drm_gem_object_unreference(&obj->base);
1225 unlock:
1226         mutex_unlock(&dev->struct_mutex);
1227         return ret;
1228 }
1229
1230 /**
1231  * Called when user space has done writes to this buffer
1232  */
1233 int
1234 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1235                          struct drm_file *file)
1236 {
1237         struct drm_i915_gem_sw_finish *args = data;
1238         struct drm_i915_gem_object *obj;
1239         int ret = 0;
1240
1241         ret = i915_mutex_lock_interruptible(dev);
1242         if (ret)
1243                 return ret;
1244
1245         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1246         if (&obj->base == NULL) {
1247                 ret = -ENOENT;
1248                 goto unlock;
1249         }
1250
1251         /* Pinned buffers may be scanout, so flush the cache */
1252         if (obj->pin_count)
1253                 i915_gem_object_flush_cpu_write_domain(obj);
1254
1255         drm_gem_object_unreference(&obj->base);
1256 unlock:
1257         mutex_unlock(&dev->struct_mutex);
1258         return ret;
1259 }
1260
1261 /**
1262  * Maps the contents of an object, returning the address it is mapped
1263  * into.
1264  *
1265  * While the mapping holds a reference on the contents of the object, it doesn't
1266  * imply a ref on the object itself.
1267  */
1268 int
1269 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1270                     struct drm_file *file)
1271 {
1272         struct drm_i915_gem_mmap *args = data;
1273         struct drm_gem_object *obj;
1274         unsigned long addr;
1275
1276         obj = drm_gem_object_lookup(dev, file, args->handle);
1277         if (obj == NULL)
1278                 return -ENOENT;
1279
1280         /* prime objects have no backing filp to GEM mmap
1281          * pages from.
1282          */
1283         if (!obj->filp) {
1284                 drm_gem_object_unreference_unlocked(obj);
1285                 return -EINVAL;
1286         }
1287
1288         addr = vm_mmap(obj->filp, 0, args->size,
1289                        PROT_READ | PROT_WRITE, MAP_SHARED,
1290                        args->offset);
1291         drm_gem_object_unreference_unlocked(obj);
1292         if (IS_ERR((void *)addr))
1293                 return addr;
1294
1295         args->addr_ptr = (uint64_t) addr;
1296
1297         return 0;
1298 }
1299
1300 /**
1301  * i915_gem_fault - fault a page into the GTT
1302  * @vma: VMA in question
1303  * @vmf: fault info
1304  *
1305  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1306  * from userspace.  The fault handler takes care of binding the object to
1307  * the GTT (if needed), allocating and programming a fence register (again,
1308  * only if needed based on whether the old reg is still valid or the object
1309  * is tiled) and inserting a new PTE into the faulting process.
1310  *
1311  * Note that the faulting process may involve evicting existing objects
1312  * from the GTT and/or fence registers to make room.  So performance may
1313  * suffer if the GTT working set is large or there are few fence registers
1314  * left.
1315  */
1316 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1317 {
1318         struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1319         struct drm_device *dev = obj->base.dev;
1320         drm_i915_private_t *dev_priv = dev->dev_private;
1321         pgoff_t page_offset;
1322         unsigned long pfn;
1323         int ret = 0;
1324         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1325
1326         /* We don't use vmf->pgoff since that has the fake offset */
1327         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1328                 PAGE_SHIFT;
1329
1330         ret = i915_mutex_lock_interruptible(dev);
1331         if (ret)
1332                 goto out;
1333
1334         trace_i915_gem_object_fault(obj, page_offset, true, write);
1335
1336         /* Access to snoopable pages through the GTT is incoherent. */
1337         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1338                 ret = -EINVAL;
1339                 goto unlock;
1340         }
1341
1342         /* Now bind it into the GTT if needed */
1343         ret = i915_gem_object_pin(obj, 0, true, false);
1344         if (ret)
1345                 goto unlock;
1346
1347         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1348         if (ret)
1349                 goto unpin;
1350
1351         ret = i915_gem_object_get_fence(obj);
1352         if (ret)
1353                 goto unpin;
1354
1355         obj->fault_mappable = true;
1356
1357         pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1358         pfn >>= PAGE_SHIFT;
1359         pfn += page_offset;
1360
1361         /* Finally, remap it using the new GTT offset */
1362         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1363 unpin:
1364         i915_gem_object_unpin(obj);
1365 unlock:
1366         mutex_unlock(&dev->struct_mutex);
1367 out:
1368         switch (ret) {
1369         case -EIO:
1370                 /* If this -EIO is due to a gpu hang, give the reset code a
1371                  * chance to clean up the mess. Otherwise return the proper
1372                  * SIGBUS. */
1373                 if (i915_terminally_wedged(&dev_priv->gpu_error))
1374                         return VM_FAULT_SIGBUS;
1375         case -EAGAIN:
1376                 /* Give the error handler a chance to run and move the
1377                  * objects off the GPU active list. Next time we service the
1378                  * fault, we should be able to transition the page into the
1379                  * GTT without touching the GPU (and so avoid further
1380                  * EIO/EAGAIN). If the GPU is wedged, then there is no issue
1381                  * with coherency, just lost writes.
1382                  */
1383                 set_need_resched();
1384         case 0:
1385         case -ERESTARTSYS:
1386         case -EINTR:
1387         case -EBUSY:
1388                 /*
1389                  * EBUSY is ok: this just means that another thread
1390                  * already did the job.
1391                  */
1392                 return VM_FAULT_NOPAGE;
1393         case -ENOMEM:
1394                 return VM_FAULT_OOM;
1395         case -ENOSPC:
1396                 return VM_FAULT_SIGBUS;
1397         default:
1398                 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1399                 return VM_FAULT_SIGBUS;
1400         }
1401 }
1402
1403 /**
1404  * i915_gem_release_mmap - remove physical page mappings
1405  * @obj: obj in question
1406  *
1407  * Preserve the reservation of the mmapping with the DRM core code, but
1408  * relinquish ownership of the pages back to the system.
1409  *
1410  * It is vital that we remove the page mapping if we have mapped a tiled
1411  * object through the GTT and then lose the fence register due to
1412  * resource pressure. Similarly if the object has been moved out of the
1413  * aperture, then pages mapped into userspace must be revoked. Removing the
1414  * mapping will then trigger a page fault on the next user access, allowing
1415  * fixup by i915_gem_fault().
1416  */
1417 void
1418 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1419 {
1420         if (!obj->fault_mappable)
1421                 return;
1422
1423         drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
1424         obj->fault_mappable = false;
1425 }
1426
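/* Return the amount of GTT space the object will occupy.  On gen4+ and for
 * untiled objects this is simply the object size; older chips need tiled
 * objects rounded up to a power-of-two fence region (512KiB minimum, 1MiB
 * on gen3).
 */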
1427 uint32_t
1428 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1429 {
1430         uint32_t gtt_size;
1431
1432         if (INTEL_INFO(dev)->gen >= 4 ||
1433             tiling_mode == I915_TILING_NONE)
1434                 return size;
1435
1436         /* Previous chips need a power-of-two fence region when tiling */
1437         if (INTEL_INFO(dev)->gen == 3)
1438                 gtt_size = 1024*1024;
1439         else
1440                 gtt_size = 512*1024;
1441
1442         while (gtt_size < size)
1443                 gtt_size <<= 1;
1444
1445         return gtt_size;
1446 }
1447
1448 /**
1449  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1450  * @obj: object to check
1451  *
1452  * Return the required GTT alignment for an object, taking into account
1453  * potential fence register mapping.
1454  */
1455 uint32_t
1456 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1457                            int tiling_mode, bool fenced)
1458 {
1459         /*
1460          * Minimum alignment is 4k (GTT page size), but might be greater
1461          * if a fence register is needed for the object.
1462          */
1463         if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1464             tiling_mode == I915_TILING_NONE)
1465                 return 4096;
1466
1467         /*
1468          * Previous chips need to be aligned to the size of the smallest
1469          * fence register that can contain the object.
1470          */
1471         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1472 }
1473
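/* Allocate the fake mmap offset userspace will use for this object,
 * progressively purging and then shrinking other objects if the mmap
 * address space has become too fragmented to find room.
 */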
1474 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1475 {
1476         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1477         int ret;
1478
1479         if (drm_vma_node_has_offset(&obj->base.vma_node))
1480                 return 0;
1481
1482         dev_priv->mm.shrinker_no_lock_stealing = true;
1483
1484         ret = drm_gem_create_mmap_offset(&obj->base);
1485         if (ret != -ENOSPC)
1486                 goto out;
1487
1488         /* Badly fragmented mmap space? The only way we can recover
1489          * space is by destroying unwanted objects. We can't randomly release
1490          * mmap_offsets as userspace expects them to be persistent for the
1491          * lifetime of the objects. The closest we can do is to release the
1492          * offsets on purgeable objects by truncating them and marking them purged,
1493          * which prevents userspace from ever using that object again.
1494          */
1495         i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1496         ret = drm_gem_create_mmap_offset(&obj->base);
1497         if (ret != -ENOSPC)
1498                 goto out;
1499
1500         i915_gem_shrink_all(dev_priv);
1501         ret = drm_gem_create_mmap_offset(&obj->base);
1502 out:
1503         dev_priv->mm.shrinker_no_lock_stealing = false;
1504
1505         return ret;
1506 }
1507
1508 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1509 {
1510         drm_gem_free_mmap_offset(&obj->base);
1511 }
1512
1513 int
1514 i915_gem_mmap_gtt(struct drm_file *file,
1515                   struct drm_device *dev,
1516                   uint32_t handle,
1517                   uint64_t *offset)
1518 {
1519         struct drm_i915_private *dev_priv = dev->dev_private;
1520         struct drm_i915_gem_object *obj;
1521         int ret;
1522
1523         ret = i915_mutex_lock_interruptible(dev);
1524         if (ret)
1525                 return ret;
1526
1527         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1528         if (&obj->base == NULL) {
1529                 ret = -ENOENT;
1530                 goto unlock;
1531         }
1532
1533         if (obj->base.size > dev_priv->gtt.mappable_end) {
1534                 ret = -E2BIG;
1535                 goto out;
1536         }
1537
1538         if (obj->madv != I915_MADV_WILLNEED) {
1539                 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1540                 ret = -EINVAL;
1541                 goto out;
1542         }
1543
1544         ret = i915_gem_object_create_mmap_offset(obj);
1545         if (ret)
1546                 goto out;
1547
1548         *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
1549
1550 out:
1551         drm_gem_object_unreference(&obj->base);
1552 unlock:
1553         mutex_unlock(&dev->struct_mutex);
1554         return ret;
1555 }
1556
1557 /**
1558  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1559  * @dev: DRM device
1560  * @data: GTT mapping ioctl data
1561  * @file: GEM object info
1562  *
1563  * Simply returns the fake offset to userspace so it can mmap it.
1564  * The mmap call will end up in drm_gem_mmap(), which will set things
1565  * up so we can get faults in the handler above.
1566  *
1567  * The fault handler will take care of binding the object into the GTT
1568  * (since it may have been evicted to make room for something), allocating
1569  * a fence register, and mapping the appropriate aperture address into
1570  * userspace.
1571  */
1572 int
1573 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1574                         struct drm_file *file)
1575 {
1576         struct drm_i915_gem_mmap_gtt *args = data;
1577
1578         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1579 }
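
/*
 * Minimal userspace sketch (illustrative only; assumes an open DRM fd,
 * a valid GEM handle and the object size are already available, and
 * omits error handling) showing how the fake offset returned by this
 * ioctl is consumed:
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <drm/i915_drm.h>
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	void *ptr = MAP_FAILED;
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.offset);
 *
 * The first access to ptr then faults into i915_gem_fault(), which
 * binds the object into the mappable GTT as described above.
 */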
1580
1581 /* Immediately discard the backing storage */
1582 static void
1583 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1584 {
1585         struct inode *inode;
1586
1587         i915_gem_object_free_mmap_offset(obj);
1588
1589         if (obj->base.filp == NULL)
1590                 return;
1591
1592         /* Our goal here is to return as much of the memory as possible
1593          * back to the system, as we are called from OOM.
1594          * To do this we must instruct the shmfs to drop all of its
1595          * backing pages, *now*.
1596          */
1597         inode = file_inode(obj->base.filp);
1598         shmem_truncate_range(inode, 0, (loff_t)-1);
1599
1600         obj->madv = __I915_MADV_PURGED;
1601 }
1602
1603 static inline int
1604 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1605 {
1606         return obj->madv == I915_MADV_DONTNEED;
1607 }
1608
1609 static void
1610 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1611 {
1612         struct sg_page_iter sg_iter;
1613         int ret;
1614
1615         BUG_ON(obj->madv == __I915_MADV_PURGED);
1616
1617         ret = i915_gem_object_set_to_cpu_domain(obj, true);
1618         if (ret) {
1619                 /* In the event of a disaster, abandon all caches and
1620                  * hope for the best.
1621                  */
1622                 WARN_ON(ret != -EIO);
1623                 i915_gem_clflush_object(obj);
1624                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1625         }
1626
1627         if (i915_gem_object_needs_bit17_swizzle(obj))
1628                 i915_gem_object_save_bit_17_swizzle(obj);
1629
1630         if (obj->madv == I915_MADV_DONTNEED)
1631                 obj->dirty = 0;
1632
1633         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
1634                 struct page *page = sg_page_iter_page(&sg_iter);
1635
1636                 if (obj->dirty)
1637                         set_page_dirty(page);
1638
1639                 if (obj->madv == I915_MADV_WILLNEED)
1640                         mark_page_accessed(page);
1641
1642                 page_cache_release(page);
1643         }
1644         obj->dirty = 0;
1645
1646         sg_free_table(obj->pages);
1647         kfree(obj->pages);
1648 }
1649
1650 int
1651 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1652 {
1653         const struct drm_i915_gem_object_ops *ops = obj->ops;
1654
1655         if (obj->pages == NULL)
1656                 return 0;
1657
1658         BUG_ON(i915_gem_obj_ggtt_bound(obj));
1659
1660         if (obj->pages_pin_count)
1661                 return -EBUSY;
1662
1663         /* ->put_pages might need to allocate memory for the bit17 swizzle
1664          * array, hence protect them from being reaped by removing them from gtt
1665          * lists early. */
1666         list_del(&obj->global_list);
1667
1668         ops->put_pages(obj);
1669         obj->pages = NULL;
1670
1671         if (i915_gem_object_is_purgeable(obj))
1672                 i915_gem_object_truncate(obj);
1673
1674         return 0;
1675 }
1676
1677 static long
1678 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1679                   bool purgeable_only)
1680 {
1681         struct drm_i915_gem_object *obj, *next;
1682         struct i915_address_space *vm = &dev_priv->gtt.base;
1683         long count = 0;
1684
1685         list_for_each_entry_safe(obj, next,
1686                                  &dev_priv->mm.unbound_list,
1687                                  global_list) {
1688                 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1689                     i915_gem_object_put_pages(obj) == 0) {
1690                         count += obj->base.size >> PAGE_SHIFT;
1691                         if (count >= target)
1692                                 return count;
1693                 }
1694         }
1695
1696         list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) {
1697                 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1698                     i915_gem_object_unbind(obj) == 0 &&
1699                     i915_gem_object_put_pages(obj) == 0) {
1700                         count += obj->base.size >> PAGE_SHIFT;
1701                         if (count >= target)
1702                                 return count;
1703                 }
1704         }
1705
1706         return count;
1707 }
1708
1709 static long
1710 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1711 {
1712         return __i915_gem_shrink(dev_priv, target, true);
1713 }
1714
1715 static void
1716 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1717 {
1718         struct drm_i915_gem_object *obj, *next;
1719
1720         i915_gem_evict_everything(dev_priv->dev);
1721
1722         list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
1723                                  global_list)
1724                 i915_gem_object_put_pages(obj);
1725 }
1726
1727 static int
1728 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1729 {
1730         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1731         int page_count, i;
1732         struct address_space *mapping;
1733         struct sg_table *st;
1734         struct scatterlist *sg;
1735         struct sg_page_iter sg_iter;
1736         struct page *page;
1737         unsigned long last_pfn = 0;     /* suppress gcc warning */
1738         gfp_t gfp;
1739
1740         /* Assert that the object is not currently in any GPU domain. As it
1741          * wasn't in the GTT, there shouldn't be any way it could have been in
1742          * a GPU cache
1743          */
1744         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1745         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1746
1747         st = kmalloc(sizeof(*st), GFP_KERNEL);
1748         if (st == NULL)
1749                 return -ENOMEM;
1750
1751         page_count = obj->base.size / PAGE_SIZE;
1752         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
1753                 sg_free_table(st);
1754                 kfree(st);
1755                 return -ENOMEM;
1756         }
1757
1758         /* Get the list of pages out of our struct file.  They'll be pinned
1759          * at this point until we release them.
1760          *
1761          * Fail silently without starting the shrinker
1762          */
1763         mapping = file_inode(obj->base.filp)->i_mapping;
1764         gfp = mapping_gfp_mask(mapping);
1765         gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1766         gfp &= ~(__GFP_IO | __GFP_WAIT);
1767         sg = st->sgl;
1768         st->nents = 0;
1769         for (i = 0; i < page_count; i++) {
1770                 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1771                 if (IS_ERR(page)) {
1772                         i915_gem_purge(dev_priv, page_count);
1773                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1774                 }
1775                 if (IS_ERR(page)) {
1776                         /* We've tried hard to allocate the memory by reaping
1777                          * our own buffer, now let the real VM do its job and
1778                          * go down in flames if truly OOM.
1779                          */
1780                         gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
1781                         gfp |= __GFP_IO | __GFP_WAIT;
1782
1783                         i915_gem_shrink_all(dev_priv);
1784                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1785                         if (IS_ERR(page))
1786                                 goto err_pages;
1787
1788                         gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1789                         gfp &= ~(__GFP_IO | __GFP_WAIT);
1790                 }
1791 #ifdef CONFIG_SWIOTLB
1792                 if (swiotlb_nr_tbl()) {
1793                         st->nents++;
1794                         sg_set_page(sg, page, PAGE_SIZE, 0);
1795                         sg = sg_next(sg);
1796                         continue;
1797                 }
1798 #endif
1799                 if (!i || page_to_pfn(page) != last_pfn + 1) {
1800                         if (i)
1801                                 sg = sg_next(sg);
1802                         st->nents++;
1803                         sg_set_page(sg, page, PAGE_SIZE, 0);
1804                 } else {
1805                         sg->length += PAGE_SIZE;
1806                 }
1807                 last_pfn = page_to_pfn(page);
1808         }
1809 #ifdef CONFIG_SWIOTLB
1810         if (!swiotlb_nr_tbl())
1811 #endif
1812                 sg_mark_end(sg);
1813         obj->pages = st;
1814
1815         if (i915_gem_object_needs_bit17_swizzle(obj))
1816                 i915_gem_object_do_bit_17_swizzle(obj);
1817
1818         return 0;
1819
1820 err_pages:
1821         sg_mark_end(sg);
1822         for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
1823                 page_cache_release(sg_page_iter_page(&sg_iter));
1824         sg_free_table(st);
1825         kfree(st);
1826         return PTR_ERR(page);
1827 }
1828
1829 /* Ensure that the associated pages are gathered from the backing storage
1830  * and pinned into our object. i915_gem_object_get_pages() may be called
1831  * multiple times before they are released by a single call to
1832  * i915_gem_object_put_pages() - once the pages are no longer referenced
1833  * either as a result of memory pressure (reaping pages under the shrinker)
1834  * or as the object is itself released.
1835  */
1836 int
1837 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1838 {
1839         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1840         const struct drm_i915_gem_object_ops *ops = obj->ops;
1841         int ret;
1842
1843         if (obj->pages)
1844                 return 0;
1845
1846         if (obj->madv != I915_MADV_WILLNEED) {
1847                 DRM_ERROR("Attempting to obtain a purgeable object\n");
1848                 return -EINVAL;
1849         }
1850
1851         BUG_ON(obj->pages_pin_count);
1852
1853         ret = ops->get_pages(obj);
1854         if (ret)
1855                 return ret;
1856
1857         list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
1858         return 0;
1859 }
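
/*
 * Typical in-driver usage sketch (illustrative only; assumes the caller
 * holds dev->struct_mutex and uses the pin-count helpers
 * i915_gem_object_pin_pages()/i915_gem_object_unpin_pages() from
 * i915_drv.h): pin the pages around any direct use so the shrinker
 * cannot reap them in between:
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret)
 *		return ret;
 *	i915_gem_object_pin_pages(obj);
 *	... use obj->pages ...
 *	i915_gem_object_unpin_pages(obj);
 */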
1860
1861 void
1862 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1863                                struct intel_ring_buffer *ring)
1864 {
1865         struct drm_device *dev = obj->base.dev;
1866         struct drm_i915_private *dev_priv = dev->dev_private;
1867         struct i915_address_space *vm = &dev_priv->gtt.base;
1868         u32 seqno = intel_ring_get_seqno(ring);
1869
1870         BUG_ON(ring == NULL);
1871         if (obj->ring != ring && obj->last_write_seqno) {
1872                 /* Keep the seqno relative to the current ring */
1873                 obj->last_write_seqno = seqno;
1874         }
1875         obj->ring = ring;
1876
1877         /* Add a reference if we're newly entering the active list. */
1878         if (!obj->active) {
1879                 drm_gem_object_reference(&obj->base);
1880                 obj->active = 1;
1881         }
1882
1883         /* Move from whatever list we were on to the tail of execution. */
1884         list_move_tail(&obj->mm_list, &vm->active_list);
1885         list_move_tail(&obj->ring_list, &ring->active_list);
1886
1887         obj->last_read_seqno = seqno;
1888
1889         if (obj->fenced_gpu_access) {
1890                 obj->last_fenced_seqno = seqno;
1891
1892                 /* Bump MRU to take account of the delayed flush */
1893                 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1894                         struct drm_i915_fence_reg *reg;
1895
1896                         reg = &dev_priv->fence_regs[obj->fence_reg];
1897                         list_move_tail(&reg->lru_list,
1898                                        &dev_priv->mm.fence_list);
1899                 }
1900         }
1901 }
1902
1903 static void
1904 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1905 {
1906         struct drm_device *dev = obj->base.dev;
1907         struct drm_i915_private *dev_priv = dev->dev_private;
1908         struct i915_address_space *vm = &dev_priv->gtt.base;
1909
1910         BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
1911         BUG_ON(!obj->active);
1912
1913         list_move_tail(&obj->mm_list, &vm->inactive_list);
1914
1915         list_del_init(&obj->ring_list);
1916         obj->ring = NULL;
1917
1918         obj->last_read_seqno = 0;
1919         obj->last_write_seqno = 0;
1920         obj->base.write_domain = 0;
1921
1922         obj->last_fenced_seqno = 0;
1923         obj->fenced_gpu_access = false;
1924
1925         obj->active = 0;
1926         drm_gem_object_unreference(&obj->base);
1927
1928         WARN_ON(i915_verify_lists(dev));
1929 }
1930
1931 static int
1932 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
1933 {
1934         struct drm_i915_private *dev_priv = dev->dev_private;
1935         struct intel_ring_buffer *ring;
1936         int ret, i, j;
1937
1938         /* Carefully retire all requests without writing to the rings */
1939         for_each_ring(ring, dev_priv, i) {
1940                 ret = intel_ring_idle(ring);
1941                 if (ret)
1942                         return ret;
1943         }
1944         i915_gem_retire_requests(dev);
1945
1946         /* Finally reset hw state */
1947         for_each_ring(ring, dev_priv, i) {
1948                 intel_ring_init_seqno(ring, seqno);
1949
1950                 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1951                         ring->sync_seqno[j] = 0;
1952         }
1953
1954         return 0;
1955 }
1956
1957 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
1958 {
1959         struct drm_i915_private *dev_priv = dev->dev_private;
1960         int ret;
1961
1962         if (seqno == 0)
1963                 return -EINVAL;
1964
1965         /* The HWS page seqno needs to be set below the value we
1966          * will inject into the ring.
1967          */
1968         ret = i915_gem_init_seqno(dev, seqno - 1);
1969         if (ret)
1970                 return ret;
1971
1972         /* Carefully set the last_seqno value so that wrap
1973          * detection still works
1974          */
1975         dev_priv->next_seqno = seqno;
1976         dev_priv->last_seqno = seqno - 1;
1977         if (dev_priv->last_seqno == 0)
1978                 dev_priv->last_seqno--;
1979
1980         return 0;
1981 }
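
/*
 * Worked example (illustrative only): i915_gem_set_seqno(dev, 1) idles
 * the rings and seeds the hardware status pages with 0 via
 * i915_gem_init_seqno(), then sets next_seqno = 1 and last_seqno = 0;
 * because a last_seqno of 0 is reserved it is decremented to
 * 0xffffffff, which keeps wrap detection for the following request
 * consistent.
 */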
1982
1983 int
1984 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
1985 {
1986         struct drm_i915_private *dev_priv = dev->dev_private;
1987
1988         /* reserve 0 for non-seqno */
1989         if (dev_priv->next_seqno == 0) {
1990                 int ret = i915_gem_init_seqno(dev, 0);
1991                 if (ret)
1992                         return ret;
1993
1994                 dev_priv->next_seqno = 1;
1995         }
1996
1997         *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
1998         return 0;
1999 }
2000
2001 int __i915_add_request(struct intel_ring_buffer *ring,
2002                        struct drm_file *file,
2003                        struct drm_i915_gem_object *obj,
2004                        u32 *out_seqno)
2005 {
2006         drm_i915_private_t *dev_priv = ring->dev->dev_private;
2007         struct drm_i915_gem_request *request;
2008         u32 request_ring_position, request_start;
2009         int was_empty;
2010         int ret;
2011
2012         request_start = intel_ring_get_tail(ring);
2013         /*
2014          * Emit any outstanding flushes - execbuf can fail to emit the flush
2015          * after having emitted the batchbuffer command. Hence we need to fix
2016          * things up similar to emitting the lazy request. The difference here
2017          * is that the flush _must_ happen before the next request, no matter
2018          * what.
2019          */
2020         ret = intel_ring_flush_all_caches(ring);
2021         if (ret)
2022                 return ret;
2023
2024         request = kmalloc(sizeof(*request), GFP_KERNEL);
2025         if (request == NULL)
2026                 return -ENOMEM;
2027
2028
2029         /* Record the position of the start of the request so that
2030          * should we detect the updated seqno part-way through the
2031          * GPU processing the request, we never over-estimate the
2032          * position of the head.
2033          */
2034         request_ring_position = intel_ring_get_tail(ring);
2035
2036         ret = ring->add_request(ring);
2037         if (ret) {
2038                 kfree(request);
2039                 return ret;
2040         }
2041
2042         request->seqno = intel_ring_get_seqno(ring);
2043         request->ring = ring;
2044         request->head = request_start;
2045         request->tail = request_ring_position;
2046         request->ctx = ring->last_context;
2047         request->batch_obj = obj;
2048
2049         /* Whilst this request exists, batch_obj will be on the
2050          * active_list, and so will hold the active reference. Only when this
2051          * request is retired will the batch_obj be moved onto the
2052          * inactive_list and lose its active reference. Hence we do not need
2053          * to explicitly hold another reference here.
2054          */
2055
2056         if (request->ctx)
2057                 i915_gem_context_reference(request->ctx);
2058
2059         request->emitted_jiffies = jiffies;
2060         was_empty = list_empty(&ring->request_list);
2061         list_add_tail(&request->list, &ring->request_list);
2062         request->file_priv = NULL;
2063
2064         if (file) {
2065                 struct drm_i915_file_private *file_priv = file->driver_priv;
2066
2067                 spin_lock(&file_priv->mm.lock);
2068                 request->file_priv = file_priv;
2069                 list_add_tail(&request->client_list,
2070                               &file_priv->mm.request_list);
2071                 spin_unlock(&file_priv->mm.lock);
2072         }
2073
2074         trace_i915_gem_request_add(ring, request->seqno);
2075         ring->outstanding_lazy_request = 0;
2076
2077         if (!dev_priv->ums.mm_suspended) {
2078                 i915_queue_hangcheck(ring->dev);
2079
2080                 if (was_empty) {
2081                         queue_delayed_work(dev_priv->wq,
2082                                            &dev_priv->mm.retire_work,
2083                                            round_jiffies_up_relative(HZ));
2084                         intel_mark_busy(dev_priv->dev);
2085                 }
2086         }
2087
2088         if (out_seqno)
2089                 *out_seqno = request->seqno;
2090         return 0;
2091 }
2092
2093 static inline void
2094 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2095 {
2096         struct drm_i915_file_private *file_priv = request->file_priv;
2097
2098         if (!file_priv)
2099                 return;
2100
2101         spin_lock(&file_priv->mm.lock);
2102         if (request->file_priv) {
2103                 list_del(&request->client_list);
2104                 request->file_priv = NULL;
2105         }
2106         spin_unlock(&file_priv->mm.lock);
2107 }
2108
2109 static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
2110 {
2111         if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
2112             acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
2113                 return true;
2114
2115         return false;
2116 }
2117
2118 static bool i915_head_inside_request(const u32 acthd_unmasked,
2119                                      const u32 request_start,
2120                                      const u32 request_end)
2121 {
2122         const u32 acthd = acthd_unmasked & HEAD_ADDR;
2123
2124         if (request_start < request_end) {
2125                 if (acthd >= request_start && acthd < request_end)
2126                         return true;
2127         } else if (request_start > request_end) {
2128                 if (acthd >= request_start || acthd < request_end)
2129                         return true;
2130         }
2131
2132         return false;
2133 }
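
/*
 * Worked example (illustrative only): with request_start == 0xf000 and
 * request_end == 0x1000 the request wrapped around the end of the ring,
 * so the second branch above accepts both acthd == 0xf800 (before the
 * wrap) and acthd == 0x0800 (after the wrap), while acthd == 0x8000 is
 * reported as outside the request.
 */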
2134
2135 static bool i915_request_guilty(struct drm_i915_gem_request *request,
2136                                 const u32 acthd, bool *inside)
2137 {
2138         /* There is a possibility that an unmasked head address
2139          * pointing inside the ring matches the batch_obj address range.
2140          * However this is extremely unlikely.
2141          */
2142
2143         if (request->batch_obj) {
2144                 if (i915_head_inside_object(acthd, request->batch_obj)) {
2145                         *inside = true;
2146                         return true;
2147                 }
2148         }
2149
2150         if (i915_head_inside_request(acthd, request->head, request->tail)) {
2151                 *inside = false;
2152                 return true;
2153         }
2154
2155         return false;
2156 }
2157
2158 static void i915_set_reset_status(struct intel_ring_buffer *ring,
2159                                   struct drm_i915_gem_request *request,
2160                                   u32 acthd)
2161 {
2162         struct i915_ctx_hang_stats *hs = NULL;
2163         bool inside, guilty;
2164
2165         /* Innocent until proven guilty */
2166         guilty = false;
2167
2168         if (ring->hangcheck.action != wait &&
2169             i915_request_guilty(request, acthd, &inside)) {
2170                 DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
2171                           ring->name,
2172                           inside ? "inside" : "flushing",
2173                           request->batch_obj ?
2174                           i915_gem_obj_ggtt_offset(request->batch_obj) : 0,
2175                           request->ctx ? request->ctx->id : 0,
2176                           acthd);
2177
2178                 guilty = true;
2179         }
2180
2181         /* If contexts are disabled or this is the default context, use
2182          * the per-file hang statistics in file_priv->hang_stats.
2183          */
2184         if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
2185                 hs = &request->ctx->hang_stats;
2186         else if (request->file_priv)
2187                 hs = &request->file_priv->hang_stats;
2188
2189         if (hs) {
2190                 if (guilty)
2191                         hs->batch_active++;
2192                 else
2193                         hs->batch_pending++;
2194         }
2195 }
2196
2197 static void i915_gem_free_request(struct drm_i915_gem_request *request)
2198 {
2199         list_del(&request->list);
2200         i915_gem_request_remove_from_client(request);
2201
2202         if (request->ctx)
2203                 i915_gem_context_unreference(request->ctx);
2204
2205         kfree(request);
2206 }
2207
2208 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2209                                       struct intel_ring_buffer *ring)
2210 {
2211         u32 completed_seqno;
2212         u32 acthd;
2213
2214         acthd = intel_ring_get_active_head(ring);
2215         completed_seqno = ring->get_seqno(ring, false);
2216
2217         while (!list_empty(&ring->request_list)) {
2218                 struct drm_i915_gem_request *request;
2219
2220                 request = list_first_entry(&ring->request_list,
2221                                            struct drm_i915_gem_request,
2222                                            list);
2223
2224                 if (request->seqno > completed_seqno)
2225                         i915_set_reset_status(ring, request, acthd);
2226
2227                 i915_gem_free_request(request);
2228         }
2229
2230         while (!list_empty(&ring->active_list)) {
2231                 struct drm_i915_gem_object *obj;
2232
2233                 obj = list_first_entry(&ring->active_list,
2234                                        struct drm_i915_gem_object,
2235                                        ring_list);
2236
2237                 i915_gem_object_move_to_inactive(obj);
2238         }
2239 }
2240
2241 void i915_gem_restore_fences(struct drm_device *dev)
2242 {
2243         struct drm_i915_private *dev_priv = dev->dev_private;
2244         int i;
2245
2246         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2247                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2248
2249                 /*
2250                  * Commit delayed tiling changes if we have an object still
2251                  * attached to the fence, otherwise just clear the fence.
2252                  */
2253                 if (reg->obj) {
2254                         i915_gem_object_update_fence(reg->obj, reg,
2255                                                      reg->obj->tiling_mode);
2256                 } else {
2257                         i915_gem_write_fence(dev, i, NULL);
2258                 }
2259         }
2260 }
2261
2262 void i915_gem_reset(struct drm_device *dev)
2263 {
2264         struct drm_i915_private *dev_priv = dev->dev_private;
2265         struct i915_address_space *vm = &dev_priv->gtt.base;
2266         struct drm_i915_gem_object *obj;
2267         struct intel_ring_buffer *ring;
2268         int i;
2269
2270         for_each_ring(ring, dev_priv, i)
2271                 i915_gem_reset_ring_lists(dev_priv, ring);
2272
2273         /* Move everything out of the GPU domains to ensure we do any
2274          * necessary invalidation upon reuse.
2275          */
2276         list_for_each_entry(obj, &vm->inactive_list, mm_list)
2277                 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
2278
2279         i915_gem_restore_fences(dev);
2280 }
2281
2282 /**
2283  * This function clears the request list as sequence numbers are passed.
2284  */
2285 void
2286 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2287 {
2288         uint32_t seqno;
2289
2290         if (list_empty(&ring->request_list))
2291                 return;
2292
2293         WARN_ON(i915_verify_lists(ring->dev));
2294
2295         seqno = ring->get_seqno(ring, true);
2296
2297         while (!list_empty(&ring->request_list)) {
2298                 struct drm_i915_gem_request *request;
2299
2300                 request = list_first_entry(&ring->request_list,
2301                                            struct drm_i915_gem_request,
2302                                            list);
2303
2304                 if (!i915_seqno_passed(seqno, request->seqno))
2305                         break;
2306
2307                 trace_i915_gem_request_retire(ring, request->seqno);
2308                 /* We know the GPU must have read the request to have
2309                  * sent us the seqno + interrupt, so use the position
2310                  * of tail of the request to update the last known position
2311          * of the tail of the request to update the last known position
2312                  */
2313                 ring->last_retired_head = request->tail;
2314
2315                 i915_gem_free_request(request);
2316         }
2317
2318         /* Move any buffers on the active list that are no longer referenced
2319          * by the ringbuffer to the flushing/inactive lists as appropriate.
2320          */
2321         while (!list_empty(&ring->active_list)) {
2322                 struct drm_i915_gem_object *obj;
2323
2324                 obj = list_first_entry(&ring->active_list,
2325                                        struct drm_i915_gem_object,
2326                                        ring_list);
2327
2328                 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2329                         break;
2330
2331                 i915_gem_object_move_to_inactive(obj);
2332         }
2333
2334         if (unlikely(ring->trace_irq_seqno &&
2335                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2336                 ring->irq_put(ring);
2337                 ring->trace_irq_seqno = 0;
2338         }
2339
2340         WARN_ON(i915_verify_lists(ring->dev));
2341 }
2342
2343 void
2344 i915_gem_retire_requests(struct drm_device *dev)
2345 {
2346         drm_i915_private_t *dev_priv = dev->dev_private;
2347         struct intel_ring_buffer *ring;
2348         int i;
2349
2350         for_each_ring(ring, dev_priv, i)
2351                 i915_gem_retire_requests_ring(ring);
2352 }
2353
2354 static void
2355 i915_gem_retire_work_handler(struct work_struct *work)
2356 {
2357         drm_i915_private_t *dev_priv;
2358         struct drm_device *dev;
2359         struct intel_ring_buffer *ring;
2360         bool idle;
2361         int i;
2362
2363         dev_priv = container_of(work, drm_i915_private_t,
2364                                 mm.retire_work.work);
2365         dev = dev_priv->dev;
2366
2367         /* Come back later if the device is busy... */
2368         if (!mutex_trylock(&dev->struct_mutex)) {
2369                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2370                                    round_jiffies_up_relative(HZ));
2371                 return;
2372         }
2373
2374         i915_gem_retire_requests(dev);
2375
2376         /* Send a periodic flush down the ring so we don't hold onto GEM
2377          * objects indefinitely.
2378          */
2379         idle = true;
2380         for_each_ring(ring, dev_priv, i) {
2381                 if (ring->gpu_caches_dirty)
2382                         i915_add_request(ring, NULL);
2383
2384                 idle &= list_empty(&ring->request_list);
2385         }
2386
2387         if (!dev_priv->ums.mm_suspended && !idle)
2388                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2389                                    round_jiffies_up_relative(HZ));
2390         if (idle)
2391                 intel_mark_idle(dev);
2392
2393         mutex_unlock(&dev->struct_mutex);
2394 }
2395
2396 /**
2397  * Ensures that an object will eventually get non-busy by flushing any required
2398  * write domains, emitting any outstanding lazy request and retiring any
2399  * completed requests.
2400  */
2401 static int
2402 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2403 {
2404         int ret;
2405
2406         if (obj->active) {
2407                 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2408                 if (ret)
2409                         return ret;
2410
2411                 i915_gem_retire_requests_ring(obj->ring);
2412         }
2413
2414         return 0;
2415 }
2416
2417 /**
2418  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2419  * @DRM_IOCTL_ARGS: standard ioctl arguments
2420  *
2421  * Returns 0 if successful, else an error is returned with the remaining time in
2422  * the timeout parameter.
2423  *  -ETIME: object is still busy after timeout
2424  *  -ERESTARTSYS: signal interrupted the wait
2425  *  -ENOENT: object doesn't exist
2426  * Also possible, but rare:
2427  *  -EAGAIN: GPU wedged
2428  *  -ENOMEM: damn
2429  *  -ENODEV: Internal IRQ fail
2430  *  -E?: The add request failed
2431  *
2432  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2433  * non-zero timeout parameter the wait ioctl will wait for the given number of
2434  * nanoseconds on an object becoming unbusy. Since the wait itself does so
2435  * without holding struct_mutex the object may become re-busied before this
2436  * function completes. A similar but shorter * race condition exists in the busy
2437  * function completes. A similar but shorter race condition exists in the busy
2438  * ioctl.
2439 int
2440 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2441 {
2442         drm_i915_private_t *dev_priv = dev->dev_private;
2443         struct drm_i915_gem_wait *args = data;
2444         struct drm_i915_gem_object *obj;
2445         struct intel_ring_buffer *ring = NULL;
2446         struct timespec timeout_stack, *timeout = NULL;
2447         unsigned reset_counter;
2448         u32 seqno = 0;
2449         int ret = 0;
2450
2451         if (args->timeout_ns >= 0) {
2452                 timeout_stack = ns_to_timespec(args->timeout_ns);
2453                 timeout = &timeout_stack;
2454         }
2455
2456         ret = i915_mutex_lock_interruptible(dev);
2457         if (ret)
2458                 return ret;
2459
2460         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2461         if (&obj->base == NULL) {
2462                 mutex_unlock(&dev->struct_mutex);
2463                 return -ENOENT;
2464         }
2465
2466         /* Need to make sure the object gets inactive eventually. */
2467         ret = i915_gem_object_flush_active(obj);
2468         if (ret)
2469                 goto out;
2470
2471         if (obj->active) {
2472                 seqno = obj->last_read_seqno;
2473                 ring = obj->ring;
2474         }
2475
2476         if (seqno == 0)
2477                  goto out;
2478
2479         /* Do this after OLR check to make sure we make forward progress polling
2480          * on this IOCTL with a 0 timeout (like busy ioctl)
2481          */
2482         if (!args->timeout_ns) {
2483                 ret = -ETIME;
2484                 goto out;
2485         }
2486
2487         drm_gem_object_unreference(&obj->base);
2488         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2489         mutex_unlock(&dev->struct_mutex);
2490
2491         ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
2492         if (timeout)
2493                 args->timeout_ns = timespec_to_ns(timeout);
2494         return ret;
2495
2496 out:
2497         drm_gem_object_unreference(&obj->base);
2498         mutex_unlock(&dev->struct_mutex);
2499         return ret;
2500 }
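
/*
 * Minimal userspace sketch (illustrative only; assumes an open DRM fd
 * and a valid GEM handle, omits error handling) of the two uses
 * documented above:
 *
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 1000 * 1000 * 1000,
 *	};
 *	int ret = ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *
 * A return of 0 means the object is idle and wait.timeout_ns holds the
 * unused part of the one-second budget; -1 with errno == ETIME means
 * the object was still busy when the timeout expired.  Passing a
 * timeout_ns of 0 turns the call into a non-blocking busy check.
 */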
2501
2502 /**
2503  * i915_gem_object_sync - sync an object to a ring.
2504  *
2505  * @obj: object which may be in use on another ring.
2506  * @to: ring we wish to use the object on. May be NULL.
2507  *
2508  * This code is meant to abstract object synchronization with the GPU.
2509  * Calling with NULL implies synchronizing the object with the CPU
2510  * rather than a particular GPU ring.
2511  *
2512  * Returns 0 if successful, else propagates up the lower layer error.
2513  */
2514 int
2515 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2516                      struct intel_ring_buffer *to)
2517 {
2518         struct intel_ring_buffer *from = obj->ring;
2519         u32 seqno;
2520         int ret, idx;
2521
2522         if (from == NULL || to == from)
2523                 return 0;
2524
2525         if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2526                 return i915_gem_object_wait_rendering(obj, false);
2527
2528         idx = intel_ring_sync_index(from, to);
2529
2530         seqno = obj->last_read_seqno;
2531         if (seqno <= from->sync_seqno[idx])
2532                 return 0;
2533
2534         ret = i915_gem_check_olr(obj->ring, seqno);
2535         if (ret)
2536                 return ret;
2537
2538         ret = to->sync_to(to, from, seqno);
2539         if (!ret)
2540                 /* We use last_read_seqno because sync_to()
2541                  * might have just caused seqno wrap under
2542                  * the radar.
2543                  */
2544                 from->sync_seqno[idx] = obj->last_read_seqno;
2545
2546         return ret;
2547 }
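
/*
 * Worked example (illustrative only): if @to has already synchronised
 * up to seqno 100 from @from (from->sync_seqno[idx] == 100) and the
 * object was last read at seqno 90, the check above skips the
 * semaphore entirely; for a last_read_seqno of 120 a sync_to() wait is
 * emitted and the per-ring bookkeeping is bumped to 120.
 */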
2548
2549 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2550 {
2551         u32 old_write_domain, old_read_domains;
2552
2553         /* Force a pagefault for domain tracking on next user access */
2554         i915_gem_release_mmap(obj);
2555
2556         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2557                 return;
2558
2559         /* Wait for any direct GTT access to complete */
2560         mb();
2561
2562         old_read_domains = obj->base.read_domains;
2563         old_write_domain = obj->base.write_domain;
2564
2565         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2566         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2567
2568         trace_i915_gem_object_change_domain(obj,
2569                                             old_read_domains,
2570                                             old_write_domain);
2571 }
2572
2573 /**
2574  * Unbinds an object from the GTT aperture.
2575  */
2576 int
2577 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2578 {
2579         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2580         struct i915_vma *vma;
2581         int ret;
2582
2583         if (!i915_gem_obj_ggtt_bound(obj))
2584                 return 0;
2585
2586         if (obj->pin_count)
2587                 return -EBUSY;
2588
2589         BUG_ON(obj->pages == NULL);
2590
2591         ret = i915_gem_object_finish_gpu(obj);
2592         if (ret)
2593                 return ret;
2594         /* Continue on if we fail due to EIO: the GPU is hung, so we
2595          * should be safe, and we need to clean up or else we might
2596          * cause memory corruption through use-after-free.
2597          */
2598
2599         i915_gem_object_finish_gtt(obj);
2600
2601         /* release the fence reg _after_ flushing */
2602         ret = i915_gem_object_put_fence(obj);
2603         if (ret)
2604                 return ret;
2605
2606         trace_i915_gem_object_unbind(obj);
2607
2608         if (obj->has_global_gtt_mapping)
2609                 i915_gem_gtt_unbind_object(obj);
2610         if (obj->has_aliasing_ppgtt_mapping) {
2611                 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2612                 obj->has_aliasing_ppgtt_mapping = 0;
2613         }
2614         i915_gem_gtt_finish_object(obj);
2615         i915_gem_object_unpin_pages(obj);
2616
2617         list_del(&obj->mm_list);
2618         /* Avoid an unnecessary call to unbind on rebind. */
2619         obj->map_and_fenceable = true;
2620
2621         vma = __i915_gem_obj_to_vma(obj);
2622         list_del(&vma->vma_link);
2623         drm_mm_remove_node(&vma->node);
2624         i915_gem_vma_destroy(vma);
2625
2626         /* Since the unbound list is global, only move to that list if
2627          * no more VMAs exist.
2628          * NB: Until we have real VMAs there will only ever be one */
2629         WARN_ON(!list_empty(&obj->vma_list));
2630         if (list_empty(&obj->vma_list))
2631                 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2632
2633         return 0;
2634 }
2635
2636 int i915_gpu_idle(struct drm_device *dev)
2637 {
2638         drm_i915_private_t *dev_priv = dev->dev_private;
2639         struct intel_ring_buffer *ring;
2640         int ret, i;
2641
2642         /* Flush everything onto the inactive list. */
2643         for_each_ring(ring, dev_priv, i) {
2644                 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2645                 if (ret)
2646                         return ret;
2647
2648                 ret = intel_ring_idle(ring);
2649                 if (ret)
2650                         return ret;
2651         }
2652
2653         return 0;
2654 }
2655
2656 static void i965_write_fence_reg(struct drm_device *dev, int reg,
2657                                  struct drm_i915_gem_object *obj)
2658 {
2659         drm_i915_private_t *dev_priv = dev->dev_private;
2660         int fence_reg;
2661         int fence_pitch_shift;
2662
2663         if (INTEL_INFO(dev)->gen >= 6) {
2664                 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2665                 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2666         } else {
2667                 fence_reg = FENCE_REG_965_0;
2668                 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2669         }
2670
2671         fence_reg += reg * 8;
2672
2673         /* To w/a incoherency with non-atomic 64-bit register updates,
2674          * we split the 64-bit update into two 32-bit writes. In order
2675          * for a partial fence not to be evaluated between writes, we
2676          * precede the update with write to turn off the fence register,
2677          * and only enable the fence as the last step.
2678          *
2679          * For extra levels of paranoia, we make sure each step lands
2680          * before applying the next step.
2681          */
2682         I915_WRITE(fence_reg, 0);
2683         POSTING_READ(fence_reg);
2684
2685         if (obj) {
2686                 u32 size = i915_gem_obj_ggtt_size(obj);
2687                 uint64_t val;
2688
2689                 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
2690                                  0xfffff000) << 32;
2691                 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
2692                 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
2693                 if (obj->tiling_mode == I915_TILING_Y)
2694                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2695                 val |= I965_FENCE_REG_VALID;
2696
2697                 I915_WRITE(fence_reg + 4, val >> 32);
2698                 POSTING_READ(fence_reg + 4);
2699
2700                 I915_WRITE(fence_reg + 0, val);
2701                 POSTING_READ(fence_reg);
2702         } else {
2703                 I915_WRITE(fence_reg + 4, 0);
2704                 POSTING_READ(fence_reg + 4);
2705         }
2706 }
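
/*
 * Worked example (illustrative only): for a 1MiB fence region starting
 * at GTT offset 0x00100000 with a 512 byte stride, the code above
 * computes
 *
 *	upper dword: (0x00100000 + 0x00100000 - 4096) & 0xfffff000
 *	             == 0x001ff000 (start of the region's last page)
 *	lower dword: 0x00100000 | (512/128 - 1) << fence_pitch_shift
 *	             | I965_FENCE_REG_VALID (plus the Y-tiling bit if set)
 *
 * and the disable/upper/lower write order guarantees the fence is only
 * marked valid once both halves are consistent.
 */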
2707
2708 static void i915_write_fence_reg(struct drm_device *dev, int reg,
2709                                  struct drm_i915_gem_object *obj)
2710 {
2711         drm_i915_private_t *dev_priv = dev->dev_private;
2712         u32 val;
2713
2714         if (obj) {
2715                 u32 size = i915_gem_obj_ggtt_size(obj);
2716                 int pitch_val;
2717                 int tile_width;
2718
2719                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
2720                      (size & -size) != size ||
2721                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2722                      "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2723                      i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
2724
2725                 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2726                         tile_width = 128;
2727                 else
2728                         tile_width = 512;
2729
2730                 /* Note: pitch better be a power of two tile widths */
2731                 pitch_val = obj->stride / tile_width;
2732                 pitch_val = ffs(pitch_val) - 1;
2733
2734                 val = i915_gem_obj_ggtt_offset(obj);
2735                 if (obj->tiling_mode == I915_TILING_Y)
2736                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2737                 val |= I915_FENCE_SIZE_BITS(size);
2738                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2739                 val |= I830_FENCE_REG_VALID;
2740         } else
2741                 val = 0;
2742
2743         if (reg < 8)
2744                 reg = FENCE_REG_830_0 + reg * 4;
2745         else
2746                 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2747
2748         I915_WRITE(reg, val);
2749         POSTING_READ(reg);
2750 }
2751
2752 static void i830_write_fence_reg(struct drm_device *dev, int reg,
2753                                 struct drm_i915_gem_object *obj)
2754 {
2755         drm_i915_private_t *dev_priv = dev->dev_private;
2756         uint32_t val;
2757
2758         if (obj) {
2759                 u32 size = i915_gem_obj_ggtt_size(obj);
2760                 uint32_t pitch_val;
2761
2762                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
2763                      (size & -size) != size ||
2764                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2765                      "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2766                      i915_gem_obj_ggtt_offset(obj), size);
2767
2768                 pitch_val = obj->stride / 128;
2769                 pitch_val = ffs(pitch_val) - 1;
2770
2771                 val = i915_gem_obj_ggtt_offset(obj);
2772                 if (obj->tiling_mode == I915_TILING_Y)
2773                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2774                 val |= I830_FENCE_SIZE_BITS(size);
2775                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2776                 val |= I830_FENCE_REG_VALID;
2777         } else
2778                 val = 0;
2779
2780         I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2781         POSTING_READ(FENCE_REG_830_0 + reg * 4);
2782 }
2783
2784 static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2785 {
2786         return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2787 }
2788
2789 static void i915_gem_write_fence(struct drm_device *dev, int reg,
2790                                  struct drm_i915_gem_object *obj)
2791 {
2792         struct drm_i915_private *dev_priv = dev->dev_private;
2793
2794         /* Ensure that all CPU reads are completed before installing a fence
2795          * and all writes before removing the fence.
2796          */
2797         if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2798                 mb();
2799
2800         WARN(obj && (!obj->stride || !obj->tiling_mode),
2801              "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2802              obj->stride, obj->tiling_mode);
2803
2804         switch (INTEL_INFO(dev)->gen) {
2805         case 7:
2806         case 6:
2807         case 5:
2808         case 4: i965_write_fence_reg(dev, reg, obj); break;
2809         case 3: i915_write_fence_reg(dev, reg, obj); break;
2810         case 2: i830_write_fence_reg(dev, reg, obj); break;
2811         default: BUG();
2812         }
2813
2814         /* And similarly be paranoid that no direct access to this region
2815          * is reordered to before the fence is installed.
2816          */
2817         if (i915_gem_object_needs_mb(obj))
2818                 mb();
2819 }
2820
2821 static inline int fence_number(struct drm_i915_private *dev_priv,
2822                                struct drm_i915_fence_reg *fence)
2823 {
2824         return fence - dev_priv->fence_regs;
2825 }
2826
2827 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2828                                          struct drm_i915_fence_reg *fence,
2829                                          bool enable)
2830 {
2831         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2832         int reg = fence_number(dev_priv, fence);
2833
2834         i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
2835
2836         if (enable) {
2837                 obj->fence_reg = reg;
2838                 fence->obj = obj;
2839                 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2840         } else {
2841                 obj->fence_reg = I915_FENCE_REG_NONE;
2842                 fence->obj = NULL;
2843                 list_del_init(&fence->lru_list);
2844         }
2845         obj->fence_dirty = false;
2846 }
2847
2848 static int
2849 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
2850 {
2851         if (obj->last_fenced_seqno) {
2852                 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2853                 if (ret)
2854                         return ret;
2855
2856                 obj->last_fenced_seqno = 0;
2857         }
2858
2859         obj->fenced_gpu_access = false;
2860         return 0;
2861 }
2862
2863 int
2864 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2865 {
2866         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2867         struct drm_i915_fence_reg *fence;
2868         int ret;
2869
2870         ret = i915_gem_object_wait_fence(obj);
2871         if (ret)
2872                 return ret;
2873
2874         if (obj->fence_reg == I915_FENCE_REG_NONE)
2875                 return 0;
2876
2877         fence = &dev_priv->fence_regs[obj->fence_reg];
2878
2879         i915_gem_object_fence_lost(obj);
2880         i915_gem_object_update_fence(obj, fence, false);
2881
2882         return 0;
2883 }
2884
2885 static struct drm_i915_fence_reg *
2886 i915_find_fence_reg(struct drm_device *dev)
2887 {
2888         struct drm_i915_private *dev_priv = dev->dev_private;
2889         struct drm_i915_fence_reg *reg, *avail;
2890         int i;
2891
2892         /* First try to find a free reg */
2893         avail = NULL;
2894         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2895                 reg = &dev_priv->fence_regs[i];
2896                 if (!reg->obj)
2897                         return reg;
2898
2899                 if (!reg->pin_count)
2900                         avail = reg;
2901         }
2902
2903         if (avail == NULL)
2904                 return NULL;
2905
2906         /* None available, try to steal one or wait for a user to finish */
2907         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2908                 if (reg->pin_count)
2909                         continue;
2910
2911                 return reg;
2912         }
2913
2914         return NULL;
2915 }
2916
2917 /**
2918  * i915_gem_object_get_fence - set up fencing for an object
2919  * @obj: object to map through a fence reg
2920  *
2921  * When mapping objects through the GTT, userspace wants to be able to write
2922  * to them without having to worry about swizzling if the object is tiled.
2923  * This function walks the fence regs looking for a free one for @obj,
2924  * stealing one if it can't find any.
2925  *
2926  * It then sets up the reg based on the object's properties: address, pitch
2927  * and tiling format.
2928  *
2929  * For an untiled surface, this removes any existing fence.
2930  */
2931 int
2932 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2933 {
2934         struct drm_device *dev = obj->base.dev;
2935         struct drm_i915_private *dev_priv = dev->dev_private;
2936         bool enable = obj->tiling_mode != I915_TILING_NONE;
2937         struct drm_i915_fence_reg *reg;
2938         int ret;
2939
2940         /* Have we updated the tiling parameters upon the object and so
2941          * will need to serialise the write to the associated fence register?
2942          */
2943         if (obj->fence_dirty) {
2944                 ret = i915_gem_object_wait_fence(obj);
2945                 if (ret)
2946                         return ret;
2947         }
2948
2949         /* Just update our place in the LRU if our fence is getting reused. */
2950         if (obj->fence_reg != I915_FENCE_REG_NONE) {
2951                 reg = &dev_priv->fence_regs[obj->fence_reg];
2952                 if (!obj->fence_dirty) {
2953                         list_move_tail(&reg->lru_list,
2954                                        &dev_priv->mm.fence_list);
2955                         return 0;
2956                 }
2957         } else if (enable) {
2958                 reg = i915_find_fence_reg(dev);
2959                 if (reg == NULL)
2960                         return -EDEADLK;
2961
2962                 if (reg->obj) {
2963                         struct drm_i915_gem_object *old = reg->obj;
2964
2965                         ret = i915_gem_object_wait_fence(old);
2966                         if (ret)
2967                                 return ret;
2968
2969                         i915_gem_object_fence_lost(old);
2970                 }
2971         } else
2972                 return 0;
2973
2974         i915_gem_object_update_fence(obj, reg, enable);
2975
2976         return 0;
2977 }
2978
2979 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
2980                                      struct drm_mm_node *gtt_space,
2981                                      unsigned long cache_level)
2982 {
2983         struct drm_mm_node *other;
2984
2985         /* On non-LLC machines we have to be careful when putting differing
2986          * types of snoopable memory together to avoid the prefetcher
2987          * crossing memory domains and dying.
2988          */
2989         if (HAS_LLC(dev))
2990                 return true;
2991
2992         if (!drm_mm_node_allocated(gtt_space))
2993                 return true;
2994
2995         if (list_empty(&gtt_space->node_list))
2996                 return true;
2997
2998         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2999         if (other->allocated && !other->hole_follows && other->color != cache_level)
3000                 return false;
3001
3002         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3003         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3004                 return false;
3005
3006         return true;
3007 }
3008
3009 static void i915_gem_verify_gtt(struct drm_device *dev)
3010 {
3011 #if WATCH_GTT
3012         struct drm_i915_private *dev_priv = dev->dev_private;
3013         struct drm_i915_gem_object *obj;
3014         int err = 0;
3015
3016         list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
3017                 if (obj->gtt_space == NULL) {
3018                         printk(KERN_ERR "object found on GTT list with no space reserved\n");
3019                         err++;
3020                         continue;
3021                 }
3022
3023                 if (obj->cache_level != obj->gtt_space->color) {
3024                         printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
3025                                i915_gem_obj_ggtt_offset(obj),
3026                                i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3027                                obj->cache_level,
3028                                obj->gtt_space->color);
3029                         err++;
3030                         continue;
3031                 }
3032
3033                 if (!i915_gem_valid_gtt_space(dev,
3034                                               obj->gtt_space,
3035                                               obj->cache_level)) {
3036                         printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
3037                                i915_gem_obj_ggtt_offset(obj),
3038                                i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3039                                obj->cache_level);
3040                         err++;
3041                         continue;
3042                 }
3043         }
3044
3045         WARN_ON(err);
3046 #endif
3047 }
3048
3049 /**
3050  * Finds free space in the GTT aperture and binds the object there.
3051  */
3052 static int
3053 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
3054                             unsigned alignment,
3055                             bool map_and_fenceable,
3056                             bool nonblocking)
3057 {
3058         struct drm_device *dev = obj->base.dev;
3059         drm_i915_private_t *dev_priv = dev->dev_private;
3060         struct i915_address_space *vm = &dev_priv->gtt.base;
3061         u32 size, fence_size, fence_alignment, unfenced_alignment;
3062         bool mappable, fenceable;
3063         size_t gtt_max = map_and_fenceable ?
3064                 dev_priv->gtt.mappable_end : dev_priv->gtt.base.total;
3065         struct i915_vma *vma;
3066         int ret;
3067
3068         if (WARN_ON(!list_empty(&obj->vma_list)))
3069                 return -EBUSY;
3070
3071         fence_size = i915_gem_get_gtt_size(dev,
3072                                            obj->base.size,
3073                                            obj->tiling_mode);
3074         fence_alignment = i915_gem_get_gtt_alignment(dev,
3075                                                      obj->base.size,
3076                                                      obj->tiling_mode, true);
3077         unfenced_alignment =
3078                 i915_gem_get_gtt_alignment(dev,
3079                                            obj->base.size,
3080                                            obj->tiling_mode, false);
3081
3082         if (alignment == 0)
3083                 alignment = map_and_fenceable ? fence_alignment :
3084                                                 unfenced_alignment;
3085         if (map_and_fenceable && alignment & (fence_alignment - 1)) {
3086                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
3087                 return -EINVAL;
3088         }
3089
3090         size = map_and_fenceable ? fence_size : obj->base.size;
3091
3092         /* If the object is bigger than the entire aperture, reject it early
3093          * before evicting everything in a vain attempt to find space.
3094          */
3095         if (obj->base.size > gtt_max) {
3096                 DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
3097                           obj->base.size,
3098                           map_and_fenceable ? "mappable" : "total",
3099                           gtt_max);
3100                 return -E2BIG;
3101         }
3102
3103         ret = i915_gem_object_get_pages(obj);
3104         if (ret)
3105                 return ret;
3106
3107         i915_gem_object_pin_pages(obj);
3108
3109         vma = i915_gem_vma_create(obj, &dev_priv->gtt.base);
3110         if (IS_ERR(vma)) {
3111                 ret = PTR_ERR(vma);
3112                 goto err_unpin;
3113         }
3114
3115 search_free:
3116         ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
3117                                                   &vma->node,
3118                                                   size, alignment,
3119                                                   obj->cache_level, 0, gtt_max,
3120                                                   DRM_MM_SEARCH_DEFAULT);
3121         if (ret) {
3122                 ret = i915_gem_evict_something(dev, size, alignment,
3123                                                obj->cache_level,
3124                                                map_and_fenceable,
3125                                                nonblocking);
3126                 if (ret == 0)
3127                         goto search_free;
3128
3129                 goto err_free_vma;
3130         }
3131         if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
3132                                               obj->cache_level))) {
3133                 ret = -EINVAL;
3134                 goto err_remove_node;
3135         }
3136
3137         ret = i915_gem_gtt_prepare_object(obj);
3138         if (ret)
3139                 goto err_remove_node;
3140
3141         list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3142         list_add_tail(&obj->mm_list, &vm->inactive_list);
3143         list_add(&vma->vma_link, &obj->vma_list);
3144
3145         fenceable =
3146                 i915_gem_obj_ggtt_size(obj) == fence_size &&
3147                 (i915_gem_obj_ggtt_offset(obj) & (fence_alignment - 1)) == 0;
3148
3149         mappable = i915_gem_obj_ggtt_offset(obj) + obj->base.size <=
3150                 dev_priv->gtt.mappable_end;
3151
3152         obj->map_and_fenceable = mappable && fenceable;
3153
3154         trace_i915_gem_object_bind(obj, map_and_fenceable);
3155         i915_gem_verify_gtt(dev);
3156         return 0;
3157
3158 err_remove_node:
3159         drm_mm_remove_node(&vma->node);
3160 err_free_vma:
3161         i915_gem_vma_destroy(vma);
3162 err_unpin:
3163         i915_gem_object_unpin_pages(obj);
3164         return ret;
3165 }
3166
3167 void
3168 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
3169 {
3170         /* If we don't have a page list set up, then we're not pinned
3171          * to the GPU, and we can ignore the cache flush because it'll happen
3172          * again at bind time.
3173          */
3174         if (obj->pages == NULL)
3175                 return;
3176
3177         /*
3178          * Stolen memory is always coherent with the GPU as it is explicitly
3179          * marked as wc by the system, or the system is cache-coherent.
3180          */
3181         if (obj->stolen)
3182                 return;
3183
3184         /* If the GPU is snooping the contents of the CPU cache,
3185          * we do not need to manually clear the CPU cache lines.  However,
3186          * the caches are only snooped when the render cache is
3187          * flushed/invalidated.  As we always have to emit invalidations
3188          * and flushes when moving into and out of the RENDER domain, correct
3189          * snooping behaviour occurs naturally as the result of our domain
3190          * tracking.
3191          */
3192         if (obj->cache_level != I915_CACHE_NONE)
3193                 return;
3194
3195         trace_i915_gem_object_clflush(obj);
3196
3197         drm_clflush_sg(obj->pages);
3198 }
3199
3200 /** Flushes the GTT write domain for the object if it's dirty. */
3201 static void
3202 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3203 {
3204         uint32_t old_write_domain;
3205
3206         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3207                 return;
3208
3209         /* No actual flushing is required for the GTT write domain.  Writes
3210          * to it immediately go to main memory as far as we know, so there's
3211          * no chipset flush.  It also doesn't land in the render cache.
3212          *
3213          * However, we do have to enforce the order so that all writes through
3214          * the GTT land before any writes to the device, such as updates to
3215          * the GATT itself.
3216          */
3217         wmb();
3218
3219         old_write_domain = obj->base.write_domain;
3220         obj->base.write_domain = 0;
3221
3222         trace_i915_gem_object_change_domain(obj,
3223                                             obj->base.read_domains,
3224                                             old_write_domain);
3225 }
3226
3227 /** Flushes the CPU write domain for the object if it's dirty. */
3228 static void
3229 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3230 {
3231         uint32_t old_write_domain;
3232
3233         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3234                 return;
3235
3236         i915_gem_clflush_object(obj);
3237         i915_gem_chipset_flush(obj->base.dev);
3238         old_write_domain = obj->base.write_domain;
3239         obj->base.write_domain = 0;
3240
3241         trace_i915_gem_object_change_domain(obj,
3242                                             obj->base.read_domains,
3243                                             old_write_domain);
3244 }
3245
3246 /**
3247  * Moves a single object to the GTT read, and possibly write domain.
3248  *
3249  * This function returns when the move is complete, including waiting on
3250  * flushes to occur.
3251  */
3252 int
3253 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3254 {
3255         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
3256         uint32_t old_write_domain, old_read_domains;
3257         int ret;
3258
3259         /* Not valid to be called on unbound objects. */
3260         if (!i915_gem_obj_ggtt_bound(obj))
3261                 return -EINVAL;
3262
3263         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3264                 return 0;
3265
3266         ret = i915_gem_object_wait_rendering(obj, !write);
3267         if (ret)
3268                 return ret;
3269
3270         i915_gem_object_flush_cpu_write_domain(obj);
3271
3272         /* Serialise direct access to this object with the barriers for
3273          * coherent writes from the GPU, by effectively invalidating the
3274          * GTT domain upon first access.
3275          */
3276         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3277                 mb();
3278
3279         old_write_domain = obj->base.write_domain;
3280         old_read_domains = obj->base.read_domains;
3281
3282         /* It should now be out of any other write domains, and we can update
3283          * the domain values for our changes.
3284          */
3285         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3286         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3287         if (write) {
3288                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3289                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3290                 obj->dirty = 1;
3291         }
3292
3293         trace_i915_gem_object_change_domain(obj,
3294                                             old_read_domains,
3295                                             old_write_domain);
3296
3297         /* And bump the LRU for this access */
3298         if (i915_gem_object_is_inactive(obj))
3299                 list_move_tail(&obj->mm_list,
3300                                &dev_priv->gtt.base.inactive_list);
3301
3302         return 0;
3303 }
3304
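/**
 * i915_gem_object_set_cache_level - change the caching attributes of an object
 * @obj: object to update
 * @cache_level: new cache level (e.g. I915_CACHE_LLC or I915_CACHE_NONE)
 *
 * If the object's current GTT placement is not valid for the new cache level,
 * it is unbound first.  While it remains bound, the global GTT (and any
 * aliasing PPGTT) entries are rewritten with the new attributes, and on
 * pre-Sandybridge hardware any fence is dropped, since fences cannot be used
 * with snooped memory.  When switching to uncached, the object is also moved
 * to the CPU domain so that the necessary clflushes happen on a later access.
 */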
3305 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3306                                     enum i915_cache_level cache_level)
3307 {
3308         struct drm_device *dev = obj->base.dev;
3309         drm_i915_private_t *dev_priv = dev->dev_private;
3310         struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
3311         int ret;
3312
3313         if (obj->cache_level == cache_level)
3314                 return 0;
3315
3316         if (obj->pin_count) {
3317                 DRM_DEBUG("cannot change the cache level of pinned objects\n");
3318                 return -EBUSY;
3319         }
3320
3321         if (vma && !i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
3322                 ret = i915_gem_object_unbind(obj);
3323                 if (ret)
3324                         return ret;
3325         }
3326
3327         if (i915_gem_obj_ggtt_bound(obj)) {
3328                 ret = i915_gem_object_finish_gpu(obj);
3329                 if (ret)
3330                         return ret;
3331
3332                 i915_gem_object_finish_gtt(obj);
3333
3334                 /* Before SandyBridge, you could not use tiling or fence
3335                  * registers with snooped memory, so relinquish any fences
3336                  * currently pointing to our region in the aperture.
3337                  */
3338                 if (INTEL_INFO(dev)->gen < 6) {
3339                         ret = i915_gem_object_put_fence(obj);
3340                         if (ret)
3341                                 return ret;
3342                 }
3343
3344                 if (obj->has_global_gtt_mapping)
3345                         i915_gem_gtt_bind_object(obj, cache_level);
3346                 if (obj->has_aliasing_ppgtt_mapping)
3347                         i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3348                                                obj, cache_level);
3349
3350                 i915_gem_obj_ggtt_set_color(obj, cache_level);
3351         }
3352
3353         if (cache_level == I915_CACHE_NONE) {
3354                 u32 old_read_domains, old_write_domain;
3355
3356                 /* If we're coming from LLC cached, then we haven't
3357                  * actually been tracking whether the data is in the
3358                  * CPU cache or not, since we only allow one bit set
3359                  * in obj->write_domain and have been skipping the clflushes.
3360                  * Just set it to the CPU cache for now.
3361                  */
3362                 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3363                 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
3364
3365                 old_read_domains = obj->base.read_domains;
3366                 old_write_domain = obj->base.write_domain;
3367
3368                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3369                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3370
3371                 trace_i915_gem_object_change_domain(obj,
3372                                                     old_read_domains,
3373                                                     old_write_domain);
3374         }
3375
3376         obj->cache_level = cache_level;
3377         i915_gem_verify_gtt(dev);
3378         return 0;
3379 }
3380
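/*
 * Report to userspace whether the object named by args->handle is currently
 * mapped cached (LLC) or uncached.
 */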
3381 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3382                                struct drm_file *file)
3383 {
3384         struct drm_i915_gem_caching *args = data;
3385         struct drm_i915_gem_object *obj;
3386         int ret;
3387
3388         ret = i915_mutex_lock_interruptible(dev);
3389         if (ret)
3390                 return ret;
3391
3392         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3393         if (&obj->base == NULL) {
3394                 ret = -ENOENT;
3395                 goto unlock;
3396         }
3397
3398         args->caching = obj->cache_level != I915_CACHE_NONE;
3399
3400         drm_gem_object_unreference(&obj->base);
3401 unlock:
3402         mutex_unlock(&dev->struct_mutex);
3403         return ret;
3404 }
3405
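/*
 * Let userspace switch an object between I915_CACHING_NONE and
 * I915_CACHING_CACHED.  Illustrative userspace usage, assuming libdrm's
 * drmIoctl() wrapper, an open DRM fd and a GEM handle bo_handle:
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = bo_handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 */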
3406 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3407                                struct drm_file *file)
3408 {
3409         struct drm_i915_gem_caching *args = data;
3410         struct drm_i915_gem_object *obj;
3411         enum i915_cache_level level;
3412         int ret;
3413
3414         switch (args->caching) {
3415         case I915_CACHING_NONE:
3416                 level = I915_CACHE_NONE;
3417                 break;
3418         case I915_CACHING_CACHED:
3419                 level = I915_CACHE_LLC;
3420                 break;
3421         default:
3422                 return -EINVAL;
3423         }
3424
3425         ret = i915_mutex_lock_interruptible(dev);
3426         if (ret)
3427                 return ret;
3428
3429         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3430         if (&obj->base == NULL) {
3431                 ret = -ENOENT;
3432                 goto unlock;
3433         }
3434
3435         ret = i915_gem_object_set_cache_level(obj, level);
3436
3437         drm_gem_object_unreference(&obj->base);
3438 unlock:
3439         mutex_unlock(&dev->struct_mutex);
3440         return ret;
3441 }
3442
3443 /*
3444  * Prepare a buffer for the display plane (scanout, cursors, etc.).
3445  * Can be called from an uninterruptible phase (modesetting) and allows
3446  * any flushes to be pipelined (for pageflips).
3447  */
3448 int
3449 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3450                                      u32 alignment,
3451                                      struct intel_ring_buffer *pipelined)
3452 {
3453         u32 old_read_domains, old_write_domain;
3454         int ret;
3455
3456         if (pipelined != obj->ring) {
3457                 ret = i915_gem_object_sync(obj, pipelined);
3458                 if (ret)
3459                         return ret;
3460         }
3461
3462         /* The display engine is not coherent with the LLC cache on gen6.  As
3463          * a result, we make sure that the pinning that is about to occur is
3464          * done with uncached PTEs. This is lowest common denominator for all
3465          * chipsets.
3466          *
3467          * However for gen6+, we could do better by using the GFDT bit instead
3468          * of uncaching, which would allow us to flush all the LLC-cached data
3469          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3470          */
3471         ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
3472         if (ret)
3473                 return ret;
3474
3475         /* As the user may map the buffer once pinned in the display plane
3476          * (e.g. libkms for the bootup splash), we have to ensure that we
3477          * always use map_and_fenceable for all scanout buffers.
3478          */
3479         ret = i915_gem_object_pin(obj, alignment, true, false);
3480         if (ret)
3481                 return ret;
3482
3483         i915_gem_object_flush_cpu_write_domain(obj);
3484
3485         old_write_domain = obj->base.write_domain;
3486         old_read_domains = obj->base.read_domains;
3487
3488         /* It should now be out of any other write domains, and we can update
3489          * the domain values for our changes.
3490          */
3491         obj->base.write_domain = 0;
3492         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3493
3494         trace_i915_gem_object_change_domain(obj,
3495                                             old_read_domains,
3496                                             old_write_domain);
3497
3498         return 0;
3499 }
3500
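/*
 * Wait for any outstanding rendering to the object and drop its GPU read
 * domains, so that the GPU's caches and TLBs are invalidated before the
 * backing pages are remapped (e.g. for a cache level change).
 */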
3501 int
3502 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3503 {
3504         int ret;
3505
3506         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3507                 return 0;
3508
3509         ret = i915_gem_object_wait_rendering(obj, false);
3510         if (ret)
3511                 return ret;
3512
3513         /* Ensure that we invalidate the GPU's caches and TLBs. */
3514         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3515         return 0;
3516 }
3517
3518 /**
3519  * Moves a single object to the CPU read, and possibly write domain.
3520  *
3521  * This function returns when the move is complete, including waiting on
3522  * flushes to occur.
3523  */
3524 int
3525 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3526 {
3527         uint32_t old_write_domain, old_read_domains;
3528         int ret;
3529
3530         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3531                 return 0;
3532
3533         ret = i915_gem_object_wait_rendering(obj, !write);
3534         if (ret)
3535                 return ret;
3536
3537         i915_gem_object_flush_gtt_write_domain(obj);
3538
3539         old_write_domain = obj->base.write_domain;
3540         old_read_domains = obj->base.read_domains;
3541
3542         /* Flush the CPU cache if it's still invalid. */
3543         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3544                 i915_gem_clflush_object(obj);
3545
3546                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3547         }
3548
3549         /* It should now be out of any other write domains, and we can update
3550          * the domain values for our changes.
3551          */
3552         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3553
3554         /* If we're writing through the CPU, then the GPU read domains will
3555          * need to be invalidated at next use.
3556          */
3557         if (write) {
3558                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3559                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3560         }
3561
3562         trace_i915_gem_object_change_domain(obj,
3563                                             old_read_domains,
3564                                             old_write_domain);
3565
3566         return 0;
3567 }
3568
3569 /* Throttle our rendering by waiting until the ring has completed our requests
3570  * emitted over 20 msec ago.
3571  *
3572  * Note that if we were to use the current jiffies each time around the loop,
3573  * we wouldn't escape the function with any frames outstanding if the time to
3574  * render a frame was over 20ms.
3575  *
3576  * This should get us reasonable parallelism between CPU and GPU but also
3577  * relatively low latency when blocking on a particular request to finish.
3578  */
3579 static int
3580 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3581 {
3582         struct drm_i915_private *dev_priv = dev->dev_private;
3583         struct drm_i915_file_private *file_priv = file->driver_priv;
3584         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3585         struct drm_i915_gem_request *request;
3586         struct intel_ring_buffer *ring = NULL;
3587         unsigned reset_counter;
3588         u32 seqno = 0;
3589         int ret;
3590
3591         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3592         if (ret)
3593                 return ret;
3594
3595         ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3596         if (ret)
3597                 return ret;
3598
3599         spin_lock(&file_priv->mm.lock);
3600         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3601                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3602                         break;
3603
3604                 ring = request->ring;
3605                 seqno = request->seqno;
3606         }
3607         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3608         spin_unlock(&file_priv->mm.lock);
3609
3610         if (seqno == 0)
3611                 return 0;
3612
3613         ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
3614         if (ret == 0)
3615                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3616
3617         return ret;
3618 }
3619
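/*
 * Pin an object into the global GTT.  If the object is already bound but at
 * an offset that violates the requested alignment, or outside the mappable
 * aperture when map_and_fenceable is set, it is unbound and rebound.  Each
 * successful call increments obj->pin_count, which keeps the object resident
 * until the matching i915_gem_object_unpin().
 */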
3620 int
3621 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3622                     uint32_t alignment,
3623                     bool map_and_fenceable,
3624                     bool nonblocking)
3625 {
3626         int ret;
3627
3628         if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3629                 return -EBUSY;
3630
3631         if (i915_gem_obj_ggtt_bound(obj)) {
3632                 if ((alignment && i915_gem_obj_ggtt_offset(obj) & (alignment - 1)) ||
3633                     (map_and_fenceable && !obj->map_and_fenceable)) {
3634                         WARN(obj->pin_count,
3635                              "bo is already pinned with incorrect alignment:"
3636                              " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
3637                              " obj->map_and_fenceable=%d\n",
3638                              i915_gem_obj_ggtt_offset(obj), alignment,
3639                              map_and_fenceable,
3640                              obj->map_and_fenceable);
3641                         ret = i915_gem_object_unbind(obj);
3642                         if (ret)
3643                                 return ret;
3644                 }
3645         }
3646
3647         if (!i915_gem_obj_ggtt_bound(obj)) {
3648                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3649
3650                 ret = i915_gem_object_bind_to_gtt(obj, alignment,
3651                                                   map_and_fenceable,
3652                                                   nonblocking);
3653                 if (ret)
3654                         return ret;
3655
3656                 if (!dev_priv->mm.aliasing_ppgtt)
3657                         i915_gem_gtt_bind_object(obj, obj->cache_level);
3658         }
3659
3660         if (!obj->has_global_gtt_mapping && map_and_fenceable)
3661                 i915_gem_gtt_bind_object(obj, obj->cache_level);
3662
3663         obj->pin_count++;
3664         obj->pin_mappable |= map_and_fenceable;
3665
3666         return 0;
3667 }
3668
3669 void
3670 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3671 {
3672         BUG_ON(obj->pin_count == 0);
3673         BUG_ON(!i915_gem_obj_ggtt_bound(obj));
3674
3675         if (--obj->pin_count == 0)
3676                 obj->pin_mappable = false;
3677 }
3678
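/*
 * Legacy pin interface for old (UMS) userspace: pins the object into the
 * mappable GTT on behalf of the caller and returns its offset.  Purgeable
 * buffers and buffers already pinned by another file are rejected.
 */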
3679 int
3680 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3681                    struct drm_file *file)
3682 {
3683         struct drm_i915_gem_pin *args = data;
3684         struct drm_i915_gem_object *obj;
3685         int ret;
3686
3687         ret = i915_mutex_lock_interruptible(dev);
3688         if (ret)
3689                 return ret;
3690
3691         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3692         if (&obj->base == NULL) {
3693                 ret = -ENOENT;
3694                 goto unlock;
3695         }
3696
3697         if (obj->madv != I915_MADV_WILLNEED) {
3698                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3699                 ret = -EINVAL;
3700                 goto out;
3701         }
3702
3703         if (obj->pin_filp != NULL && obj->pin_filp != file) {
3704                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3705                           args->handle);
3706                 ret = -EINVAL;
3707                 goto out;
3708         }
3709
3710         if (obj->user_pin_count == 0) {
3711                 ret = i915_gem_object_pin(obj, args->alignment, true, false);
3712                 if (ret)
3713                         goto out;
3714         }
3715
3716         obj->user_pin_count++;
3717         obj->pin_filp = file;
3718
3719         /* XXX - flush the CPU caches for pinned objects
3720          * as the X server doesn't manage domains yet
3721          */
3722         i915_gem_object_flush_cpu_write_domain(obj);
3723         args->offset = i915_gem_obj_ggtt_offset(obj);
3724 out:
3725         drm_gem_object_unreference(&obj->base);
3726 unlock:
3727         mutex_unlock(&dev->struct_mutex);
3728         return ret;
3729 }
3730
3731 int
3732 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3733                      struct drm_file *file)
3734 {
3735         struct drm_i915_gem_pin *args = data;
3736         struct drm_i915_gem_object *obj;
3737         int ret;
3738
3739         ret = i915_mutex_lock_interruptible(dev);
3740         if (ret)
3741                 return ret;
3742
3743         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3744         if (&obj->base == NULL) {
3745                 ret = -ENOENT;
3746                 goto unlock;
3747         }
3748
3749         if (obj->pin_filp != file) {
3750                 DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
3751                           args->handle);
3752                 ret = -EINVAL;
3753                 goto out;
3754         }
3755         obj->user_pin_count--;
3756         if (obj->user_pin_count == 0) {
3757                 obj->pin_filp = NULL;
3758                 i915_gem_object_unpin(obj);
3759         }
3760
3761 out:
3762         drm_gem_object_unreference(&obj->base);
3763 unlock:
3764         mutex_unlock(&dev->struct_mutex);
3765         return ret;
3766 }
3767
3768 int
3769 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3770                     struct drm_file *file)
3771 {
3772         struct drm_i915_gem_busy *args = data;
3773         struct drm_i915_gem_object *obj;
3774         int ret;
3775
3776         ret = i915_mutex_lock_interruptible(dev);
3777         if (ret)
3778                 return ret;
3779
3780         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3781         if (&obj->base == NULL) {
3782                 ret = -ENOENT;
3783                 goto unlock;
3784         }
3785
3786         /* Count all active objects as busy, even if they are currently not used
3787          * by the gpu. Users of this interface expect objects to eventually
3788          * become non-busy without any further actions, therefore emit any
3789          * necessary flushes here.
3790          */
3791         ret = i915_gem_object_flush_active(obj);
3792
3793         args->busy = obj->active;
3794         if (obj->ring) {
3795                 BUILD_BUG_ON(I915_NUM_RINGS > 16);
3796                 args->busy |= intel_ring_flag(obj->ring) << 16;
3797         }
3798
3799         drm_gem_object_unreference(&obj->base);
3800 unlock:
3801         mutex_unlock(&dev->struct_mutex);
3802         return ret;
3803 }
3804
3805 int
3806 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3807                         struct drm_file *file_priv)
3808 {
3809         return i915_gem_ring_throttle(dev, file_priv);
3810 }
3811
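/*
 * Let userspace advise whether a buffer's contents are still needed.
 * Buffers marked I915_MADV_DONTNEED may have their backing storage discarded
 * by the shrinker under memory pressure; args->retained reports whether the
 * contents are still present (i.e. have not yet been purged).
 */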
3812 int
3813 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3814                        struct drm_file *file_priv)
3815 {
3816         struct drm_i915_gem_madvise *args = data;
3817         struct drm_i915_gem_object *obj;
3818         int ret;
3819
3820         switch (args->madv) {
3821         case I915_MADV_DONTNEED:
3822         case I915_MADV_WILLNEED:
3823                 break;
3824         default:
3825                 return -EINVAL;
3826         }
3827
3828         ret = i915_mutex_lock_interruptible(dev);
3829         if (ret)
3830                 return ret;
3831
3832         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3833         if (&obj->base == NULL) {
3834                 ret = -ENOENT;
3835                 goto unlock;
3836         }
3837
3838         if (obj->pin_count) {
3839                 ret = -EINVAL;
3840                 goto out;
3841         }
3842
3843         if (obj->madv != __I915_MADV_PURGED)
3844                 obj->madv = args->madv;
3845
3846         /* if the object is no longer attached, discard its backing storage */
3847         if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
3848                 i915_gem_object_truncate(obj);
3849
3850         args->retained = obj->madv != __I915_MADV_PURGED;
3851
3852 out:
3853         drm_gem_object_unreference(&obj->base);
3854 unlock:
3855         mutex_unlock(&dev->struct_mutex);
3856         return ret;
3857 }
3858
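/*
 * Common initialisation shared by all GEM object backends: set up the list
 * heads, install the backing-storage ops and account the object in the
 * per-device GEM statistics.
 */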
3859 void i915_gem_object_init(struct drm_i915_gem_object *obj,
3860                           const struct drm_i915_gem_object_ops *ops)
3861 {
3862         INIT_LIST_HEAD(&obj->mm_list);
3863         INIT_LIST_HEAD(&obj->global_list);
3864         INIT_LIST_HEAD(&obj->ring_list);
3865         INIT_LIST_HEAD(&obj->exec_list);
3866         INIT_LIST_HEAD(&obj->vma_list);
3867
3868         obj->ops = ops;
3869
3870         obj->fence_reg = I915_FENCE_REG_NONE;
3871         obj->madv = I915_MADV_WILLNEED;
3872         /* Avoid an unnecessary call to unbind on the first bind. */
3873         obj->map_and_fenceable = true;
3874
3875         i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
3876 }
3877
3878 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3879         .get_pages = i915_gem_object_get_pages_gtt,
3880         .put_pages = i915_gem_object_put_pages_gtt,
3881 };
3882
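/*
 * Allocate a new shmem-backed GEM object of the given size.  The shmem
 * mapping's GFP mask is restricted to the low 4GiB on 965G/965GM, which
 * cannot relocate objects above 4GiB, and the default cache level is chosen
 * based on whether the platform has an LLC shared with the CPU.
 */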
3883 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3884                                                   size_t size)
3885 {
3886         struct drm_i915_gem_object *obj;
3887         struct address_space *mapping;
3888         gfp_t mask;
3889
3890         obj = i915_gem_object_alloc(dev);
3891         if (obj == NULL)
3892                 return NULL;
3893
3894         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3895                 i915_gem_object_free(obj);
3896                 return NULL;
3897         }
3898
3899         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
3900         if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
3901                 /* 965gm cannot relocate objects above 4GiB. */
3902                 mask &= ~__GFP_HIGHMEM;
3903                 mask |= __GFP_DMA32;
3904         }
3905
3906         mapping = file_inode(obj->base.filp)->i_mapping;
3907         mapping_set_gfp_mask(mapping, mask);
3908
3909         i915_gem_object_init(obj, &i915_gem_object_ops);
3910
3911         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3912         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3913
3914         if (HAS_LLC(dev)) {
3915                 /* On some devices, we can have the GPU use the LLC (the CPU
3916                  * cache) for about a 10% performance improvement
3917                  * compared to uncached.  Graphics requests other than
3918                  * display scanout are coherent with the CPU in
3919                  * accessing this cache.  This means in this mode we
3920                  * don't need to clflush on the CPU side, and on the
3921                  * GPU side we only need to flush internal caches to
3922                  * get data visible to the CPU.
3923                  *
3924                  * However, we maintain the display planes as UC, and so
3925                  * need to rebind when first used as such.
3926                  */
3927                 obj->cache_level = I915_CACHE_LLC;
3928         } else
3929                 obj->cache_level = I915_CACHE_NONE;
3930
3931         trace_i915_gem_object_create(obj);
3932
3933         return obj;
3934 }
3935
3936 int i915_gem_init_object(struct drm_gem_object *obj)
3937 {
3938         BUG();
3939
3940         return 0;
3941 }
3942
3943 void i915_gem_free_object(struct drm_gem_object *gem_obj)
3944 {
3945         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3946         struct drm_device *dev = obj->base.dev;
3947         drm_i915_private_t *dev_priv = dev->dev_private;
3948
3949         trace_i915_gem_object_destroy(obj);
3950
3951         if (obj->phys_obj)
3952                 i915_gem_detach_phys_object(dev, obj);
3953
3954         obj->pin_count = 0;
3955         if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
3956                 bool was_interruptible;
3957
3958                 was_interruptible = dev_priv->mm.interruptible;
3959                 dev_priv->mm.interruptible = false;
3960
3961                 WARN_ON(i915_gem_object_unbind(obj));
3962
3963                 dev_priv->mm.interruptible = was_interruptible;
3964         }
3965
3966         /* Stolen objects don't hold a ref, but do hold a pages pin count.
3967          * Fix that up before progressing. */
3968         if (obj->stolen)
3969                 i915_gem_object_unpin_pages(obj);
3970
3971         if (WARN_ON(obj->pages_pin_count))
3972                 obj->pages_pin_count = 0;
3973         i915_gem_object_put_pages(obj);
3974         i915_gem_object_free_mmap_offset(obj);
3975         i915_gem_object_release_stolen(obj);
3976
3977         BUG_ON(obj->pages);
3978
3979         if (obj->base.import_attach)
3980                 drm_prime_gem_destroy(&obj->base, NULL);
3981
3982         drm_gem_object_release(&obj->base);
3983         i915_gem_info_remove_obj(dev_priv, obj->base.size);
3984
3985         kfree(obj->bit_17);
3986         i915_gem_object_free(obj);
3987 }
3988
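/*
 * A VMA describes one binding of an object into a single GPU address space;
 * at this point only the global GTT is used.  The backing drm_mm node is
 * allocated later, in i915_gem_object_bind_to_gtt().
 */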
3989 struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
3990                                      struct i915_address_space *vm)
3991 {
3992         struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
3993         if (vma == NULL)
3994                 return ERR_PTR(-ENOMEM);
3995
3996         INIT_LIST_HEAD(&vma->vma_link);
3997         vma->vm = vm;
3998         vma->obj = obj;
3999
4000         return vma;
4001 }
4002
4003 void i915_gem_vma_destroy(struct i915_vma *vma)
4004 {
4005         WARN_ON(vma->node.allocated);
4006         kfree(vma);
4007 }
4008
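/*
 * Quiesce the GPU for suspend or driver unload: wait for all rings to idle,
 * retire outstanding requests, tear down the ring buffers and stop the
 * hangcheck timer and retire worker.
 */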
4009 int
4010 i915_gem_idle(struct drm_device *dev)
4011 {
4012         drm_i915_private_t *dev_priv = dev->dev_private;
4013         int ret;
4014
4015         if (dev_priv->ums.mm_suspended) {
4016                 mutex_unlock(&dev->struct_mutex);
4017                 return 0;
4018         }
4019
4020         ret = i915_gpu_idle(dev);
4021         if (ret) {
4022                 mutex_unlock(&dev->struct_mutex);
4023                 return ret;
4024         }
4025         i915_gem_retire_requests(dev);
4026
4027         /* Under UMS, be paranoid and evict. */
4028         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4029                 i915_gem_evict_everything(dev);
4030
4031         del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
4032
4033         i915_kernel_lost_context(dev);
4034         i915_gem_cleanup_ringbuffer(dev);
4035
4036         /* Cancel the retire work handler, which should be idle now. */
4037         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4038
4039         return 0;
4040 }
4041
4042 void i915_gem_l3_remap(struct drm_device *dev)
4043 {
4044         drm_i915_private_t *dev_priv = dev->dev_private;
4045         u32 misccpctl;
4046         int i;
4047
4048         if (!HAS_L3_GPU_CACHE(dev))
4049                 return;
4050
4051         if (!dev_priv->l3_parity.remap_info)
4052                 return;
4053
4054         misccpctl = I915_READ(GEN7_MISCCPCTL);
4055         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
4056         POSTING_READ(GEN7_MISCCPCTL);
4057
4058         for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4059                 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
4060                 if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
4061                         DRM_DEBUG("0x%x was already programmed to %x\n",
4062                                   GEN7_L3LOG_BASE + i, remap);
4063                 if (remap && !dev_priv->l3_parity.remap_info[i/4])
4064                         DRM_DEBUG_DRIVER("Clearing remapped register\n");
4065                 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
4066         }
4067
4068         /* Make sure all the writes land before disabling dop clock gating */
4069         POSTING_READ(GEN7_L3LOG_BASE);
4070
4071         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
4072 }
4073
4074 void i915_gem_init_swizzling(struct drm_device *dev)
4075 {
4076         drm_i915_private_t *dev_priv = dev->dev_private;
4077
4078         if (INTEL_INFO(dev)->gen < 5 ||
4079             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4080                 return;
4081
4082         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4083                                  DISP_TILE_SURFACE_SWIZZLING);
4084
4085         if (IS_GEN5(dev))
4086                 return;
4087
4088         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4089         if (IS_GEN6(dev))
4090                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4091         else if (IS_GEN7(dev))
4092                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4093         else
4094                 BUG();
4095 }
4096
4097 static bool
4098 intel_enable_blt(struct drm_device *dev)
4099 {
4100         if (!HAS_BLT(dev))
4101                 return false;
4102
4103         /* The blitter was dysfunctional on early prototypes */
4104         if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4105                 DRM_INFO("BLT not supported on this pre-production hardware;"
4106                          " graphics performance will be degraded.\n");
4107                 return false;
4108         }
4109
4110         return true;
4111 }
4112
4113 static int i915_gem_init_rings(struct drm_device *dev)
4114 {
4115         struct drm_i915_private *dev_priv = dev->dev_private;
4116         int ret;
4117
4118         ret = intel_init_render_ring_buffer(dev);
4119         if (ret)
4120                 return ret;
4121
4122         if (HAS_BSD(dev)) {
4123                 ret = intel_init_bsd_ring_buffer(dev);
4124                 if (ret)
4125                         goto cleanup_render_ring;
4126         }
4127
4128         if (intel_enable_blt(dev)) {
4129                 ret = intel_init_blt_ring_buffer(dev);
4130                 if (ret)
4131                         goto cleanup_bsd_ring;
4132         }
4133
4134         if (HAS_VEBOX(dev)) {
4135                 ret = intel_init_vebox_ring_buffer(dev);
4136                 if (ret)
4137                         goto cleanup_blt_ring;
4138         }
4139
4140
4141         ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4142         if (ret)
4143                 goto cleanup_vebox_ring;
4144
4145         return 0;
4146
4147 cleanup_vebox_ring:
4148         intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4149 cleanup_blt_ring:
4150         intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4151 cleanup_bsd_ring:
4152         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4153 cleanup_render_ring:
4154         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4155
4156         return ret;
4157 }
4158
4159 int
4160 i915_gem_init_hw(struct drm_device *dev)
4161 {
4162         drm_i915_private_t *dev_priv = dev->dev_private;
4163         int ret;
4164
4165         if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4166                 return -EIO;
4167
4168         if (dev_priv->ellc_size)
4169                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4170
4171         if (HAS_PCH_NOP(dev)) {
4172                 u32 temp = I915_READ(GEN7_MSG_CTL);
4173                 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4174                 I915_WRITE(GEN7_MSG_CTL, temp);
4175         }
4176
4177         i915_gem_l3_remap(dev);
4178
4179         i915_gem_init_swizzling(dev);
4180
4181         ret = i915_gem_init_rings(dev);
4182         if (ret)
4183                 return ret;
4184
4185         /*
4186          * XXX: There was some w/a described somewhere suggesting loading
4187          * contexts before PPGTT.
4188          */
4189         i915_gem_context_init(dev);
4190         if (dev_priv->mm.aliasing_ppgtt) {
4191                 ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
4192                 if (ret) {
4193                         i915_gem_cleanup_aliasing_ppgtt(dev);
4194                         DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
4195                 }
4196         }
4197
4198         return 0;
4199 }
4200
4201 int i915_gem_init(struct drm_device *dev)
4202 {
4203         struct drm_i915_private *dev_priv = dev->dev_private;
4204         int ret;
4205
4206         mutex_lock(&dev->struct_mutex);
4207
4208         if (IS_VALLEYVIEW(dev)) {
4209                 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4210                 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
4211                 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
4212                         DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4213         }
4214
4215         i915_gem_init_global_gtt(dev);
4216
4217         ret = i915_gem_init_hw(dev);
4218         mutex_unlock(&dev->struct_mutex);
4219         if (ret) {
4220                 i915_gem_cleanup_aliasing_ppgtt(dev);
4221                 return ret;
4222         }
4223
4224         /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4225         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4226                 dev_priv->dri1.allow_batchbuffer = 1;
4227         return 0;
4228 }
4229
4230 void
4231 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4232 {
4233         drm_i915_private_t *dev_priv = dev->dev_private;
4234         struct intel_ring_buffer *ring;
4235         int i;
4236
4237         for_each_ring(ring, dev_priv, i)
4238                 intel_cleanup_ring_buffer(ring);
4239 }
4240
4241 int
4242 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4243                        struct drm_file *file_priv)
4244 {
4245         struct drm_i915_private *dev_priv = dev->dev_private;
4246         int ret;
4247
4248         if (drm_core_check_feature(dev, DRIVER_MODESET))
4249                 return 0;
4250
4251         if (i915_reset_in_progress(&dev_priv->gpu_error)) {
4252                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4253                 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
4254         }
4255
4256         mutex_lock(&dev->struct_mutex);
4257         dev_priv->ums.mm_suspended = 0;
4258
4259         ret = i915_gem_init_hw(dev);
4260         if (ret != 0) {
4261                 mutex_unlock(&dev->struct_mutex);
4262                 return ret;
4263         }
4264
4265         BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
4266         mutex_unlock(&dev->struct_mutex);
4267
4268         ret = drm_irq_install(dev);
4269         if (ret)
4270                 goto cleanup_ringbuffer;
4271
4272         return 0;
4273
4274 cleanup_ringbuffer:
4275         mutex_lock(&dev->struct_mutex);
4276         i915_gem_cleanup_ringbuffer(dev);
4277         dev_priv->ums.mm_suspended = 1;
4278         mutex_unlock(&dev->struct_mutex);
4279
4280         return ret;
4281 }
4282
4283 int
4284 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4285                        struct drm_file *file_priv)
4286 {
4287         struct drm_i915_private *dev_priv = dev->dev_private;
4288         int ret;
4289
4290         if (drm_core_check_feature(dev, DRIVER_MODESET))
4291                 return 0;
4292
4293         drm_irq_uninstall(dev);
4294
4295         mutex_lock(&dev->struct_mutex);
4296         ret = i915_gem_idle(dev);
4297
4298         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
4299          * We need to replace this with a semaphore, or something.
4300          * And not confound ums.mm_suspended!
4301          */
4302         if (ret != 0)
4303                 dev_priv->ums.mm_suspended = 1;
4304         mutex_unlock(&dev->struct_mutex);
4305
4306         return ret;
4307 }
4308
4309 void
4310 i915_gem_lastclose(struct drm_device *dev)
4311 {
4312         int ret;
4313
4314         if (drm_core_check_feature(dev, DRIVER_MODESET))
4315                 return;
4316
4317         mutex_lock(&dev->struct_mutex);
4318         ret = i915_gem_idle(dev);
4319         if (ret)
4320                 DRM_ERROR("failed to idle hardware: %d\n", ret);
4321         mutex_unlock(&dev->struct_mutex);
4322 }
4323
4324 static void
4325 init_ring_lists(struct intel_ring_buffer *ring)
4326 {
4327         INIT_LIST_HEAD(&ring->active_list);
4328         INIT_LIST_HEAD(&ring->request_list);
4329 }
4330
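/*
 * One-time GEM setup at driver load: create the object slab cache,
 * initialise the object, request and fence bookkeeping, size the fence
 * register pool for the platform and register the shrinker that reclaims
 * inactive objects under memory pressure.
 */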
4331 void
4332 i915_gem_load(struct drm_device *dev)
4333 {
4334         drm_i915_private_t *dev_priv = dev->dev_private;
4335         int i;
4336
4337         dev_priv->slab =
4338                 kmem_cache_create("i915_gem_object",
4339                                   sizeof(struct drm_i915_gem_object), 0,
4340                                   SLAB_HWCACHE_ALIGN,
4341                                   NULL);
4342
4343         INIT_LIST_HEAD(&dev_priv->gtt.base.active_list);
4344         INIT_LIST_HEAD(&dev_priv->gtt.base.inactive_list);
4345         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4346         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4347         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4348         for (i = 0; i < I915_NUM_RINGS; i++)
4349                 init_ring_lists(&dev_priv->ring[i]);
4350         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4351                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4352         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4353                           i915_gem_retire_work_handler);
4354         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4355
4356         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4357         if (IS_GEN3(dev)) {
4358                 I915_WRITE(MI_ARB_STATE,
4359                            _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4360         }
4361
4362         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4363
4364         /* Old X drivers will take 0-2 for front, back, depth buffers */
4365         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4366                 dev_priv->fence_reg_start = 3;
4367
4368         if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4369                 dev_priv->num_fence_regs = 32;
4370         else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4371                 dev_priv->num_fence_regs = 16;
4372         else
4373                 dev_priv->num_fence_regs = 8;
4374
4375         /* Initialize fence registers to zero */
4376         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4377         i915_gem_restore_fences(dev);
4378
4379         i915_gem_detect_bit_6_swizzle(dev);
4380         init_waitqueue_head(&dev_priv->pending_flip_queue);
4381
4382         dev_priv->mm.interruptible = true;
4383
4384         dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
4385         dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4386         register_shrinker(&dev_priv->mm.inactive_shrinker);
4387 }
4388
4389 /*
4390  * Create a physically contiguous memory object for this object
4391  * e.g. for cursor + overlay regs
4392  */
4393 static int i915_gem_init_phys_object(struct drm_device *dev,
4394                                      int id, int size, int align)
4395 {
4396         drm_i915_private_t *dev_priv = dev->dev_private;
4397         struct drm_i915_gem_phys_object *phys_obj;
4398         int ret;
4399
4400         if (dev_priv->mm.phys_objs[id - 1] || !size)
4401                 return 0;
4402
4403         phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4404         if (!phys_obj)
4405                 return -ENOMEM;
4406
4407         phys_obj->id = id;
4408
4409         phys_obj->handle = drm_pci_alloc(dev, size, align);
4410         if (!phys_obj->handle) {
4411                 ret = -ENOMEM;
4412                 goto kfree_obj;
4413         }
4414 #ifdef CONFIG_X86
4415         set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4416 #endif
4417
4418         dev_priv->mm.phys_objs[id - 1] = phys_obj;
4419
4420         return 0;
4421 kfree_obj:
4422         kfree(phys_obj);
4423         return ret;
4424 }
4425
4426 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4427 {
4428         drm_i915_private_t *dev_priv = dev->dev_private;
4429         struct drm_i915_gem_phys_object *phys_obj;
4430
4431         if (!dev_priv->mm.phys_objs[id - 1])
4432                 return;
4433
4434         phys_obj = dev_priv->mm.phys_objs[id - 1];
4435         if (phys_obj->cur_obj) {
4436                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4437         }
4438
4439 #ifdef CONFIG_X86
4440         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4441 #endif
4442         drm_pci_free(dev, phys_obj->handle);
4443         kfree(phys_obj);
4444         dev_priv->mm.phys_objs[id - 1] = NULL;
4445 }
4446
4447 void i915_gem_free_all_phys_object(struct drm_device *dev)
4448 {
4449         int i;
4450
4451         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4452                 i915_gem_free_phys_object(dev, i);
4453 }
4454
4455 void i915_gem_detach_phys_object(struct drm_device *dev,
4456                                  struct drm_i915_gem_object *obj)
4457 {
4458         struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4459         char *vaddr;
4460         int i;
4461         int page_count;
4462
4463         if (!obj->phys_obj)
4464                 return;
4465         vaddr = obj->phys_obj->handle->vaddr;
4466
4467         page_count = obj->base.size / PAGE_SIZE;
4468         for (i = 0; i < page_count; i++) {
4469                 struct page *page = shmem_read_mapping_page(mapping, i);
4470                 if (!IS_ERR(page)) {
4471                         char *dst = kmap_atomic(page);
4472                         memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4473                         kunmap_atomic(dst);
4474
4475                         drm_clflush_pages(&page, 1);
4476
4477                         set_page_dirty(page);
4478                         mark_page_accessed(page);
4479                         page_cache_release(page);
4480                 }
4481         }
4482         i915_gem_chipset_flush(dev);
4483
4484         obj->phys_obj->cur_obj = NULL;
4485         obj->phys_obj = NULL;
4486 }
4487
4488 int
4489 i915_gem_attach_phys_object(struct drm_device *dev,
4490                             struct drm_i915_gem_object *obj,
4491                             int id,
4492                             int align)
4493 {
4494         struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4495         drm_i915_private_t *dev_priv = dev->dev_private;
4496         int ret = 0;
4497         int page_count;
4498         int i;
4499
4500         if (id > I915_MAX_PHYS_OBJECT)
4501                 return -EINVAL;
4502
4503         if (obj->phys_obj) {
4504                 if (obj->phys_obj->id == id)
4505                         return 0;
4506                 i915_gem_detach_phys_object(dev, obj);
4507         }
4508
4509         /* create a new object */
4510         if (!dev_priv->mm.phys_objs[id - 1]) {
4511                 ret = i915_gem_init_phys_object(dev, id,
4512                                                 obj->base.size, align);
4513                 if (ret) {
4514                         DRM_ERROR("failed to init phys object %d size: %zu\n",
4515                                   id, obj->base.size);
4516                         return ret;
4517                 }
4518         }
4519
4520         /* bind to the object */
4521         obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4522         obj->phys_obj->cur_obj = obj;
4523
4524         page_count = obj->base.size / PAGE_SIZE;
4525
4526         for (i = 0; i < page_count; i++) {
4527                 struct page *page;
4528                 char *dst, *src;
4529
4530                 page = shmem_read_mapping_page(mapping, i);
4531                 if (IS_ERR(page))
4532                         return PTR_ERR(page);
4533
4534                 src = kmap_atomic(page);
4535                 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4536                 memcpy(dst, src, PAGE_SIZE);
4537                 kunmap_atomic(src);
4538
4539                 mark_page_accessed(page);
4540                 page_cache_release(page);
4541         }
4542
4543         return 0;
4544 }
4545
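     /*
      * pwrite fast path for objects backed by a phys object: user data is
      * copied straight into the CPU mapping of the contiguous buffer.  The
      * first attempt is a non-faulting, non-caching copy made while holding
      * struct_mutex; if that cannot complete, the mutex is dropped for an
      * ordinary copy_from_user(), which is safe because the phys backing
      * store never moves once it has been attached.
      */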
4546 static int
4547 i915_gem_phys_pwrite(struct drm_device *dev,
4548                      struct drm_i915_gem_object *obj,
4549                      struct drm_i915_gem_pwrite *args,
4550                      struct drm_file *file_priv)
4551 {
4552         void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4553         char __user *user_data = to_user_ptr(args->data_ptr);
4554
4555         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4556                 unsigned long unwritten;
4557
4558                 /* The physical object once assigned is fixed for the lifetime
4559                  * of the obj, so we can safely drop the lock and continue
4560                  * to access vaddr.
4561                  */
4562                 mutex_unlock(&dev->struct_mutex);
4563                 unwritten = copy_from_user(vaddr, user_data, args->size);
4564                 mutex_lock(&dev->struct_mutex);
4565                 if (unwritten)
4566                         return -EFAULT;
4567         }
4568
4569         i915_gem_chipset_flush(dev);
4570         return 0;
4571 }
4572
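     /*
      * Per-file teardown, believed to be called from the driver's file-close
      * path: any requests still owned by this client are unhooked from its
      * file_priv so that later request retirement cannot touch freed memory.
      */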
4573 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4574 {
4575         struct drm_i915_file_private *file_priv = file->driver_priv;
4576
4577         /* Clean up our request list when the client is going away, so that
4578          * later retire_requests won't dereference our soon-to-be-gone
4579          * file_priv.
4580          */
4581         spin_lock(&file_priv->mm.lock);
4582         while (!list_empty(&file_priv->mm.request_list)) {
4583                 struct drm_i915_gem_request *request;
4584
4585                 request = list_first_entry(&file_priv->mm.request_list,
4586                                            struct drm_i915_gem_request,
4587                                            client_list);
4588                 list_del(&request->client_list);
4589                 request->file_priv = NULL;
4590         }
4591         spin_unlock(&file_priv->mm.lock);
4592 }
4593
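     /*
      * Best-effort test for "does this task hold the mutex", used by the
      * shrinker below to distinguish lock recursion (direct reclaim entered
      * while we already hold struct_mutex) from genuine contention.  It
      * depends on mutex->owner, which is only maintained with CONFIG_SMP or
      * CONFIG_DEBUG_MUTEXES.
      */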
4594 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4595 {
4596         if (!mutex_is_locked(mutex))
4597                 return false;
4598
4599 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
4600         return mutex->owner == task;
4601 #else
4602         /* Without mutex owner tracking we cannot tell who holds the lock, so assume it is not us */
4603         return false;
4604 #endif
4605 }
4606
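     /*
      * Shrinker callback.  When asked to scan, it first purges purgeable
      * objects, then tries __i915_gem_shrink(), and as a last resort
      * i915_gem_shrink_all().  The return value is an estimate of reclaimable
      * pages: unbound objects with no pinned pages plus unpinned objects on
      * the inactive list.  If struct_mutex is already held by this task the
      * lock is reused (unless shrinker_no_lock_stealing is set) and left held
      * on return; if another task holds it we report nothing reclaimable.
      */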
4607 static int
4608 i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4609 {
4610         struct drm_i915_private *dev_priv =
4611                 container_of(shrinker,
4612                              struct drm_i915_private,
4613                              mm.inactive_shrinker);
4614         struct drm_device *dev = dev_priv->dev;
4615         struct i915_address_space *vm = &dev_priv->gtt.base;
4616         struct drm_i915_gem_object *obj;
4617         int nr_to_scan = sc->nr_to_scan;
4618         bool unlock = true;
4619         int cnt;
4620
4621         if (!mutex_trylock(&dev->struct_mutex)) {
4622                 if (!mutex_is_locked_by(&dev->struct_mutex, current))
4623                         return 0;
4624
4625                 if (dev_priv->mm.shrinker_no_lock_stealing)
4626                         return 0;
4627
4628                 unlock = false;
4629         }
4630
4631         if (nr_to_scan) {
4632                 nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
4633                 if (nr_to_scan > 0)
4634                         nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
4635                                                         false);
4636                 if (nr_to_scan > 0)
4637                         i915_gem_shrink_all(dev_priv);
4638         }
4639
4640         cnt = 0;
4641         list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
4642                 if (obj->pages_pin_count == 0)
4643                         cnt += obj->base.size >> PAGE_SHIFT;
4644         list_for_each_entry(obj, &vm->inactive_list, mm_list)
4645                 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
4646                         cnt += obj->base.size >> PAGE_SHIFT;
4647
4648         if (unlock)
4649                 mutex_unlock(&dev->struct_mutex);
4650         return cnt;
4651 }